mongo 2.19.3 → 2.20.0
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/Rakefile +27 -154
- data/lib/mongo/cluster/topology/base.rb +16 -0
- data/lib/mongo/cluster.rb +27 -1
- data/lib/mongo/collection/view/iterable.rb +1 -0
- data/lib/mongo/collection.rb +4 -2
- data/lib/mongo/error/transactions_not_supported.rb +34 -0
- data/lib/mongo/error.rb +1 -0
- data/lib/mongo/grid/fs_bucket.rb +6 -0
- data/lib/mongo/monitoring/event/secure.rb +1 -1
- data/lib/mongo/operation/shared/executable.rb +43 -27
- data/lib/mongo/operation/shared/response_handling.rb +23 -25
- data/lib/mongo/retryable/read_worker.rb +7 -6
- data/lib/mongo/retryable/write_worker.rb +7 -4
- data/lib/mongo/retryable.rb +2 -2
- data/lib/mongo/server/app_metadata/environment.rb +64 -9
- data/lib/mongo/server/app_metadata.rb +5 -4
- data/lib/mongo/server/description/features.rb +1 -0
- data/lib/mongo/server_selector/base.rb +32 -6
- data/lib/mongo/session/server_session/dirtyable.rb +52 -0
- data/lib/mongo/session/server_session.rb +3 -0
- data/lib/mongo/session/session_pool.rb +12 -18
- data/lib/mongo/session.rb +32 -0
- data/lib/mongo/uri.rb +0 -4
- data/lib/mongo/version.rb +1 -1
- data/mongo.gemspec +1 -7
- data/spec/atlas/atlas_connectivity_spec.rb +4 -4
- data/spec/faas/ruby-sam-app/Gemfile +9 -0
- data/spec/faas/ruby-sam-app/mongodb/Gemfile +4 -0
- data/spec/faas/ruby-sam-app/mongodb/app.rb +149 -0
- data/spec/faas/ruby-sam-app/template.yaml +48 -0
- data/spec/integration/client_side_encryption/corpus_spec.rb +10 -2
- data/spec/integration/retryable_reads_errors_spec.rb +161 -8
- data/spec/integration/retryable_writes_errors_spec.rb +156 -0
- data/spec/mongo/cluster_spec.rb +36 -0
- data/spec/mongo/collection/view/aggregation_spec.rb +6 -1
- data/spec/mongo/collection/view/explainable_spec.rb +2 -0
- data/spec/mongo/collection_crud_spec.rb +1 -1
- data/spec/mongo/operation/insert_spec.rb +1 -1
- data/spec/mongo/retryable/write_worker_spec.rb +39 -0
- data/spec/mongo/server/app_metadata/environment_spec.rb +135 -0
- data/spec/mongo/server/app_metadata_spec.rb +12 -2
- data/spec/mongo/server/connection_spec.rb +4 -0
- data/spec/mongo/session/session_pool_spec.rb +1 -16
- data/spec/mongo/session_transaction_spec.rb +15 -0
- data/spec/mongo/uri_spec.rb +0 -9
- data/spec/runners/crud/test.rb +0 -8
- data/spec/runners/crud.rb +1 -1
- data/spec/runners/transactions/test.rb +12 -3
- data/spec/runners/unified/assertions.rb +16 -3
- data/spec/runners/unified/crud_operations.rb +12 -0
- data/spec/runners/unified/support_operations.rb +3 -5
- data/spec/runners/unified/test.rb +8 -1
- data/spec/shared/lib/mrss/docker_runner.rb +3 -0
- data/spec/shared/share/Dockerfile.erb +20 -69
- data/spec/shared/shlib/server.sh +1 -0
- data/spec/shared/shlib/set_env.sh +5 -28
- data/spec/spec_tests/data/client_side_encryption/explain.yml +2 -2
- data/spec/spec_tests/data/connection_string/invalid-uris.yml +0 -10
- data/spec/spec_tests/data/connection_string/valid-options.yml +13 -0
- data/spec/spec_tests/data/crud_unified/find-test-all-options.yml +348 -0
- data/spec/spec_tests/data/index_management/createSearchIndex.yml +5 -3
- data/spec/spec_tests/data/index_management/createSearchIndexes.yml +7 -4
- data/spec/spec_tests/data/index_management/dropSearchIndex.yml +2 -1
- data/spec/spec_tests/data/index_management/listSearchIndexes.yml +13 -7
- data/spec/spec_tests/data/index_management/updateSearchIndex.yml +2 -1
- data/spec/spec_tests/data/retryable_writes/unified/bulkWrite-serverErrors.yml +3 -6
- data/spec/spec_tests/data/retryable_writes/unified/insertOne-serverErrors.yml +3 -6
- data/spec/spec_tests/data/run_command_unified/runCommand.yml +319 -0
- data/spec/spec_tests/data/sessions_unified/driver-sessions-dirty-session-errors.yml +351 -0
- data/spec/spec_tests/data/unified/valid-pass/poc-crud.yml +1 -1
- data/spec/spec_tests/data/unified/valid-pass/poc-retryable-writes.yml +7 -7
- data/spec/spec_tests/data/unified/valid-pass/poc-sessions.yml +3 -4
- data/spec/spec_tests/data/unified/valid-pass/poc-transactions-convenient-api.yml +1 -1
- data/spec/spec_tests/data/unified/valid-pass/poc-transactions-mongos-pin-auto.yml +1 -1
- data/spec/spec_tests/data/unified/valid-pass/poc-transactions.yml +3 -3
- data/spec/spec_tests/run_command_unified_spec.rb +13 -0
- data/spec/spec_tests/sdam_unified_spec.rb +2 -0
- data/spec/support/constraints.rb +6 -0
- data/spec/support/ocsp +1 -1
- data/spec/support/recording_logger.rb +27 -0
- data.tar.gz.sig +0 -0
- metadata +1272 -1253
- metadata.gz.sig +0 -0
- data/spec/spec_tests/data/cmap/pool-clear-interrupt-immediately.yml +0 -49
data/spec/integration/client_side_encryption/corpus_spec.rb
CHANGED
@@ -188,6 +188,15 @@ describe 'Client-Side Encryption' do
       key_vault_collection.insert_one(kmip_data_key)
     end
 
+  # This method compensates for an API change between BSON 4 and
+  # BSON 5.
+  def normalize_cse_value(a)
+    case a
+    when BSON::Decimal128 then a.to_d
+    else a
+    end
+  end
+
   shared_context 'with jsonSchema collection validator' do
     let(:local_schema_map) { nil }
 
@@ -228,12 +237,11 @@ describe 'Client-Side Encryption' do
             .find(_id: corpus_encrypted_id)
             .first
 
-
         corpus_encrypted_actual.each do |key, value|
           # If it was deterministically encrypted, test the encrypted values
           # for equality.
           if value['algo'] == 'det'
-            expect(value['value']).to eq(corpus_encrypted_expected[key]['value'])
+            expect(normalize_cse_value(value['value'])).to eq(normalize_cse_value(corpus_encrypted_expected[key]['value']))
          else
             # If the document was randomly encrypted, the two encrypted values
             # will not be equal. Ensure that they are equal when decrypted.
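The normalize_cse_value helper above exists because BSON 4 and BSON 5 can hand back decimal values as different classes, so the corpus spec normalizes both sides before comparing. A minimal, hypothetical illustration of the same idea (assumes only the bson gem; the values are placeholders, not corpus data):

    require 'bson'
    require 'bigdecimal'

    def normalize_cse_value(a)
      case a
      when BSON::Decimal128 then a.to_d
      else a
      end
    end

    raw      = BSON::Decimal128.new('1.5')   # what one BSON major version may return
    expected = BigDecimal('1.5')             # what the other may return

    # Comparing the normalized forms makes the assertion independent of the
    # BSON major version in use.
    normalize_cse_value(raw) == normalize_cse_value(expected) # => true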
data/spec/integration/retryable_reads_errors_spec.rb
CHANGED
@@ -20,14 +20,14 @@ describe 'Retryable reads errors tests' do
 
   let(:failpoint) do
     {
-
-
-
-
-
-
-
-
+      configureFailPoint: "failCommand",
+      mode: { times: 1 },
+      data: {
+        failCommands: [ "find" ],
+        errorCode: 91,
+        blockConnection: true,
+        blockTimeMS: 1000
+      }
     }
   end
 
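For context, the failCommand fail point used throughout these specs is enabled with an admin command and must be switched off afterwards. A hedged sketch against a local test deployment (host, database name and timings are placeholders):

    require 'mongo'

    admin = Mongo::Client.new(['localhost:27017'], database: 'admin')

    # Make the next find command fail with error code 91 after blocking
    # the connection for one second, mirroring the failpoint above.
    admin.database.command(
      configureFailPoint: 'failCommand',
      mode: { times: 1 },
      data: {
        failCommands: ['find'],
        errorCode: 91,
        blockConnection: true,
        blockTimeMS: 1000
      }
    )

    # ... exercise the operation under test ...

    # Always disable the fail point when done.
    admin.database.command(configureFailPoint: 'failCommand', mode: 'off')
    admin.close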
@@ -107,4 +107,157 @@ describe 'Retryable reads errors tests' do
       })
     end
   end
+
+  context 'Retries in a sharded cluster' do
+    require_topology :sharded
+    min_server_version '4.2'
+    require_no_auth
+
+    let(:subscriber) { Mrss::EventSubscriber.new }
+
+    let(:find_started_events) do
+      subscriber.started_events.select { |e| e.command_name == "find" }
+    end
+
+    let(:find_failed_events) do
+      subscriber.failed_events.select { |e| e.command_name == "find" }
+    end
+
+    let(:find_succeeded_events) do
+      subscriber.succeeded_events.select { |e| e.command_name == "find" }
+    end
+
+    context 'when another mongos is available' do
+
+      let(:first_mongos) do
+        Mongo::Client.new(
+          [SpecConfig.instance.addresses.first],
+          direct_connection: true,
+          database: 'admin'
+        )
+      end
+
+      let(:second_mongos) do
+        Mongo::Client.new(
+          [SpecConfig.instance.addresses.last],
+          direct_connection: false,
+          database: 'admin'
+        )
+      end
+
+      let(:client) do
+        new_local_client(
+          [
+            SpecConfig.instance.addresses.first,
+            SpecConfig.instance.addresses.last,
+          ],
+          SpecConfig.instance.test_options.merge(retry_reads: true)
+        )
+      end
+
+      let(:expected_servers) do
+        [
+          SpecConfig.instance.addresses.first.to_s,
+          SpecConfig.instance.addresses.last.to_s
+        ].sort
+      end
+
+      before do
+        skip 'This test requires at least two mongos' if SpecConfig.instance.addresses.length < 2
+
+        first_mongos.database.command(
+          configureFailPoint: 'failCommand',
+          mode: { times: 1 },
+          data: {
+            failCommands: %w(find),
+            closeConnection: false,
+            errorCode: 6
+          }
+        )
+
+        second_mongos.database.command(
+          configureFailPoint: 'failCommand',
+          mode: { times: 1 },
+          data: {
+            failCommands: %w(find),
+            closeConnection: false,
+            errorCode: 6
+          }
+        )
+      end
+
+      after do
+        [first_mongos, second_mongos].each do |admin_client|
+          admin_client.database.command(
+            configureFailPoint: 'failCommand',
+            mode: 'off'
+          )
+          admin_client.close
+        end
+        client.close
+      end
+
+      it 'retries on different mongos' do
+        client.subscribe(Mongo::Monitoring::COMMAND, subscriber)
+        expect { collection.find.first }.to raise_error(Mongo::Error::OperationFailure)
+        expect(find_started_events.map { |e| e.address.to_s }.sort).to eq(expected_servers)
+        expect(find_failed_events.map { |e| e.address.to_s }.sort).to eq(expected_servers)
+      end
+    end
+
+    context 'when no other mongos is available' do
+      let(:mongos) do
+        Mongo::Client.new(
+          [SpecConfig.instance.addresses.first],
+          direct_connection: true,
+          database: 'admin'
+        )
+      end
+
+      let(:client) do
+        new_local_client(
+          [
+            SpecConfig.instance.addresses.first
+          ],
+          SpecConfig.instance.test_options.merge(retry_reads: true)
+        )
+      end
+
+      before do
+        mongos.database.command(
+          configureFailPoint: 'failCommand',
+          mode: { times: 1 },
+          data: {
+            failCommands: %w(find),
+            closeConnection: false,
+            errorCode: 6
+          }
+        )
+      end
+
+      after do
+        mongos.database.command(
+          configureFailPoint: 'failCommand',
+          mode: 'off'
+        )
+        mongos.close
+        client.close
+      end
+
+      it 'retries on the same mongos' do
+        client.subscribe(Mongo::Monitoring::COMMAND, subscriber)
+        expect { collection.find.first }.not_to raise_error
+        expect(find_started_events.map { |e| e.address.to_s }.sort).to eq([
+          SpecConfig.instance.addresses.first.to_s,
+          SpecConfig.instance.addresses.first.to_s
+        ])
+        expect(find_failed_events.map { |e| e.address.to_s }.sort).to eq([
+          SpecConfig.instance.addresses.first.to_s
+        ])
+        expect(find_succeeded_events.map { |e| e.address.to_s }.sort).to eq([
+          SpecConfig.instance.addresses.first.to_s
+        ])
+      end
+    end
+  end
 end
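The assertions above rely on command monitoring to see which mongos served each attempt. A rough, self-contained sketch of the same idea outside the spec suite (the subscriber class, hosts and collection name are illustrative, not the Mrss::EventSubscriber helper the tests use):

    require 'mongo'

    # Records the server address of every started "find" command.
    class FindAddressRecorder
      attr_reader :addresses

      def initialize
        @addresses = []
      end

      def started(event)
        @addresses << event.address.to_s if event.command_name == 'find'
      end

      def succeeded(_event); end
      def failed(_event); end
    end

    client = Mongo::Client.new(%w[mongos1:27017 mongos2:27017],
                               database: 'test', retry_reads: true)
    recorder = FindAddressRecorder.new
    client.subscribe(Mongo::Monitoring::COMMAND, recorder)

    client[:test_collection].find.first

    # With retryable reads, a failed first attempt shows up here as a second
    # address (a different mongos when one is available).
    p recorder.addresses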
data/spec/integration/retryable_writes_errors_spec.rb
CHANGED
@@ -189,4 +189,160 @@ describe 'Retryable writes errors tests' do
       })
     end
   end
+
+  context 'Retries in a sharded cluster' do
+    require_topology :sharded
+    min_server_version '4.2'
+    require_no_auth
+
+    let(:subscriber) { Mrss::EventSubscriber.new }
+
+    let(:insert_started_events) do
+      subscriber.started_events.select { |e| e.command_name == "insert" }
+    end
+
+    let(:insert_failed_events) do
+      subscriber.failed_events.select { |e| e.command_name == "insert" }
+    end
+
+    let(:insert_succeeded_events) do
+      subscriber.succeeded_events.select { |e| e.command_name == "insert" }
+    end
+
+    context 'when another mongos is available' do
+
+      let(:first_mongos) do
+        Mongo::Client.new(
+          [SpecConfig.instance.addresses.first],
+          direct_connection: true,
+          database: 'admin'
+        )
+      end
+
+      let(:second_mongos) do
+        Mongo::Client.new(
+          [SpecConfig.instance.addresses.last],
+          direct_connection: false,
+          database: 'admin'
+        )
+      end
+
+      let(:client) do
+        new_local_client(
+          [
+            SpecConfig.instance.addresses.first,
+            SpecConfig.instance.addresses.last,
+          ],
+          SpecConfig.instance.test_options.merge(retry_writes: true)
+        )
+      end
+
+      let(:expected_servers) do
+        [
+          SpecConfig.instance.addresses.first.to_s,
+          SpecConfig.instance.addresses.last.to_s
+        ].sort
+      end
+
+      before do
+        skip 'This test requires at least two mongos' if SpecConfig.instance.addresses.length < 2
+
+        first_mongos.database.command(
+          configureFailPoint: 'failCommand',
+          mode: { times: 1 },
+          data: {
+            failCommands: %w(insert),
+            closeConnection: false,
+            errorCode: 6,
+            errorLabels: ['RetryableWriteError']
+          }
+        )
+
+        second_mongos.database.command(
+          configureFailPoint: 'failCommand',
+          mode: { times: 1 },
+          data: {
+            failCommands: %w(insert),
+            closeConnection: false,
+            errorCode: 6,
+            errorLabels: ['RetryableWriteError']
+          }
+        )
+      end
+
+      after do
+        [first_mongos, second_mongos].each do |admin_client|
+          admin_client.database.command(
+            configureFailPoint: 'failCommand',
+            mode: 'off'
+          )
+          admin_client.close
+        end
+        client.close
+      end
+
+      it 'retries on different mongos' do
+        client.subscribe(Mongo::Monitoring::COMMAND, subscriber)
+        expect { collection.insert_one(x: 1) }.to raise_error(Mongo::Error::OperationFailure)
+        expect(insert_started_events.map { |e| e.address.to_s }.sort).to eq(expected_servers)
+        expect(insert_failed_events.map { |e| e.address.to_s }.sort).to eq(expected_servers)
+      end
+    end
+
+    context 'when no other mongos is available' do
+      let(:mongos) do
+        Mongo::Client.new(
+          [SpecConfig.instance.addresses.first],
+          direct_connection: true,
+          database: 'admin'
+        )
+      end
+
+      let(:client) do
+        new_local_client(
+          [
+            SpecConfig.instance.addresses.first
+          ],
+          SpecConfig.instance.test_options.merge(retry_writes: true)
+        )
+      end
+
+      before do
+        mongos.database.command(
+          configureFailPoint: 'failCommand',
+          mode: { times: 1 },
+          data: {
+            failCommands: %w(insert),
+            closeConnection: false,
+            errorCode: 6,
+            errorLabels: ['RetryableWriteError']
+          }
+        )
+      end
+
+      after do
+        mongos.database.command(
+          configureFailPoint: 'failCommand',
+          mode: 'off'
+        )
+        mongos.close
+        client.close
+      end
+
+      it 'retries on the same mongos' do
+        client.subscribe(Mongo::Monitoring::COMMAND, subscriber)
+        expect { collection.insert_one(x: 1) }.not_to raise_error
+        expect(insert_started_events.map { |e| e.address.to_s }.sort).to eq([
+          SpecConfig.instance.addresses.first.to_s,
+          SpecConfig.instance.addresses.first.to_s
+        ])
+        expect(insert_failed_events.map { |e| e.address.to_s }.sort).to eq([
+          SpecConfig.instance.addresses.first.to_s
+        ])
+        expect(insert_succeeded_events.map { |e| e.address.to_s }.sort).to eq([
+          SpecConfig.instance.addresses.first.to_s
+        ])
+      end
+    end
+  end
 end
data/spec/mongo/cluster_spec.rb
CHANGED
@@ -1,6 +1,7 @@
 # frozen_string_literal: true
 
 require 'spec_helper'
+require 'support/recording_logger'
 
 # let these existing styles stand, rather than going in for a deep refactoring
 # of these specs.
@@ -84,6 +85,41 @@ describe Mongo::Cluster do
         )
       end
     end
+
+    context 'when a non-genuine host is detected' do
+      before { described_class.new(host_names, monitoring, logger: logger, monitoring_io: false) }
+
+      let(:logger) { RecordingLogger.new }
+
+      shared_examples 'an action that logs' do
+        it 'writes a warning to the log' do
+          expect(logger.lines).to include(a_string_matching(expected_log_output))
+        end
+      end
+
+      context 'when CosmosDB is detected' do
+        let(:host_names) { %w[ xyz.cosmos.azure.com ] }
+        let(:expected_log_output) { %r{https://www.mongodb.com/supportability/cosmosdb} }
+
+        it_behaves_like 'an action that logs'
+      end
+
+      context 'when DocumentDB is detected' do
+        let(:expected_log_output) { %r{https://www.mongodb.com/supportability/documentdb} }
+
+        context 'with docdb uri' do
+          let(:host_names) { [ 'xyz.docdb.amazonaws.com' ] }
+
+          it_behaves_like 'an action that logs'
+        end
+
+        context 'with docdb-elastic uri' do
+          let(:host_names) { [ 'xyz.docdb-elastic.amazonaws.com' ] }
+
+          it_behaves_like 'an action that logs'
+        end
+      end
+    end
   end
 
   describe '#==' do
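These examples build the cluster with a RecordingLogger from the newly added data/spec/support/recording_logger.rb (listed in the file summary but not shown in this diff). A plausible minimal sketch of such a helper, assuming it only needs to expose the emitted text for `logger.lines` assertions:

    require 'logger'
    require 'stringio'

    # Captures everything written through the standard Logger interface so
    # specs can assert on the emitted text.
    class RecordingLogger < Logger
      def initialize(*args, **kwargs)
        @buffer = StringIO.new
        super(@buffer, *args, **kwargs)
      end

      def contents
        @buffer.string
      end

      def lines
        contents.split("\n")
      end
    end

With a helper like this, `RecordingLogger.new.lines` starts empty and grows as the cluster logs its non-genuine-host warnings.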
data/spec/mongo/collection/view/aggregation_spec.rb
CHANGED
@@ -321,7 +321,12 @@ describe Mongo::Collection::View::Aggregation do
       min_server_fcv '4.2'
 
       let(:result) do
-        aggregation.explain
+        if aggregation.explain.key?('queryPlanner')
+          aggregation.explain['queryPlanner']['collation']['locale']
+        else
+          # 7.2+ sharded cluster
+          aggregation.explain['shards'].first.last['queryPlanner']['collation']['locale']
+        end
       end
 
       it_behaves_like 'applies the collation'
data/spec/mongo/collection/view/explainable_spec.rb
CHANGED
@@ -42,6 +42,7 @@ describe Mongo::Collection::View::Explainable do
       max_server_version '3.0'
 
       it 'executes the explain' do
+        skip 'https://jira.mongodb.org/browse/RUBY-3399'
         explain[:queryPlanner][:parsedQuery].should be_a(Hash)
       end
     end
@@ -50,6 +51,7 @@ describe Mongo::Collection::View::Explainable do
       min_server_fcv '3.2'
 
       it 'executes the explain' do
+        skip 'https://jira.mongodb.org/browse/RUBY-3399'
         explain[:queryPlanner][:mongosPlannerVersion].should == 1
       end
     end
data/spec/mongo/operation/insert_spec.rb
CHANGED
@@ -177,7 +177,7 @@ describe Mongo::Operation::Insert do
       end
 
       it 'inserts the documents into the collection' do
-        expect(authorized_collection.find.to_a). to eq(documents)
+        expect(authorized_collection.find.sort(_id: 1).to_a). to eq(documents)
       end
     end
 
data/spec/mongo/retryable/write_worker_spec.rb
ADDED
@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+describe Mongo::Retryable::WriteWorker do
+  describe '#nro_write_with_retry' do
+    context 'when session is nil' do
+      let(:retryable) do
+        authorized_client['write_worker_test']
+      end
+
+      let(:write_concern) do
+        Mongo::WriteConcern.get(w: 0)
+      end
+
+      let(:write_worker) do
+        described_class.new(retryable)
+      end
+
+      let(:context) do
+        instance_double(Mongo::Operation::Context).tap do |context|
+          allow(context).to receive(:session).and_return(nil)
+        end
+      end
+
+      before do
+        # We avoid actual execution of the operation to speed up and simplify
+        # the spec.
+        allow(write_worker).to receive(:legacy_write_with_retry).and_return(nil)
+      end
+
+      it 'does not raise' do
+        expect do
+          write_worker.nro_write_with_retry(write_concern, context: context)
+        end.not_to raise_error
+      end
+    end
+  end
+end
data/spec/mongo/server/app_metadata/environment_spec.rb
CHANGED
@@ -1,8 +1,52 @@
 # frozen_string_literal: true
+# rubocop:todo all
 
 require 'spec_helper'
+require 'fileutils'
+
+MOCKED_DOCKERENV_PATH = File.expand_path(File.join(Dir.pwd, '.dockerenv-mocked'))
+
+module ContainerChecking
+  def mock_dockerenv_path
+    before do
+      allow_any_instance_of(Mongo::Server::AppMetadata::Environment)
+        .to receive(:dockerenv_path)
+        .and_return(MOCKED_DOCKERENV_PATH)
+    end
+  end
+
+  def with_docker
+    mock_dockerenv_path
+
+    around do |example|
+      File.write(MOCKED_DOCKERENV_PATH, 'placeholder')
+      example.run
+    ensure
+      File.delete(MOCKED_DOCKERENV_PATH)
+    end
+  end
+
+  def without_docker
+    mock_dockerenv_path
+
+    around do |example|
+      FileUtils.rm_f(MOCKED_DOCKERENV_PATH)
+      example.run
+    end
+  end
+
+  def with_kubernetes
+    local_env 'KUBERNETES_SERVICE_HOST' => 'kubernetes.default.svc.cluster.local'
+  end
+
+  def without_kubernetes
+    local_env 'KUBERNETES_SERVICE_HOST' => nil
+  end
+end
 
 describe Mongo::Server::AppMetadata::Environment do
+  extend ContainerChecking
+
   let(:env) { described_class.new }
 
   shared_examples_for 'running in a FaaS environment' do
@@ -17,6 +61,36 @@ describe Mongo::Server::AppMetadata::Environment do
     end
   end
 
+  shared_examples_for 'not running in a Docker container' do
+    it 'does not detect Docker' do
+      expect(env.container || {}).not_to include :runtime
+    end
+  end
+
+  shared_examples_for 'not running under Kubernetes' do
+    it 'does not detect Kubernetes' do
+      expect(env.container || {}).not_to include :orchestrator
+    end
+  end
+
+  shared_examples_for 'running under Kubernetes' do
+    it 'detects that Kubernetes is present' do
+      expect(env.container[:orchestrator]).to be == 'kubernetes'
+    end
+  end
+
+  shared_examples_for 'running in a Docker container' do
+    it 'detects that Docker is present' do
+      expect(env.container[:runtime]).to be == 'docker'
+    end
+  end
+
+  shared_examples_for 'running under Kerbenetes' do
+    it 'detects that kubernetes is present' do
+      expect(env.container['orchestrator']).to be == 'kubernetes'
+    end
+  end
+
   context 'when run outside of a FaaS environment' do
     it_behaves_like 'running outside a FaaS environment'
   end
@@ -204,6 +278,67 @@ describe Mongo::Server::AppMetadata::Environment do
           timeout_sec: 60, region: 'us-central1',
         }
       end
+
+      context 'when a container is present' do
+        with_kubernetes
+        with_docker
+
+        it 'includes a container key' do
+          expect(env.to_h[:container]).to be == {
+            runtime: 'docker',
+            orchestrator: 'kubernetes'
+          }
+        end
+      end
+
+      context 'when no container is present' do
+        without_kubernetes
+        without_docker
+
+        it 'does not include a container key' do
+          expect(env.to_h).not_to include(:container)
+        end
+      end
+    end
+  end
+
+  # have a specific test for this, since the tests that check
+  # for Docker use a mocked value for the .dockerenv path.
+  it 'should look for dockerenv in root directory' do
+    expect(described_class::DOCKERENV_PATH).to be == '/.dockerenv'
+  end
+
+  context 'when no container is present' do
+    without_kubernetes
+    without_docker
+
+    it_behaves_like 'not running in a Docker container'
+    it_behaves_like 'not running under Kubernetes'
+  end
+
+  context 'when container is present' do
+    context 'when kubernetes is present' do
+      without_docker
+      with_kubernetes
+
+      it_behaves_like 'not running in a Docker container'
+      it_behaves_like 'running under Kubernetes'
+    end
+
+    context 'when docker is present' do
+      with_docker
+      without_kubernetes
+
+      it_behaves_like 'running in a Docker container'
+      it_behaves_like 'not running under Kubernetes'
+    end
+
+    context 'when both kubernetes and docker are present' do
+      with_docker
+      with_kubernetes
+
+      it_behaves_like 'running in a Docker container'
+      it_behaves_like 'running under Kubernetes'
     end
   end
 end
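The container specs above mock dockerenv_path and KUBERNETES_SERVICE_HOST; the behavior they exercise boils down to one file check and one environment-variable check. A hedged sketch of that logic, not the driver's actual implementation (the method and parameter names here are made up):

    # Returns nil outside containers, otherwise a hash like
    # { runtime: 'docker', orchestrator: 'kubernetes' }.
    def container_metadata(dockerenv_path: '/.dockerenv', env: ENV)
      metadata = {}
      metadata[:runtime] = 'docker' if File.exist?(dockerenv_path)
      metadata[:orchestrator] = 'kubernetes' if env['KUBERNETES_SERVICE_HOST']
      metadata.empty? ? nil : metadata
    end

    container_metadata # => nil on a bare host, a populated hash in Docker and/or Kubernetes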
data/spec/mongo/server/app_metadata_spec.rb
CHANGED
@@ -87,8 +87,18 @@ describe Mongo::Server::AppMetadata do
   end
 
   context 'when run outside of a FaaS environment' do
-
-
+    context 'when a container is present' do
+      local_env 'KUBERNETES_SERVICE_HOST' => 'something'
+
+      it 'includes the :env key in the client document' do
+        expect(app_metadata.client_document.key?(:env)).to be true
+      end
+    end
+
+    context 'when no container is present' do
+      it 'excludes the :env key from the client document' do
+        expect(app_metadata.client_document.key?(:env)).to be false
+      end
     end
   end
 
data/spec/mongo/server/connection_spec.rb
CHANGED
@@ -265,6 +265,10 @@ describe Mongo::Server::Connection do
   context 'when #authenticate! raises an exception' do
     require_auth
 
+    # because the mock/stub flow here doesn't cover the flow used by
+    # the X.509 authentication mechanism...
+    forbid_x509_auth
+
     let(:server_options) do
       Mongo::Client.canonicalize_ruby_options(
         SpecConfig.instance.all_test_options,