mongoid 7.0.11 → 7.0.12
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data.tar.gz.sig +0 -0
- data/Rakefile +2 -7
- data/lib/mongoid/document.rb +3 -2
- data/lib/mongoid/interceptable.rb +3 -1
- data/lib/mongoid/version.rb +1 -1
- data/spec/app/models/customer.rb +11 -0
- data/spec/app/models/customer_address.rb +12 -0
- data/spec/integration/callbacks_models.rb +49 -0
- data/spec/integration/callbacks_spec.rb +216 -0
- data/spec/mongoid/association/embedded/embedded_in/proxy_spec.rb +50 -0
- data/spec/mongoid/atomic/paths_spec.rb +41 -0
- data/spec/shared/LICENSE +20 -0
- data/spec/shared/lib/mrss/child_process_helper.rb +80 -0
- data/spec/shared/lib/mrss/cluster_config.rb +211 -0
- data/spec/shared/lib/mrss/constraints.rb +330 -0
- data/spec/shared/lib/mrss/docker_runner.rb +262 -0
- data/spec/shared/lib/mrss/lite_constraints.rb +175 -0
- data/spec/shared/lib/mrss/server_version_registry.rb +69 -0
- data/spec/shared/lib/mrss/spec_organizer.rb +149 -0
- data/spec/shared/share/Dockerfile.erb +229 -0
- data/spec/shared/shlib/distro.sh +73 -0
- data/spec/shared/shlib/server.sh +270 -0
- data/spec/shared/shlib/set_env.sh +128 -0
- metadata +479 -446
- metadata.gz.sig +0 -0
data/spec/shared/LICENSE
ADDED
@@ -0,0 +1,20 @@
+Copyright (c) 2020 MongoDB, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/spec/shared/lib/mrss/child_process_helper.rb
ADDED
@@ -0,0 +1,80 @@
+# frozen_string_literal: true
+# encoding: utf-8
+
+autoload :ChildProcess, 'childprocess'
+autoload :Tempfile, 'tempfile'
+
+module Mrss
+  module ChildProcessHelper
+    class SpawnError < StandardError; end
+
+    module_function def call(cmd, env: nil, cwd: nil)
+      process = ChildProcess.new(*cmd)
+      process.io.inherit!
+      if cwd
+        process.cwd = cwd
+      end
+      if env
+        env.each do |k, v|
+          process.environment[k.to_s] = v
+        end
+      end
+      process.start
+      process.wait
+      process
+    end
+
+    module_function def check_call(cmd, env: nil, cwd: nil)
+      process = call(cmd, env: env, cwd: cwd)
+      unless process.exit_code == 0
+        raise SpawnError, "Failed to execute: #{cmd}"
+      end
+    end
+
+    module_function def get_output(cmd, env: nil, cwd: nil)
+      process = ChildProcess.new(*cmd)
+      process.io.inherit!
+      if cwd
+        process.cwd = cwd
+      end
+      if env
+        env.each do |k, v|
+          process.environment[k.to_s] = v
+        end
+      end
+
+      output = ''
+      r, w = IO.pipe
+
+      begin
+        process.io.stdout = w
+        process.start
+        w.close
+
+        thread = Thread.new do
+          begin
+            loop do
+              output << r.readpartial(16384)
+            end
+          rescue EOFError
+          end
+        end
+
+        process.wait
+        thread.join
+      ensure
+        r.close
+      end
+
+      [process, output]
+    end
+
+    module_function def check_output(*args)
+      process, output = get_output(*args)
+      unless process.exit_code == 0
+        raise SpawnError, "Failed to execute: #{args}"
+      end
+      output
+    end
+  end
+end
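For orientation, here is a minimal sketch of how a test task might drive this helper once `lib` is on the load path; the command, environment, and working directory below are hypothetical and not part of the gem.

# Hypothetical usage sketch of Mrss::ChildProcessHelper (not part of the diff).
require 'mrss/child_process_helper'

# Run a command with inherited stdout/stderr; raises SpawnError on a non-zero exit.
Mrss::ChildProcessHelper.check_call(%w(ls -l), cwd: '/tmp')

# Capture stdout instead of inheriting it; returns the output string.
listing = Mrss::ChildProcessHelper.check_output(%w(ls), env: { 'LC_ALL' => 'C' })
puts listing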
data/spec/shared/lib/mrss/cluster_config.rb
ADDED
@@ -0,0 +1,211 @@
+# ClusterConfig requires ClientRegistry class provided by the host project.
+
+require 'singleton'
+
+module Mrss
+  class ClusterConfig
+    include Singleton
+    include RSpec::Core::Pending
+
+    def single_server?
+      determine_cluster_config
+      @single_server
+    end
+
+    def replica_set_name
+      determine_cluster_config
+      @replica_set_name
+    end
+
+    def server_version
+      determine_cluster_config
+      @server_version
+    end
+
+    def enterprise?
+      determine_cluster_config
+      @enterprise
+    end
+
+    def short_server_version
+      server_version.split('.')[0..1].join('.')
+    end
+
+    def fcv
+      determine_cluster_config
+      @fcv
+    end
+
+    # Per https://jira.mongodb.org/browse/SERVER-39052, working with FCV
+    # in sharded topologies is annoying. Also, FCV doesn't exist in servers
+    # less than 3.4. This method returns FCV on 3.4+ servers when in single
+    # or RS topologies, and otherwise returns the major.minor server version.
+    def fcv_ish
+      if server_version.nil?
+        raise "Deployment server version not known - check that connection to deployment succeeded"
+      end
+
+      if server_version >= '3.4' && topology != :sharded
+        fcv
+      else
+        if short_server_version == '4.1'
+          '4.2'
+        else
+          short_server_version
+        end
+      end
+    end
+
+    # @return [ Mongo::Address ] The address of the primary in the deployment.
+    def primary_address
+      determine_cluster_config
+      @primary_address
+    end
+
+    def primary_address_str
+      determine_cluster_config
+      @primary_address.seed
+    end
+
+    def primary_address_host
+      both = primary_address_str
+      both.split(':').first
+    end
+
+    def primary_address_port
+      both = primary_address_str
+      both.split(':')[1] || 27017
+    end
+
+    def primary_description
+      determine_cluster_config
+      @primary_description
+    end
+
+    # Try running a command on the admin database to see if the mongod was
+    # started with auth.
+    def auth_enabled?
+      if @auth_enabled.nil?
+        @auth_enabled = begin
+          basic_client.use(:admin).command(getCmdLineOpts: 1).first["argv"].include?("--auth")
+        rescue => e
+          e.message =~ /(not authorized)|(unauthorized)|(no users authenticated)|(requires authentication)/
+        end
+      end
+      @auth_enabled
+    end
+
+    def topology
+      determine_cluster_config
+      @topology
+    end
+
+    def storage_engine
+      @storage_engine ||= begin
+        # 2.6 does not have wired tiger
+        if short_server_version == '2.6'
+          :mmapv1
+        else
+          client = ClientRegistry.instance.global_client('root_authorized')
+          if topology == :sharded
+            shards = client.use(:admin).command(listShards: 1).first
+            if shards['shards'].empty?
+              raise 'Shards are empty'
+            end
+            shard = shards['shards'].first
+            address_str = shard['host'].sub(/^.*\//, '').sub(/,.*/, '')
+            client = ClusterTools.instance.direct_client(address_str,
+              SpecConfig.instance.test_options.merge(SpecConfig.instance.auth_options).merge(connect: :direct))
+          end
+          rv = client.use(:admin).command(serverStatus: 1).first
+          rv = rv['storageEngine']['name']
+          rv_map = {
+            'wiredTiger' => :wired_tiger,
+            'mmapv1' => :mmapv1,
+          }
+          rv_map[rv] || rv
+        end
+      end
+    end
+
+    # This method returns an alternate address for connecting to the configured
+    # deployment. For example, if the replica set is configured with nodes at
+    # of localhost:27017 and so on, this method will return 127.0.0.:27017.
+    #
+    # Note that the "alternate" refers to replica set configuration, not the
+    # addresses specified in test suite configuration. If the deployment topology
+    # is not a replica set, "alternate" refers to test suite configuration as
+    # this is the only configuration available.
+    def alternate_address
+      @alternate_address ||= begin
+        address = primary_address_host
+        str = case address
+        when '127.0.0.1'
+          'localhost'
+        when /^(\d+\.){3}\d+$/
+          skip 'This test requires a hostname or 127.0.0.1 as address'
+        else
+          # We don't know if mongod is listening on ipv4 or ipv6, in principle.
+          # Our tests use ipv4, so hardcode that for now.
+          # To support both we need to try both addresses which will make this
+          # test more complicated.
+          #
+          # JRuby chokes on primary_address_port as the port (e.g. 27017).
+          # Since the port does not actually matter, use a common port like 80.
+          resolved_address = Addrinfo.getaddrinfo(address, 80, Socket::PF_INET).first.ip_address
+          if resolved_address.include?(':')
+            "[#{resolved_address}]"
+          else
+            resolved_address
+          end
+        end + ":#{primary_address_port}"
+        Mongo::Address.new(str)
+      end
+    end
+
+    private
+
+    def determine_cluster_config
+      return if @primary_address
+
+      # Run all commands to figure out the cluster configuration from the same
+      # client. This is somewhat wasteful when running a single test, but reduces
+      # test runtime for the suite overall because all commands are sent on the
+      # same connection rather than each command connecting to the cluster by
+      # itself.
+      client = ClientRegistry.instance.global_client('root_authorized')
+
+      primary = client.cluster.next_primary
+      @primary_address = primary.address
+      @primary_description = primary.description
+      @replica_set_name = client.cluster.topology.replica_set_name
+
+      @topology ||= begin
+        topology = client.cluster.topology.class.name.sub(/.*::/, '')
+        topology = topology.gsub(/([A-Z])/) { |match| '_' + match.downcase }.sub(/^_/, '')
+        if topology =~ /^replica_set/
+          topology = 'replica_set'
+        end
+        topology.to_sym
+      end
+
+      @single_server = client.cluster.servers_list.length == 1
+
+      build_info = client.database.command(buildInfo: 1).first
+
+      @server_version = build_info['version']
+      @enterprise = build_info['modules'] && build_info['modules'].include?('enterprise')
+
+      if @topology != :sharded && short_server_version >= '3.4'
+        rv = client.use(:admin).command(getParameter: 1, featureCompatibilityVersion: 1).first['featureCompatibilityVersion']
+        @fcv = rv['version'] || rv
+      end
+    end
+
+    def basic_client
+      # Do not cache the result here so that if the client gets closed,
+      # client registry reconnects it in subsequent tests
+      ClientRegistry.instance.global_client('basic')
+    end
+  end
+end
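As a rough illustration, assuming the host project supplies the ClientRegistry (and, for the sharded storage-engine path, SpecConfig and ClusterTools) that this file expects, a spec could consult the singleton like this; the printed fields are examples only.

# Hypothetical usage sketch of Mrss::ClusterConfig (not part of the diff).
config = Mrss::ClusterConfig.instance

puts config.server_version   # e.g. "4.2.8"
puts config.topology         # :single, :replica_set or :sharded
puts config.fcv_ish          # FCV on 3.4+ non-sharded deployments, else major.minor
puts config.enterprise?      # true when buildInfo reports the enterprise module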
data/spec/shared/lib/mrss/constraints.rb
ADDED
@@ -0,0 +1,330 @@
+# frozen_string_literal: true
+# encoding: utf-8
+
+module Mrss
+  module Constraints
+    def min_server_version(version)
+      unless version =~ /\A\d+\.\d+\z/
+        raise ArgumentError, "Version can only be major.minor: #{version}"
+      end
+
+      before(:all) do
+        if version > ClusterConfig.instance.server_version
+          skip "Server version #{version} or higher required, we have #{ClusterConfig.instance.server_version}"
+        end
+      end
+    end
+
+    def max_server_version(version)
+      unless version =~ /\A\d+\.\d+\z/
+        raise ArgumentError, "Version can only be major.minor: #{version}"
+      end
+
+      before(:all) do
+        if version < ClusterConfig.instance.short_server_version
+          skip "Server version #{version} or lower required, we have #{ClusterConfig.instance.server_version}"
+        end
+      end
+    end
+
+    def min_server_fcv(version)
+      unless version =~ /\A\d+\.\d+\z/
+        raise ArgumentError, "FCV can only be major.minor: #{version}"
+      end
+
+      before(:all) do
+        unless ClusterConfig.instance.fcv_ish >= version
+          skip "FCV #{version} or higher required, we have #{ClusterConfig.instance.fcv_ish} (server #{ClusterConfig.instance.server_version})"
+        end
+      end
+    end
+
+    def max_server_fcv(version)
+      unless version =~ /\A\d+\.\d+\z/
+        raise ArgumentError, "Version can only be major.minor: #{version}"
+      end
+
+      before(:all) do
+        if version < ClusterConfig.instance.fcv_ish
+          skip "FCV #{version} or lower required, we have #{ClusterConfig.instance.fcv_ish} (server #{ClusterConfig.instance.server_version})"
+        end
+      end
+    end
+
+    def require_topology(*topologies)
+      invalid_topologies = topologies - [:single, :replica_set, :sharded]
+
+      unless invalid_topologies.empty?
+        raise ArgumentError, "Invalid topologies requested: #{invalid_topologies.join(', ')}"
+      end
+
+      before(:all) do
+        unless topologies.include?(topology = ClusterConfig.instance.topology)
+          skip "Topology #{topologies.join(' or ')} required, we have #{topology}"
+        end
+      end
+    end
+
+    def max_example_run_time(timeout)
+      around do |example|
+        TimeoutInterrupt.timeout(timeout, TimeoutInterrupt::Error.new("Test execution terminated after #{timeout} seconds")) do
+          example.run
+        end
+      end
+    end
+
+    def require_transaction_support
+      before(:all) do
+        case ClusterConfig.instance.topology
+        when :single
+          skip 'Transactions tests require a replica set (4.0+) or a sharded cluster (4.2+)'
+        when :replica_set
+          unless ClusterConfig.instance.server_version >= '4.0'
+            skip 'Transactions tests in a replica set topology require server 4.0+'
+          end
+        when :sharded
+          unless ClusterConfig.instance.server_version >= '4.2'
+            skip 'Transactions tests in a sharded cluster topology require server 4.2+'
+          end
+        else
+          raise NotImplementedError
+        end
+      end
+    end
+
+    # Fail command fail point was added to mongod in 4.0 and to mongos in 4.2.
+    def require_fail_command
+      require_transaction_support
+    end
+
+    def require_tls
+      before(:all) do
+        unless SpecConfig.instance.ssl?
+          skip "SSL not enabled"
+        end
+      end
+    end
+
+    def require_no_tls
+      before(:all) do
+        if SpecConfig.instance.ssl?
+          skip "SSL enabled"
+        end
+      end
+    end
+
+    def require_no_retry_writes
+      before(:all) do
+        if SpecConfig.instance.retry_writes?
+          skip "Retry writes is enabled"
+        end
+      end
+    end
+
+    def require_compression
+      before(:all) do
+        if SpecConfig.instance.compressors.nil?
+          skip "Compression is not enabled"
+        end
+      end
+    end
+
+    def require_zlib_compression
+      before(:all) do
+        compressors = SpecConfig.instance.compressors
+        unless compressors && compressors.include?('zlib')
+          skip "Zlib compression is not enabled"
+        end
+      end
+    end
+
+    def require_snappy_compression
+      before(:all) do
+        compressors = SpecConfig.instance.compressors
+        unless compressors && compressors.include?('snappy')
+          skip "Snappy compression is not enabled"
+        end
+      end
+    end
+
+    def require_no_snappy_compression
+      before(:all) do
+        compressors = SpecConfig.instance.compressors
+        if compressors && compressors.include?('snappy')
+          skip "Snappy compression is enabled"
+        end
+      end
+    end
+
+    def require_zstd_compression
+      before(:all) do
+        compressors = SpecConfig.instance.compressors
+        unless compressors && compressors.include?('zstd')
+          skip "Zstd compression is not enabled"
+        end
+      end
+    end
+
+    def require_no_zstd_compression
+      before(:all) do
+        compressors = SpecConfig.instance.compressors
+        if compressors && compressors.include?('zstd')
+          skip "Zstd compression is enabled"
+        end
+      end
+    end
+
+    def require_no_compression
+      before(:all) do
+        if SpecConfig.instance.compressors
+          skip "Compression is enabled"
+        end
+      end
+    end
+
+    def ruby_version_gte(version)
+      before(:all) do
+        if RUBY_VERSION < version
+          skip "Ruby version #{version} or higher required"
+        end
+      end
+    end
+
+    def ruby_version_lt(version)
+      before(:all) do
+        if RUBY_VERSION >= version
+          skip "Ruby version less than #{version} required"
+        end
+      end
+    end
+
+    def require_auth(*values)
+      before(:all) do
+        if values.any?
+          unless values.include?(ENV['AUTH'])
+            msg = values.map { |v| "AUTH=#{v}" }.join(' or ')
+            skip "This test requires #{msg}"
+          end
+        else
+          unless ENV['AUTH'] == 'auth' || SpecConfig.instance.user || ClusterConfig.instance.auth_enabled?
+            skip "Auth required"
+          end
+        end
+      end
+    end
+
+    def require_no_auth
+      before(:all) do
+        if (ENV['AUTH'] && ENV['AUTH'] != 'noauth') || SpecConfig.instance.user || ClusterConfig.instance.auth_enabled?
+          skip "Auth not allowed"
+        end
+      end
+    end
+
+    def require_x509_auth
+      before(:all) do
+        unless SpecConfig.instance.x509_auth?
+          skip "X.509 auth required"
+        end
+      end
+    end
+
+    def require_no_external_user
+      before(:all) do
+        if SpecConfig.instance.external_user?
+          skip "External user configurations are not compatible with this test"
+        end
+      end
+    end
+
+    # Can the driver specify a write concern that won't be overridden?
+    # (mongos 4.0+ overrides the write concern)
+    def require_set_write_concern
+      before(:all) do
+        if ClusterConfig.instance.topology == :sharded && ClusterConfig.instance.short_server_version >= '4.0'
+          skip "mongos 4.0+ overrides write concern"
+        end
+      end
+    end
+
+    def require_multi_shard
+      before(:all) do
+        if ClusterConfig.instance.topology == :sharded && SpecConfig.instance.addresses.length == 1
+          skip 'Test requires a minimum of two shards if run in sharded topology'
+        end
+      end
+    end
+
+    def require_no_multi_shard
+      before(:all) do
+        if ClusterConfig.instance.topology == :sharded && SpecConfig.instance.addresses.length > 1
+          skip 'Test requires a single shard if run in sharded topology'
+        end
+      end
+    end
+
+    def require_wired_tiger
+      before(:all) do
+        if ClusterConfig.instance.storage_engine != :wired_tiger
+          skip 'Test requires WiredTiger storage engine'
+        end
+      end
+    end
+
+    def require_wired_tiger_on_36
+      before(:all) do
+        if ClusterConfig.instance.short_server_version >= '3.6'
+          if ClusterConfig.instance.storage_engine != :wired_tiger
+            skip 'Test requires WiredTiger storage engine on 3.6+ servers'
+          end
+        end
+      end
+    end
+
+    def require_mmapv1
+      before(:all) do
+        if ClusterConfig.instance.storage_engine != :mmapv1
+          skip 'Test requires MMAPv1 storage engine'
+        end
+      end
+    end
+
+    def require_enterprise
+      before(:all) do
+        unless ClusterConfig.instance.enterprise?
+          skip 'Test requires enterprise build of MongoDB'
+        end
+      end
+    end
+
+    # Integration tests for SRV polling require internet connectivity to
+    # look up SRV records and a sharded cluster configured on default port on
+    # localhost (localhost:27017, localhost:27018).
+    def require_default_port_deployment
+      # Because the DNS records at test1.test.build.10gen.cc point to
+      # localhost:27017 & localhost:27018, the test suite must have been
+      # configured to use these addresses
+      before(:all) do
+        have_default_port = SpecConfig.instance.addresses.any? do |address|
+          %w(127.0.0.1 127.0.0.1:27017 localhost localhost:27017).include?(address)
+        end
+        unless have_default_port
+          skip 'This test requires the test suite to be configured for localhost:27017'
+        end
+      end
+    end
+
+    # Some tests perform assertions on what the driver is logging.
+    # Some test configurations, for example OCSP with unknown response,
+    # produce warnings due to optional checks failing.
+    # This constraint skips tests that issue logging assertions on configurations
+    # that may produce non-test-originated log entries.
+    def require_warning_clean
+      before(:all) do
+        if ENV['OCSP_STATUS'] == 'unknown'
+          skip 'Unknown OCSP status is not global warning-clean'
+        end
+      end
+    end
+  end
+end
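A sketch of how these constraint helpers are typically wired into RSpec; the `config.extend` call and the example group below are illustrative and not taken from the diff.

# Hypothetical usage sketch of Mrss::Constraints (not part of the diff).
RSpec.configure do |config|
  # Extending example groups makes the constraint DSL available inside describe blocks.
  config.extend Mrss::Constraints
end

RSpec.describe 'transactions' do
  require_transaction_support   # skips unless the deployment supports transactions
  min_server_fcv '4.2'          # skips when the feature compatibility version is older

  it 'runs only on supported deployments' do
    # ...
  end
end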