bson 5.0.0 → 5.0.1
- checksums.yaml +4 -4
- data/README.md +91 -7
- data/Rakefile +63 -39
- data/ext/bson/read.c +18 -3
- data/ext/bson/util.c +1 -1
- data/ext/bson/write.c +4 -0
- data/lib/bson/decimal128/builder.rb +1 -1
- data/lib/bson/object_id.rb +11 -1
- data/lib/bson/version.rb +2 -1
- data/spec/bson/object_id_spec.rb +14 -0
- metadata +7 -80
- checksums.yaml.gz.sig +0 -0
- data/spec/shared/LICENSE +0 -20
- data/spec/shared/bin/get-mongodb-download-url +0 -17
- data/spec/shared/bin/s3-copy +0 -45
- data/spec/shared/bin/s3-upload +0 -69
- data/spec/shared/lib/mrss/child_process_helper.rb +0 -80
- data/spec/shared/lib/mrss/cluster_config.rb +0 -231
- data/spec/shared/lib/mrss/constraints.rb +0 -378
- data/spec/shared/lib/mrss/docker_runner.rb +0 -298
- data/spec/shared/lib/mrss/eg_config_utils.rb +0 -51
- data/spec/shared/lib/mrss/event_subscriber.rb +0 -210
- data/spec/shared/lib/mrss/lite_constraints.rb +0 -238
- data/spec/shared/lib/mrss/server_version_registry.rb +0 -113
- data/spec/shared/lib/mrss/session_registry.rb +0 -69
- data/spec/shared/lib/mrss/session_registry_legacy.rb +0 -60
- data/spec/shared/lib/mrss/spec_organizer.rb +0 -179
- data/spec/shared/lib/mrss/utils.rb +0 -37
- data/spec/shared/share/Dockerfile.erb +0 -321
- data/spec/shared/share/haproxy-1.conf +0 -16
- data/spec/shared/share/haproxy-2.conf +0 -17
- data/spec/shared/shlib/config.sh +0 -27
- data/spec/shared/shlib/distro.sh +0 -74
- data/spec/shared/shlib/server.sh +0 -416
- data/spec/shared/shlib/set_env.sh +0 -169
- data.tar.gz.sig +0 -0
- metadata.gz.sig +0 -2
data/spec/shared/bin/s3-upload
DELETED
@@ -1,69 +0,0 @@
-#!/usr/bin/env ruby
-
-require 'optparse'
-require 'aws-sdk-s3'
-
-options = {}
-OptionParser.new do |opts|
-  opts.banner = "Usage: s3-upload options"
-
-  opts.on("-r", "--region=REGION", "AWS region to use (default us-east-1)") do |v|
-    options[:region] = v
-  end
-
-  opts.on("-p", "--param=KEY=VALUE", "Specify parameter for S3 upload") do |v|
-    options[:params] ||= {}
-    k, v = v.split('=', 2)
-    options[:params][k.to_sym] = v
-  end
-
-  opts.on("-f", "--file=PATH", "Path to the file to upload, - to upload standard input") do |v|
-    options[:file] = v
-  end
-
-  opts.on("-w", "--write=BUCKET:PATH", "Bucket name and key (or path) to upload to") do |v|
-    options[:write] = v
-  end
-
-  opts.on("-c", "--copy=BUCKET:PATH", "Bucket name and key (or path) to copy to (may be specified more than once)") do |v|
-    options[:copy] ||= []
-    options[:copy] << v
-  end
-end.parse!
-
-ENV['AWS_REGION'] ||= options[:region] || 'us-east-1'
-
-def upload(f, options)
-  s3 = Aws::S3::Client.new
-  write = options.fetch(:write)
-  STDERR.puts "Writing #{write}"
-  bucket, key = write.split(':', 2)
-  s3.put_object(
-    body: f.read,
-    bucket: bucket,
-    key: key,
-    **options[:params] || {},
-  )
-  if copy = options[:copy]
-    copy.each do |dest|
-      STDERR.puts "Copying to #{dest}"
-      dbucket, dkey = dest.split(':', 2)
-      s3.copy_object(
-        bucket: dbucket,
-        key: dkey,
-        copy_source: "/#{bucket}/#{key}",
-        **options[:params] || {},
-      )
-    end
-  end
-end
-
-if options[:file] == '-'
-  upload(STDIN, options)
-elsif options[:file]
-  File.open(options[:file]) do |f|
-    upload(f, options)
-  end
-else
-  upload(STDIN, options)
-end
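For context, the removed s3-upload script was a standalone CLI driven entirely by the flags parsed above. A hypothetical invocation from Ruby (the paths, bucket, and key names below are illustrative, not taken from the gem) might have looked like:

# Hypothetical example: invoking the removed script from a Rake task or
# CI step. aws-sdk-s3 picks up AWS credentials from the environment.
system(
  'spec/shared/bin/s3-upload',
  '--region', 'us-east-1',
  '--file', 'build/artifact.tar.gz',
  '--write', 'my-bucket:artifacts/artifact.tar.gz',
  '--copy', 'my-bucket:artifacts/latest.tar.gz',
  '--param', 'acl=public-read',
) or raise 'upload failed'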
data/spec/shared/lib/mrss/child_process_helper.rb
DELETED
@@ -1,80 +0,0 @@
-# frozen_string_literal: true
-# encoding: utf-8
-
-autoload :ChildProcess, 'childprocess'
-autoload :Tempfile, 'tempfile'
-
-module Mrss
-  module ChildProcessHelper
-    class SpawnError < StandardError; end
-
-    module_function def call(cmd, env: nil, cwd: nil)
-      process = ChildProcess.new(*cmd)
-      process.io.inherit!
-      if cwd
-        process.cwd = cwd
-      end
-      if env
-        env.each do |k, v|
-          process.environment[k.to_s] = v
-        end
-      end
-      process.start
-      process.wait
-      process
-    end
-
-    module_function def check_call(cmd, env: nil, cwd: nil)
-      process = call(cmd, env: env, cwd: cwd)
-      unless process.exit_code == 0
-        raise SpawnError, "Failed to execute: #{cmd}"
-      end
-    end
-
-    module_function def get_output(cmd, env: nil, cwd: nil)
-      process = ChildProcess.new(*cmd)
-      process.io.inherit!
-      if cwd
-        process.cwd = cwd
-      end
-      if env
-        env.each do |k, v|
-          process.environment[k.to_s] = v
-        end
-      end
-
-      output = ''
-      r, w = IO.pipe
-
-      begin
-        process.io.stdout = w
-        process.start
-        w.close
-
-        thread = Thread.new do
-          begin
-            loop do
-              output << r.readpartial(16384)
-            end
-          rescue EOFError
-          end
-        end
-
-        process.wait
-        thread.join
-      ensure
-        r.close
-      end
-
-      [process, output]
-    end
-
-    module_function def check_output(*args)
-      process, output = get_output(*args)
-      unless process.exit_code == 0
-        raise SpawnError,"Failed to execute: #{args}"
-      end
-      output
-    end
-  end
-end
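For context, a minimal sketch of how the removed Mrss::ChildProcessHelper was typically called from a spec or Rake task. The require path assumes spec/shared/lib is on the load path; the commands shown are illustrative, not taken from the gem.

require 'mrss/child_process_helper'

# Run a command, raising Mrss::ChildProcessHelper::SpawnError if it
# exits with a non-zero status.
Mrss::ChildProcessHelper.check_call(%w(make test), cwd: 'build')

# Capture a command's standard output while still failing loudly on error.
version = Mrss::ChildProcessHelper.check_output(%w(ruby --version))
puts "Building against #{version.strip}"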
data/spec/shared/lib/mrss/cluster_config.rb
DELETED
@@ -1,231 +0,0 @@
-# frozen_string_literal: true
-# encoding: utf-8
-
-# ClusterConfig requires ClientRegistry class provided by the host project.
-
-require 'singleton'
-
-module Mrss
-  class ClusterConfig
-    include Singleton
-    include RSpec::Core::Pending
-
-    def single_server?
-      determine_cluster_config
-      @single_server
-    end
-
-    def sharded_ish?
-      determine_cluster_config
-      @topology == :sharded || @topology == :load_balanced
-    end
-
-    def replica_set_name
-      determine_cluster_config
-      @replica_set_name
-    end
-
-    def server_version
-      determine_cluster_config
-      @server_version
-    end
-
-    def enterprise?
-      determine_cluster_config
-      @enterprise
-    end
-
-    def short_server_version
-      server_version.split('.')[0..1].join('.')
-    end
-
-    def fcv
-      determine_cluster_config
-      @fcv
-    end
-
-    # Per https://jira.mongodb.org/browse/SERVER-39052, working with FCV
-    # in sharded topologies is annoying. Also, FCV doesn't exist in servers
-    # less than 3.4. This method returns FCV on 3.4+ servers when in single
-    # or RS topologies, and otherwise returns the major.minor server version.
-    def fcv_ish
-      if server_version.nil?
-        raise "Deployment server version not known - check that connection to deployment succeeded"
-      end
-
-      if server_version >= '3.4' && !sharded_ish?
-        fcv
-      else
-        if short_server_version == '4.1'
-          '4.2'
-        else
-          short_server_version
-        end
-      end
-    end
-
-    # @return [ Mongo::Address ] The address of the primary in the deployment.
-    def primary_address
-      determine_cluster_config
-      @primary_address
-    end
-
-    def primary_address_str
-      determine_cluster_config
-      @primary_address.seed
-    end
-
-    def primary_address_host
-      both = primary_address_str
-      both.split(':').first
-    end
-
-    def primary_address_port
-      both = primary_address_str
-      both.split(':')[1] || 27017
-    end
-
-    def primary_description
-      determine_cluster_config
-      @primary_description
-    end
-
-    def server_parameters
-      determine_cluster_config
-      @server_parameters
-    end
-
-    # Try running a command on the admin database to see if the mongod was
-    # started with auth.
-    def auth_enabled?
-      if @auth_enabled.nil?
-        @auth_enabled = begin
-          basic_client.use(:admin).command(getCmdLineOpts: 1).first["argv"].include?("--auth")
-        rescue => e
-          e.message =~ /(not authorized)|(unauthorized)|(no users authenticated)|(requires authentication)/
-        end
-      end
-      @auth_enabled
-    end
-
-    def topology
-      determine_cluster_config
-      @topology
-    end
-
-    def storage_engine
-      @storage_engine ||= begin
-        # 2.6 does not have wired tiger
-        if short_server_version == '2.6'
-          :mmapv1
-        else
-          client = ClientRegistry.instance.global_client('root_authorized')
-          if sharded_ish?
-            shards = client.use(:admin).command(listShards: 1).first
-            if shards['shards'].empty?
-              raise 'Shards are empty'
-            end
-            shard = shards['shards'].first
-            address_str = shard['host'].sub(/^.*\//, '').sub(/,.*/, '')
-            client = ClusterTools.instance.direct_client(address_str,
-              SpecConfig.instance.test_options.merge(SpecConfig.instance.auth_options).merge(connect: :direct))
-          end
-          rv = client.use(:admin).command(serverStatus: 1).first
-          rv = rv['storageEngine']['name']
-          rv_map = {
-            'wiredTiger' => :wired_tiger,
-            'mmapv1' => :mmapv1,
-          }
-          rv_map[rv] || rv
-        end
-      end
-    end
-
-    # This method returns an alternate address for connecting to the configured
-    # deployment. For example, if the replica set is configured with nodes at
-    # of localhost:27017 and so on, this method will return 127.0.0.:27017.
-    #
-    # Note that the "alternate" refers to replica set configuration, not the
-    # addresses specified in test suite configuration. If the deployment topology
-    # is not a replica set, "alternate" refers to test suite configuration as
-    # this is the only configuration available.
-    def alternate_address
-      @alternate_address ||= begin
-        address = primary_address_host
-        str = case address
-          when '127.0.0.1'
-            'localhost'
-          when /^(\d+\.){3}\d+$/
-            skip 'This test requires a hostname or 127.0.0.1 as address'
-          else
-            # We don't know if mongod is listening on ipv4 or ipv6, in principle.
-            # Our tests use ipv4, so hardcode that for now.
-            # To support both we need to try both addresses which will make this
-            # test more complicated.
-            #
-            # JRuby chokes on primary_address_port as the port (e.g. 27017).
-            # Since the port does not actually matter, use a common port like 80.
-            resolved_address = Addrinfo.getaddrinfo(address, 80, Socket::PF_INET).first.ip_address
-            if resolved_address.include?(':')
-              "[#{resolved_address}]"
-            else
-              resolved_address
-            end
-          end + ":#{primary_address_port}"
-        Mongo::Address.new(str)
-      end
-    end
-
-    private
-
-    def determine_cluster_config
-      return if @primary_address
-
-      # Run all commands to figure out the cluster configuration from the same
-      # client. This is somewhat wasteful when running a single test, but reduces
-      # test runtime for the suite overall because all commands are sent on the
-      # same connection rather than each command connecting to the cluster by
-      # itself.
-      client = ClientRegistry.instance.global_client('root_authorized')
-
-      primary = client.cluster.next_primary
-      @primary_address = primary.address
-      @primary_description = primary.description
-      @replica_set_name = client.cluster.topology.replica_set_name
-
-      @topology ||= begin
-        topology = client.cluster.topology.class.name.sub(/.*::/, '')
-        topology = topology.gsub(/([A-Z])/) { |match| '_' + match.downcase }.sub(/^_/, '')
-        if topology =~ /^replica_set/
-          topology = 'replica_set'
-        end
-        topology.to_sym
-      end
-
-      @single_server = client.cluster.servers_list.length == 1
-
-      build_info = client.database.command(buildInfo: 1).first
-
-      @server_version = build_info['version']
-      @enterprise = build_info['modules'] && build_info['modules'].include?('enterprise')
-
-      @server_parameters = begin
-        client.use(:admin).command(getParameter: '*').first
-      rescue => e
-        STDERR.puts("WARNING: Failed to obtain server parameters: #{e.class}: #{e.message}")
-        {}
-      end
-
-      if !sharded_ish? && short_server_version >= '3.4'
-        rv = @server_parameters['featureCompatibilityVersion']
-        @fcv = rv['version'] || rv
-      end
-    end
-
-    def basic_client
-      # Do not cache the result here so that if the client gets closed,
-      # client registry reconnects it in subsequent tests
-      ClientRegistry.instance.global_client('basic')
-    end
-  end
-end
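For context, a minimal sketch of how specs consulted the removed Mrss::ClusterConfig singleton. As the file notes, a ClientRegistry class must be supplied by the host project; the surrounding code here is hypothetical.

# Inspect the deployment the test suite is connected to before deciding
# which examples apply.
config = Mrss::ClusterConfig.instance

puts "Server #{config.server_version} (effective FCV #{config.fcv_ish})"
puts "Topology: #{config.topology}"
puts "Primary:  #{config.primary_address_str}"
puts "Auth is enabled" if config.auth_enabled?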