mongo 2.12.1 → 2.12.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 3445c61a01ced5590fabff016e17fb745789b0bca07628c653272ea5f1ba9c22
- data.tar.gz: 6a945cc89db62d7bb613ef03c262407f56820e4aedfc5c2aa25fe813a02a7b51
+ metadata.gz: 32b9c6615d4d33e980ab07cba7c01992f0d370e9ee8c671fd01f9310c56c0cf2
+ data.tar.gz: a8344cf96ce4c9f69a92b5a8dc25ad38a70f8145d48ecf4c1eb89609a014ef06
  SHA512:
- metadata.gz: 207218c0f2715f90d877d697f24aa2dce851032ef8affd8e8fa222d608f886f0bb9e3a1f7763e57070bc0214952285d1ad7c54454d90772be4c8bbe2d274d6b8
- data.tar.gz: d95ad222012c17207208e6d2bf32e33a744823ceaee1695cc794c89fb672c7a186eaba3c7aeae8b68dcfe4b0b03543b7474bd5420271f571513b6fdab29478d4
+ metadata.gz: 97cc2df80dce9acaaf2fcc5b1b55ffc9f1d6dc37d4903f522f4dcc2366a487ae9daafdd66a81c8e399f561c105e40377a2cc16ac03471dbb2a3ecab0054a2713
+ data.tar.gz: 758df1949b5f9018270fe11812580a91bf8fd55f56990cdd45b16127520919097ed9aeabc1a04ec1e32c393f41ec336228bbf82c5a9abd304b56b8793206a5ec
data.tar.gz.sig CHANGED
Binary file
@@ -180,42 +180,44 @@ module Mongo
  # @return [ Mongo::Socket::SSL | Mongo::Socket::TCP | Mongo::Socket::Unix ]
  # The socket.
  #
- # @raise [ Exception ] If network connection failed.
+ # @raise [ Mongo::Error ] If network connection failed.
  #
  # @since 2.0.0
  def socket(socket_timeout, ssl_options = {}, options = {})
-   if seed.downcase =~ Unix::MATCH
-     specific_address = Unix.new(seed.downcase)
-     return specific_address.socket(socket_timeout, ssl_options, options)
-   end
+   map_exceptions do
+     if seed.downcase =~ Unix::MATCH
+       specific_address = Unix.new(seed.downcase)
+       return specific_address.socket(socket_timeout, ssl_options, options)
+     end
+
+     options = {
+       connect_timeout: Server::CONNECT_TIMEOUT,
+     }.update(options)

-   options = {
-     connect_timeout: Server::CONNECT_TIMEOUT,
-   }.update(options)
-
-   # When the driver connects to "localhost", it only attempts IPv4
-   # connections. When the driver connects to other hosts, it will
-   # attempt both IPv4 and IPv6 connections.
-   family = (host == LOCALHOST) ? ::Socket::AF_INET : ::Socket::AF_UNSPEC
-   error = nil
-   # Sometimes Socket#getaddrinfo returns the same info more than once
-   # (multiple identical items in the returned array). It does not make
-   # sense to try to connect to the same address more than once, thus
-   # eliminate duplicates here.
-   infos = ::Socket.getaddrinfo(host, nil, family, ::Socket::SOCK_STREAM)
-   results = infos.map do |info|
-     [info[4], info[3]]
-   end.uniq
-   results.each do |family, address_str|
-     begin
-       specific_address = FAMILY_MAP[family].new(address_str, port, host)
-       socket = specific_address.socket(socket_timeout, ssl_options, options)
-       return socket
-     rescue IOError, SystemCallError, Error::SocketTimeoutError, Error::SocketError => e
-       error = e
+     # When the driver connects to "localhost", it only attempts IPv4
+     # connections. When the driver connects to other hosts, it will
+     # attempt both IPv4 and IPv6 connections.
+     family = (host == LOCALHOST) ? ::Socket::AF_INET : ::Socket::AF_UNSPEC
+     error = nil
+     # Sometimes Socket#getaddrinfo returns the same info more than once
+     # (multiple identical items in the returned array). It does not make
+     # sense to try to connect to the same address more than once, thus
+     # eliminate duplicates here.
+     infos = ::Socket.getaddrinfo(host, nil, family, ::Socket::SOCK_STREAM)
+     results = infos.map do |info|
+       [info[4], info[3]]
+     end.uniq
+     results.each do |family, address_str|
+       begin
+         specific_address = FAMILY_MAP[family].new(address_str, port, host)
+         socket = specific_address.socket(socket_timeout, ssl_options, options)
+         return socket
+       rescue IOError, SystemCallError, Error::SocketTimeoutError, Error::SocketError => e
+         error = e
+       end
      end
+     raise error
    end
-   raise error
  end

  # Get the address as a string.
@@ -248,5 +250,17 @@ module Mongo
          else IPv4.parse(address)
          end
        end
+
+       def map_exceptions
+         begin
+           yield
+         rescue Errno::ETIMEDOUT => e
+           raise Error::SocketTimeoutError, "#{e.class}: #{e} (for #{self})"
+         rescue IOError, SystemCallError => e
+           raise Error::SocketError, "#{e.class}: #{e} (for #{self})"
+         rescue OpenSSL::SSL::SSLError => e
+           raise Error::SocketError, "#{e.class}: #{e} (for #{self}) (#{SSL_ERROR})"
+         end
+       end
    end
  end
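
The practical effect of map_exceptions is that any low-level failure while establishing a connection (Errno::ETIMEDOUT, IOError, SystemCallError, OpenSSL::SSL::SSLError) now surfaces as a Mongo::Error subclass whose message includes the original exception class and the address. A minimal sketch of what calling code can now rely on (the host name below is hypothetical):

    address = Mongo::Address.new('db.example.com:27017')
    begin
      address.socket(5)
    rescue Mongo::Error::SocketTimeoutError, Mongo::Error::SocketError => e
      # e.message reads roughly like "Errno::ECONNREFUSED: ... (for db.example.com:27017)"
      warn "connection failed: #{e.message}"
    end
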
@@ -185,7 +185,9 @@ module Mongo
  pipeline << { :'$limit' => opts[:limit] } if opts[:limit]
  pipeline << { :'$group' => { _id: 1, n: { :'$sum' => 1 } } }

- opts.select! { |k, _| [:hint, :max_time_ms, :read, :collation, :session].include?(k) }
+ opts = opts.select { |k, _| [:hint, :max_time_ms, :read, :collation, :session].include?(k) }
+ opts[:collation] ||= collation
+
  first = aggregate(pipeline, opts).first
  return 0 unless first
  first['n'].to_i
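
With this change count_documents no longer destructively filters the caller's options hash, and the collation configured on the view (for example via Collection#find options) is now forwarded to the $group aggregation it runs, unless an explicit :collation option overrides it. A minimal sketch, assuming collection is a Mongo::Collection on a connected client:

    view = collection.find({ name: 'alice' }, collation: { locale: 'en_US', strength: 2 })
    view.count_documents   # the underlying aggregation now runs with the same collation
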
@@ -104,6 +104,11 @@ module Mongo
  # chunk objects and assemble the data. If we have an IO object, then
  # it's the original file data and we must split it into chunks and set
  # the original data itself.
+ #
+ # @param [ IO, String, Array<BSON::Document> ] value The file object,
+ #   file contents or chunk documents.
+ #
+ # @return [ Array<Grid::File::Chunk> ] Array of chunks.
  def initialize_chunks!(value)
    if value.is_a?(Array)
      @chunks = value.map{ |doc| Chunk.new(doc) }
@@ -151,6 +151,7 @@ module Mongo
  # @return [ String ] The assembled data.
  #
  # @since 2.0.0
+ # @api private
  def assemble(chunks)
    chunks.reduce(''){ |data, chunk| data << chunk.data.data }
  end
@@ -167,6 +168,7 @@ module Mongo
  # @return [ Array<Chunk> ] The chunks of the data.
  #
  # @since 2.0.0
+ # @api private
  def split(io, file_info, offset = 0)
    io = StringIO.new(io) if io.is_a?(String)
    parts = Enumerator.new { |y| y << io.read(file_info.chunk_size) until io.eof? }
@@ -177,7 +177,7 @@ module Mongo
  #
  # @since 2.0.0
  def prefix
-   @options[:fs_name] || @options[:bucket_name]|| DEFAULT_ROOT
+   @options[:fs_name] || @options[:bucket_name] || DEFAULT_ROOT
  end

  # Remove a single file from the GridFS.
@@ -230,7 +230,8 @@ module Mongo
  #
  # @since 2.1.0
  def open_download_stream(id, options = nil)
-   read_stream(id, options).tap do |stream|
+   options = Hash[(options || {}).map { |k, v| [k.to_sym, v] }]
+   read_stream(id, **options).tap do |stream|
      if block_given?
        begin
          yield stream
@@ -348,15 +349,15 @@ module Mongo
    download_to_stream(open_download_stream_by_name(filename, opts).file_id, io)
  end

- # Opens an upload stream to GridFS to which the contents of a user file came be written.
+ # Opens an upload stream to GridFS to which the contents of a file or
+ # blob can be written.
  #
- # @example Open a stream to which the contents of a file came be written.
- #   fs.open_upload_stream('a-file.txt')
- #
- # @param [ String ] filename The filename of the file to upload.
+ # @param [ String ] filename The name of the file in GridFS.
  # @param [ Hash ] opts The options for the write stream.
  #
- # @option opts [ Object ] :file_id An optional unique file id. An ObjectId is generated otherwise.
+ # @option opts [ Object ] :file_id An optional unique file id.
+ #   A BSON::ObjectId is automatically generated if a file id is not
+ #   provided.
  # @option opts [ Integer ] :chunk_size Override the default chunk size.
  # @option opts [ Hash ] :metadata User data for the 'metadata' field of the files
  #   collection document.
@@ -375,7 +376,8 @@ module Mongo
  #
  # @since 2.1.0
  def open_upload_stream(filename, opts = {})
-   write_stream(filename, opts).tap do |stream|
+   opts = Hash[opts.map { |k, v| [k.to_sym, v] }]
+   write_stream(filename, **opts).tap do |stream|
      if block_given?
        begin
          yield stream
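
Both open_download_stream and open_upload_stream now symbolize user-supplied option keys before splatting them into the private stream constructors, so string-keyed hashes (for example options read from YAML or JSON) behave the same as symbol-keyed ones. A minimal sketch, assuming fs is a Mongo::Grid::FSBucket such as client.database.fs:

    stream = fs.open_upload_stream('report.txt', 'chunk_size' => 255 * 1024) do |s|
      s.write('report contents')
    end
    fs.open_download_stream(stream.file_id) do |s|
      puts s.read
    end
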
@@ -462,12 +464,12 @@ module Mongo
  #
  # @option opts [ BSON::Document ] :file_info_doc For internal
  #   driver use only. A BSON document to use as file information.
- def read_stream(id, opts = nil)
-   Stream.get(self, Stream::READ_MODE, { file_id: id }.update(options).update(opts || {}))
+ def read_stream(id, **opts)
+   Stream.get(self, Stream::READ_MODE, { file_id: id }.update(options).update(opts))
  end

- def write_stream(filename, opts)
-   Stream.get(self, Stream::WRITE_MODE, { filename: filename }.merge!(options).merge!(opts))
+ def write_stream(filename, **opts)
+   Stream.get(self, Stream::WRITE_MODE, { filename: filename }.update(options).update(opts))
  end

  def chunks_name
@@ -82,12 +82,12 @@ module Mongo
    @open = true
  end

- # Write to the GridFS bucket from the source stream.
+ # Write to the GridFS bucket from the source stream or a string.
  #
  # @example Write to GridFS.
  #   stream.write(io)
  #
- # @param [ IO ] io The source io stream to upload from.
+ # @param [ String | IO ] io The string or IO object to upload from.
  #
  # @return [ Stream::Write ] self The write stream itself.
  #
@@ -95,7 +95,13 @@ module Mongo
  def write(io)
    ensure_open!
    @indexes ||= ensure_indexes!
-   @length += io.size
+   @length += if io.respond_to?(:bytesize)
+     # String objects
+     io.bytesize
+   else
+     # IO objects
+     io.size
+   end
    chunks = File::Chunk.split(io, file_info, @n)
    @n += chunks.size
    chunks_collection.insert_many(chunks) unless chunks.empty?
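
The length recorded in the GridFS files collection previously came from String#size (a character count) when a String was written, so multi-byte UTF-8 data was under-counted; bytesize is now used for strings, while IO objects continue to report size. A small sketch of the difference, assuming fs is client.database.fs:

    data = "héllo"               # 5 characters, 6 bytes in UTF-8
    data.length                  # => 5
    data.bytesize                # => 6
    fs.open_upload_stream('greeting.txt') do |stream|
      stream.write(data)         # the files document now records length 6, not 5
    end
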
@@ -374,7 +374,7 @@ module Mongo
  # each of the elements in this array using BSON types wherever possible.
  #
  # @return [Message] Message with deserialized array.
- def self.deserialize_array(message, io, field, options)
+ def self.deserialize_array(message, io, field, options = {})
    elements = []
    count = message.instance_variable_get(field[:multi])
    count.times { elements << field[:type].deserialize(io, options) }
@@ -392,7 +392,7 @@ module Mongo
  # this field using BSON types wherever possible.
  #
  # @return [Message] Message with deserialized field.
- def self.deserialize_field(message, io, field, options)
+ def self.deserialize_field(message, io, field, options = {})
    message.instance_variable_set(
      field[:name],
      field[:type].deserialize(io, options)
@@ -440,12 +440,15 @@ module Mongo
    # Deserializes bytes from the byte buffer.
    #
    # @param [ BSON::ByteBuffer ] buffer Buffer containing the value to read.
-   # @param [ Integer ] num_bytes Number of bytes to read.
+   # @param [ Hash ] options The method options.
+   #
+   # @option options [ Integer ] num_bytes Number of bytes to read.
    #
    # @return [ String ] The bytes.
    #
    # @since 2.5.0
-   def self.deserialize(buffer, num_bytes = nil)
+   def self.deserialize(buffer, options = {})
+     num_bytes = options[:num_bytes]
      buffer.get_bytes(num_bytes || buffer.length)
    end
  end
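
The deserialize hooks now take a trailing options hash with a default value, so the byte count for the Bytes serializer moves from a positional argument into the :num_bytes option. A sketch of the changed call convention (this is an internal driver API, shown only for illustration):

    # previously: Bytes.deserialize(buffer, 4)
    Mongo::Protocol::Serializers::Bytes.deserialize(buffer, num_bytes: 4)
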
@@ -17,5 +17,5 @@ module Mongo
    # The current version of the driver.
    #
    # @since 2.0.0
-   VERSION = '2.12.1'.freeze
+   VERSION = '2.12.2'.freeze
  end
@@ -0,0 +1,48 @@
+ require 'spec_helper'
+
+ describe 'GridFS bucket integration' do
+   let(:fs) do
+     authorized_client.database.fs
+   end
+
+   describe 'UTF-8 string write' do
+     let(:data) { "hello\u2210" }
+
+     before do
+       data.length.should_not == data.bytesize
+     end
+
+     shared_examples 'round-trips' do
+       it 'round-trips' do
+         stream = fs.open_upload_stream('test') do |stream|
+           stream.write(data_to_write)
+         end
+
+         actual = nil
+         fs.open_download_stream(stream.file_id) do |stream|
+           actual = stream.read
+         end
+
+         actual.encoding.name.should == 'ASCII-8BIT'
+         actual.should == data.dup.force_encoding('binary')
+       end
+     end
+
+     context 'in binary encoding' do
+       let(:data_to_write) do
+         data.force_encoding('binary').freeze
+       end
+
+       it_behaves_like 'round-trips'
+     end
+
+     context 'in UTF-8 encoding' do
+       let(:data_to_write) do
+         data.encoding.name.should == 'UTF-8'
+         data.freeze
+       end
+
+       it_behaves_like 'round-trips'
+     end
+   end
+ end
@@ -81,6 +81,10 @@ describe 'BSON & command size limits' do
  end

  it 'allows bulk writes of multiple documents of exactly 16 MiB each' do
+   if SpecConfig.instance.compressors
+     pending "RUBY-2234"
+   end
+
    documents = []
    1.upto(3) do |index|
      document = { key: 'a' * (max_document_size - 28), _id: "in#{index}" }
@@ -0,0 +1,98 @@
+ require 'spec_helper'
+
+ describe 'BSON & command size limits' do
+   let(:max_document_size) { 16*1024*1024 }
+
+   before do
+     authorized_collection.delete_many
+   end
+
+   # This test uses a large document that is significantly smaller than the
+   # size limit. It is a basic sanity check.
+   it 'allows user-provided documents to be 15MiB' do
+     document = { key: 'a' * 15*1024*1024, _id: 'foo' }
+
+     authorized_collection.insert_one(document)
+   end
+
+   # This test uses a large document that is significantly larger than the
+   # size limit. It is a basic sanity check.
+   it 'fails single write of oversized documents' do
+     document = { key: 'a' * 17*1024*1024, _id: 'foo' }
+
+     lambda do
+       authorized_collection.insert_one(document)
+     end.should raise_error(Mongo::Error::MaxBSONSize, /The document exceeds maximum allowed BSON object size after serialization/)
+   end
+
+   # This test checks our bulk write splitting when documents are not close
+   # to the limit, but where splitting is definitely required.
+   it 'allows split bulk write of medium sized documents' do
+     # 8 documents of 4 MiB each = 32 MiB total data, should be split over
+     # either 2 or 3 bulk writes depending on how well the driver splits
+     documents = []
+     1.upto(8) do |index|
+       documents << { key: 'a' * 4*1024*1024, _id: "in#{index}" }
+     end
+
+     authorized_collection.insert_many(documents)
+     authorized_collection.count_documents({}).should == 8
+   end
+
+   # This test ensures that document which are too big definitely fail insertion.
+   it 'fails bulk write of oversized documents' do
+     documents = []
+     1.upto(3) do |index|
+       documents << { key: 'a' * 17*1024*1024, _id: "in#{index}" }
+     end
+
+     lambda do
+       authorized_collection.insert_many(documents)
+     end.should raise_error(Mongo::Error::MaxBSONSize, /The document exceeds maximum allowed BSON object size after serialization/)
+     authorized_collection.count_documents({}).should == 0
+   end
+
+   it 'allows user-provided documents to be exactly 16MiB' do
+     # The document must contain the _id field, otherwise the server will
+     # add it which will increase the size of the document as persisted by
+     # the server.
+     document = { key: 'a' * (max_document_size - 28), _id: 'foo' }
+     expect(document.to_bson.length).to eq(max_document_size)
+
+     authorized_collection.insert_one(document)
+   end
+
+   it 'fails on the server when a document larger than 16MiB is inserted' do
+     document = { key: 'a' * (max_document_size - 27), _id: 'foo' }
+     expect(document.to_bson.length).to eq(max_document_size+1)
+
+     lambda do
+       authorized_collection.insert_one(document)
+     end.should raise_error(Mongo::Error::OperationFailure, /object to insert too large/)
+   end
+
+   it 'fails in the driver when a document larger than 16MiB+16KiB is inserted' do
+     document = { key: 'a' * (max_document_size - 27 + 16*1024), _id: 'foo' }
+     expect(document.to_bson.length).to eq(max_document_size+16*1024+1)
+
+     lambda do
+       authorized_collection.insert_one(document)
+     end.should raise_error(Mongo::Error::MaxBSONSize, /The document exceeds maximum allowed BSON object size after serialization/)
+   end
+
+   it 'allows bulk writes of multiple documents of exactly 16 MiB each' do
+     if SpecConfig.instance.compressors
+       pending "RUBY-2234"
+     end
+
+     documents = []
+     1.upto(3) do |index|
+       document = { key: 'a' * (max_document_size - 28), _id: "in#{index}" }
+       expect(document.to_bson.length).to eq(max_document_size)
+       documents << document
+     end
+
+     authorized_collection.insert_many(documents)
+     authorized_collection.count_documents({}).should == 3
+   end
+ end
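
The recurring max_document_size - 28 in these tests is the fixed BSON overhead of a { key: <string>, _id: 'foo' } document, so the 'a' payload brings the serialized size to exactly 16 MiB. A quick sketch of that arithmetic (general BSON layout, not driver-specific):

    4 bytes    document length prefix
    1 + 4      type byte plus cstring name "key\0"
    4 + 1      string length prefix plus trailing NUL for the value
    1 + 4      type byte plus cstring name "_id\0"
    4 + 4      length prefix plus "foo\0" for the _id value
    1 byte     document terminator
    = 28 bytes, so 'a' * (16*1024*1024 - 28) serializes to exactly 16 MiB

The - 27 variants overshoot the limit by exactly one byte, which is what the 16 MiB + 1 assertions check.
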