mongo 1.9.2 → 1.10.0.rc0
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data.tar.gz.sig +0 -0
- data/LICENSE +1 -1
- data/README.md +94 -334
- data/Rakefile +6 -4
- data/VERSION +1 -1
- data/bin/mongo_console +13 -6
- data/lib/mongo.rb +22 -27
- data/lib/mongo/bulk_write_collection_view.rb +352 -0
- data/lib/mongo/collection.rb +128 -188
- data/lib/mongo/collection_writer.rb +348 -0
- data/lib/mongo/connection.rb +19 -0
- data/lib/mongo/{util → connection}/node.rb +15 -1
- data/lib/mongo/{util → connection}/pool.rb +34 -19
- data/lib/mongo/{util → connection}/pool_manager.rb +8 -2
- data/lib/mongo/{util → connection}/sharding_pool_manager.rb +1 -1
- data/lib/mongo/connection/socket.rb +18 -0
- data/lib/mongo/{util → connection/socket}/socket_util.rb +5 -2
- data/lib/mongo/{util → connection/socket}/ssl_socket.rb +3 -4
- data/lib/mongo/{util → connection/socket}/tcp_socket.rb +25 -15
- data/lib/mongo/{util → connection/socket}/unix_socket.rb +6 -4
- data/lib/mongo/cursor.rb +113 -47
- data/lib/mongo/db.rb +203 -131
- data/lib/mongo/{exceptions.rb → exception.rb} +7 -1
- data/lib/mongo/functional.rb +19 -0
- data/lib/mongo/functional/authentication.rb +303 -0
- data/lib/mongo/{util → functional}/logging.rb +1 -1
- data/lib/mongo/{util → functional}/read_preference.rb +49 -1
- data/lib/mongo/{util → functional}/uri_parser.rb +81 -69
- data/lib/mongo/{util → functional}/write_concern.rb +2 -1
- data/{test/unit/pool_test.rb → lib/mongo/gridfs.rb} +5 -10
- data/lib/mongo/gridfs/grid.rb +1 -3
- data/lib/mongo/gridfs/grid_ext.rb +1 -1
- data/lib/mongo/gridfs/grid_file_system.rb +1 -1
- data/lib/mongo/gridfs/grid_io.rb +1 -1
- data/lib/mongo/legacy.rb +63 -8
- data/lib/mongo/mongo_client.rb +128 -154
- data/lib/mongo/mongo_replica_set_client.rb +17 -11
- data/lib/mongo/mongo_sharded_client.rb +2 -1
- data/lib/mongo/networking.rb +19 -10
- data/lib/mongo/utils.rb +19 -0
- data/lib/mongo/{util → utils}/conversions.rb +1 -1
- data/lib/mongo/{util → utils}/core_ext.rb +1 -1
- data/lib/mongo/{util → utils}/server_version.rb +1 -1
- data/lib/mongo/{util → utils}/support.rb +10 -57
- data/lib/mongo/{util → utils}/thread_local_variable_manager.rb +1 -1
- data/test/functional/authentication_test.rb +8 -21
- data/test/functional/bulk_write_collection_view_test.rb +782 -0
- data/test/functional/{connection_test.rb → client_test.rb} +153 -78
- data/test/functional/collection_test.rb +343 -97
- data/test/functional/collection_writer_test.rb +83 -0
- data/test/functional/conversions_test.rb +1 -3
- data/test/functional/cursor_fail_test.rb +3 -3
- data/test/functional/cursor_message_test.rb +3 -3
- data/test/functional/cursor_test.rb +38 -3
- data/test/functional/db_api_test.rb +5 -5
- data/test/functional/db_connection_test.rb +2 -2
- data/test/functional/db_test.rb +35 -11
- data/test/functional/grid_file_system_test.rb +2 -2
- data/test/functional/grid_io_test.rb +2 -2
- data/test/functional/grid_test.rb +2 -2
- data/test/functional/pool_test.rb +2 -3
- data/test/functional/safe_test.rb +5 -5
- data/test/functional/ssl_test.rb +22 -102
- data/test/functional/support_test.rb +1 -1
- data/test/functional/timeout_test.rb +6 -22
- data/test/functional/uri_test.rb +113 -12
- data/test/functional/write_concern_test.rb +6 -6
- data/test/helpers/general.rb +50 -0
- data/test/helpers/test_unit.rb +309 -0
- data/test/replica_set/authentication_test.rb +8 -23
- data/test/replica_set/basic_test.rb +41 -14
- data/test/replica_set/client_test.rb +179 -117
- data/test/replica_set/complex_connect_test.rb +6 -7
- data/test/replica_set/connection_test.rb +46 -38
- data/test/replica_set/count_test.rb +2 -2
- data/test/replica_set/cursor_test.rb +8 -8
- data/test/replica_set/insert_test.rb +64 -2
- data/test/replica_set/max_values_test.rb +59 -10
- data/test/replica_set/pinning_test.rb +2 -2
- data/test/replica_set/query_test.rb +2 -2
- data/test/replica_set/read_preference_test.rb +6 -6
- data/test/replica_set/refresh_test.rb +7 -7
- data/test/replica_set/replication_ack_test.rb +5 -5
- data/test/replica_set/ssl_test.rb +24 -106
- data/test/sharded_cluster/basic_test.rb +43 -15
- data/test/shared/authentication/basic_auth_shared.rb +215 -0
- data/test/shared/authentication/sasl_plain_shared.rb +96 -0
- data/test/shared/ssl_shared.rb +173 -0
- data/test/test_helper.rb +31 -199
- data/test/threading/basic_test.rb +29 -3
- data/test/tools/mongo_config.rb +45 -20
- data/test/tools/mongo_config_test.rb +1 -1
- data/test/unit/client_test.rb +136 -57
- data/test/unit/collection_test.rb +31 -55
- data/test/unit/connection_test.rb +135 -72
- data/test/unit/cursor_test.rb +2 -2
- data/test/unit/db_test.rb +19 -15
- data/test/unit/grid_test.rb +2 -2
- data/test/unit/mongo_sharded_client_test.rb +17 -15
- data/test/unit/node_test.rb +2 -2
- data/test/unit/pool_manager_test.rb +7 -5
- data/test/unit/read_pref_test.rb +82 -2
- data/test/unit/read_test.rb +14 -14
- data/test/unit/safe_test.rb +9 -9
- data/test/unit/sharding_pool_manager_test.rb +11 -5
- data/test/unit/write_concern_test.rb +9 -9
- metadata +71 -56
- metadata.gz.sig +0 -0
- data/test/functional/threading_test.rb +0 -109
- data/test/shared/authentication.rb +0 -121
- data/test/unit/util_test.rb +0 -69
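The new bulk_write_collection_view.rb and collection_writer.rb files above implement the bulk write API introduced in 1.10.0.rc0. As a rough sketch only (not taken from this diff, and assuming the initialize_ordered_bulk_op / initialize_unordered_bulk_op entry points documented for the 1.x driver), driving it looks like this:

# Sketch: ordered bulk write with the 1.10 Ruby driver (names are illustrative).
require 'mongo'

client = Mongo::MongoClient.new('localhost', 27017)
coll   = client['test_db']['test_collection']

bulk = coll.initialize_ordered_bulk_op          # or initialize_unordered_bulk_op
bulk.insert(:_id => 1, :name => 'a')
bulk.insert(:_id => 2, :name => 'b')
bulk.find(:_id => 1).update('$set' => {:name => 'c'})
result = bulk.execute                           # dispatches through CollectionWriter below
puts result.inspect

Ordered operations stop at the first error; unordered operations are grouped and continue past individual failures, which is what the CollectionWriter/CollectionCommandWriter classes in this diff implement.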
data/lib/mongo/collection_writer.rb (new file)
@@ -0,0 +1,348 @@
+# Copyright (C) 2009-2013 MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+module Mongo
+
+  class CollectionWriter
+    include Mongo::Logging
+    include Mongo::WriteConcern
+
+    OPCODE = {
+      :insert => Mongo::Constants::OP_INSERT,
+      :update => Mongo::Constants::OP_UPDATE,
+      :delete => Mongo::Constants::OP_DELETE
+    }
+    WRITE_COMMAND_ARG_KEY = {
+      :insert => :documents,
+      :update => :updates,
+      :delete => :deletes
+    }
+    MAX_WRITE_BATCH_SIZE = 1000
+
+    def initialize(collection)
+      @collection = collection
+      @name = @collection.name
+      @db = @collection.db
+      @connection = @db.connection
+      @logger = @connection.logger
+      @max_write_batch_size = MAX_WRITE_BATCH_SIZE
+    end
+
+    # common implementation only for new batch write commands (insert, update, delete) and old batch insert
+    def batch_write_incremental(op_type, documents, check_keys=true, opts={})
+      raise Mongo::OperationFailure, "Request contains no documents" if documents.empty?
+      write_concern = get_write_concern(opts, @collection)
+      max_message_size, max_append_size, max_serialize_size = batch_write_max_sizes(write_concern)
+      ordered = opts[:ordered]
+      continue_on_error = !!opts[:continue_on_error] || ordered == false
+      collect_on_error = !!opts[:collect_on_error] || ordered == false
+      error_docs = [] # docs with serialization errors
+      errors = []
+      exchanges = []
+      serialized_doc = nil
+      message = BSON::ByteBuffer.new("", max_message_size)
+      docs = documents.dup
+      catch(:error) do
+        until docs.empty? || (!errors.empty? && !collect_on_error) # process documents a batch at a time
+          batch_docs = []
+          batch_message_initialize(message, op_type, continue_on_error, write_concern)
+          while !docs.empty? && batch_docs.size < @max_write_batch_size
+            begin
+              doc = docs.first
+              doc = doc[:d] if op_type == :insert && !ordered.nil? #check_keys for :update outside of serialize
+              serialized_doc ||= BSON::BSON_CODER.serialize(doc, check_keys, true, max_serialize_size)
+            rescue BSON::InvalidDocument, BSON::InvalidKeyName, BSON::InvalidStringEncoding => ex
+              bulk_message = "Bulk write error - #{ex.message} - examine result for complete information"
+              ex = BulkWriteError.new(bulk_message, Mongo::ErrorCode::INVALID_BSON,
+                {:op_type => op_type, :serialize => doc, :ord => docs.first[:ord], :error => ex}) unless ordered.nil?
+              error_docs << docs.shift
+              errors << ex
+              next if collect_on_error
+              throw(:error) if batch_docs.empty?
+              break # defer exit and send batch
+            end
+            break if message.size + serialized_doc.size > max_append_size
+            batch_docs << docs.shift
+            batch_message_append(message, serialized_doc, write_concern)
+            serialized_doc = nil
+          end
+          begin
+            response = batch_message_send(message, op_type, batch_docs, write_concern, continue_on_error) if batch_docs.size > 0
+            exchanges << {:op_type => op_type, :batch => batch_docs, :opts => opts, :response => response}
+          rescue Mongo::OperationFailure => ex
+            errors << ex
+            exchanges << {:op_type => op_type, :batch => batch_docs, :opts => opts, :response => ex.result}
+            throw(:error) unless continue_on_error
+          end
+        end
+      end
+      [error_docs, errors, exchanges]
+    end
+
+    def batch_write_partition(op_type, documents, check_keys, opts)
+      raise Mongo::OperationFailure, "Request contains no documents" if documents.empty?
+      write_concern = get_write_concern(opts, @collection)
+      ordered = opts[:ordered]
+      continue_on_error = !!opts[:continue_on_error] || ordered == false # continue_on_error default false
+      collect_on_error = !!opts[:collect_on_error] # collect_on_error default false
+      error_docs = [] # docs with serialization errors
+      errors = []
+      exchanges = []
+      @write_batch_size = [documents.size, @max_write_batch_size].min
+      docs = documents.dup
+      until docs.empty?
+        batch = docs.take(@write_batch_size)
+        begin
+          batch_to_send = batch #(op_type == :insert && !ordered.nil?) ? batch.collect{|doc|doc[:d]} : batch
+          if @collection.db.connection.use_write_command?(write_concern) # TODO - polymorphic send_write including legacy insert
+            response = send_bulk_write_command(op_type, batch_to_send, check_keys, opts)
+          else
+            response = send_write_operation(op_type, nil, batch_to_send, check_keys, opts, write_concern)
+          end
+          exchanges << {:op_type => op_type, :batch => batch, :opts => opts, :response => response}
+          docs = docs.drop(batch.size)
+          @write_batch_size = [(@write_batch_size*1097) >> 10, @write_batch_size+1].max unless docs.empty? # 2**(1/10) multiplicative increase
+          @write_batch_size = @max_write_batch_size if @write_batch_size > @max_write_batch_size
+        rescue BSON::InvalidDocument, BSON::InvalidKeyName, BSON::InvalidStringEncoding => ex
+          if @write_batch_size > 1 # decrease batch size
+            @write_batch_size = (@write_batch_size+1) >> 1 # 2**(-1) multiplicative decrease
+            next
+          end
+          # error on a single document
+          bulk_message = "Bulk write error - #{ex.message} - examine result for complete information"
+          ex = BulkWriteError.new(bulk_message, Mongo::ErrorCode::INVALID_BSON,
+            {:op_type => op_type, :batch => batch, :ord => batch.first[:ord], :opts => opts, :error => ex}) unless ordered.nil?
+          error_docs << docs.shift
+          next if collect_on_error
+          errors << ex
+          break unless continue_on_error
+        rescue Mongo::OperationFailure => ex
+          errors << ex
+          exchanges << {:op_type => op_type, :batch => batch, :opts => opts, :response => ex.result}
+          docs = docs.drop(batch.size)
+          break if !continue_on_error && !collect_on_error
+        end
+      end
+      [error_docs, errors, exchanges]
+    end
+
+    alias :batch_write :batch_write_incremental
+
+    def send_bulk_write_command(op_type, documents, check_keys, opts, collection_name=@name)
+      if op_type == :insert
+        documents = documents.collect{|doc| doc[:d]} if opts.key?(:ordered)
+        documents.each do |doc|
+          # TODO - @pk_factory.create_pk(doc)
+          if check_keys
+            doc.each_key do |key|
+              key = key.to_s
+              raise BSON::InvalidKeyName.new("key #{key} must not start with '$'") if key[0] == ?$
+              raise BSON::InvalidKeyName.new("key #{key} must not contain '.'") if key.include? ?.
+            end
+          end
+        end
+      #elsif op_type == :update # TODO - check keys
+      #elsif op_type == :delete
+      #else
+      #  raise ArgumentError, "Write operation type must be :insert, :update or :delete"
+      end
+      request = BSON::OrderedHash[op_type, collection_name].merge!(
+        Mongo::CollectionWriter::WRITE_COMMAND_ARG_KEY[op_type] => documents,
+        :writeConcern => get_write_concern(opts, @collection),
+        :ordered => opts[:ordered] || !opts[:continue_on_error]
+      )
+      @db.command(request)
+    end
+
+    private
+
+    def sort_by_first_sym(pairs)
+      pairs = pairs.collect{|first, rest| [first.to_s, rest]} #stringify_first
+      pairs = pairs.sort{|x,y| x.first <=> y.first }
+      pairs.collect{|first, rest| [first.to_sym, rest]} #symbolize_first
+    end
+
+    def ordered_group_by_first(pairs)
+      pairs.inject([[], nil]) do |memo, pair|
+        result, previous_value = memo
+        current_value = pair.first
+        result << [current_value, []] if previous_value != current_value
+        result.last.last << pair.last
+        [result, current_value]
+      end.first
+    end
+
+  end
+
+  class CollectionOperationWriter < CollectionWriter
+    def initialize(collection)
+      super(collection)
+    end
+
+    def send_write_operation(op_type, selector, doc_or_docs, check_keys, opts, write_concern, collection_name=@name)
+      message = BSON::ByteBuffer.new("", @connection.max_message_size)
+      message.put_int((op_type == :insert && !!opts[:continue_on_error]) ? 1 : 0)
+      BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{collection_name}")
+      if op_type == :update
+        update_options = 0
+        update_options += 1 if opts[:upsert]
+        update_options += 2 if opts[:multi]
+        message.put_int(update_options)
+      elsif op_type == :delete
+        delete_options = 0
+        delete_options += 1 if opts[:limit] && opts[:limit] != 0
+        message.put_int(delete_options)
+      end
+      message.put_binary(BSON::BSON_CODER.serialize(selector, false, true, @connection.max_bson_size).to_s) if selector
+      [doc_or_docs].flatten(1).compact.each do |document|
+        message.put_binary(BSON::BSON_CODER.serialize(document, check_keys, true, @connection.max_bson_size).to_s)
+        if message.size > @connection.max_message_size
+          raise BSON::InvalidDocument, "Message is too large. This message is limited to #{@connection.max_message_size} bytes."
+        end
+      end
+      instrument(op_type, :database => @db.name, :collection => collection_name, :selector => selector, :documents => doc_or_docs) do
+        op_code = OPCODE[op_type]
+        if Mongo::WriteConcern.gle?(write_concern)
+          @connection.send_message_with_gle(op_code, message, @db.name, nil, write_concern)
+        else
+          @connection.send_message(op_code, message)
+        end
+      end
+    end
+
+    def bulk_execute(ops, options, opts = {})
+      write_concern = get_write_concern(opts, @collection)
+      errors = []
+      exchanges = []
+      ops.each do |op_type, doc|
+        doc = {:d => @collection.pk_factory.create_pk(doc[:d]), :ord => doc[:ord]} if op_type == :insert
+        doc_opts = doc.merge(opts)
+        d = doc_opts.delete(:d)
+        q = doc_opts.delete(:q)
+        u = doc_opts.delete(:u)
+        begin # use single and NOT batch inserts since there no index for an error
+          response = @collection.operation_writer.send_write_operation(op_type, q, d || u, check_keys = false, doc_opts, write_concern)
+          exchanges << {:op_type => op_type, :batch => [doc], :opts => opts, :response => response}
+        rescue BSON::InvalidDocument, BSON::InvalidKeyName, BSON::InvalidStringEncoding => ex
+          bulk_message = "Bulk write error - #{ex.message} - examine result for complete information"
+          ex = BulkWriteError.new(bulk_message, Mongo::ErrorCode::INVALID_BSON,
+            {:op_type => op_type, :serialize => doc, :ord => doc[:ord], :error => ex})
+          errors << ex
+          break if options[:ordered]
+        rescue Mongo::OperationFailure => ex
+          errors << ex
+          exchanges << {:op_type => op_type, :batch => [doc], :opts => opts, :response => ex.result}
+          break if options[:ordered] && ex.result["err"] != "norepl"
+        end
+      end
+      [errors, exchanges]
+    end
+
+    private
+
+    def batch_message_initialize(message, op_type, continue_on_error, write_concern)
+      message.clear!.clear
+      message.put_int(continue_on_error ? 1 : 0)
+      BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{@name}")
+    end
+
+    def batch_message_append(message, serialized_doc, write_concern)
+      message.put_binary(serialized_doc.to_s)
+    end
+
+    def batch_message_send(message, op_type, batch_docs, write_concern, continue_on_error)
+      instrument(:insert, :database => @db.name, :collection => @name, :documents => batch_docs) do
+        if Mongo::WriteConcern.gle?(write_concern)
+          @connection.send_message_with_gle(Mongo::Constants::OP_INSERT, message, @db.name, nil, write_concern)
+        else
+          @connection.send_message(Mongo::Constants::OP_INSERT, message)
+        end
+      end
+    end
+
+    def batch_write_max_sizes(write_concern)
+      [@connection.max_message_size, @connection.max_message_size, @connection.max_bson_size]
+    end
+
+  end
+
+  class CollectionCommandWriter < CollectionWriter
+    def initialize(collection)
+      super(collection)
+    end
+
+    def send_write_command(op_type, selector, doc_or_docs, check_keys, opts, write_concern, collection_name=@name)
+      if op_type == :insert
+        argument = [doc_or_docs].flatten(1).compact
+      elsif op_type == :update
+        argument = [{:q => selector, :u => doc_or_docs, :multi => !!opts[:multi]}]
+        argument.first.merge!(:upsert => opts[:upsert]) if opts[:upsert]
+      elsif op_type == :delete
+        argument = [{:q => selector, :limit => (opts[:limit] || 0)}]
+      else
+        raise ArgumentError, "Write operation type must be :insert, :update or :delete"
+      end
+      request = BSON::OrderedHash[op_type, collection_name, WRITE_COMMAND_ARG_KEY[op_type], argument]
+      request.merge!(:writeConcern => write_concern, :ordered => !opts[:continue_on_error])
+      request.merge!(opts)
+      instrument(op_type, :database => @db.name, :collection => collection_name, :selector => selector, :documents => doc_or_docs) do
+        @db.command(request)
+      end
+    end
+
+    def bulk_execute(ops, options, opts = {})
+      errors = []
+      exchanges = []
+      ops = (options[:ordered] == false) ? sort_by_first_sym(ops) : ops # sort by write-type
+      ordered_group_by_first(ops).each do |op_type, documents|
+        documents.collect! {|doc| {:d => @collection.pk_factory.create_pk(doc[:d]), :ord => doc[:ord]} } if op_type == :insert
+        error_docs, batch_errors, batch_exchanges =
+          batch_write(op_type, documents, check_keys = false, opts.merge(:ordered => options[:ordered]))
+        errors += batch_errors
+        exchanges += batch_exchanges
+        break if options[:ordered] && !batch_errors.empty?
+      end
+      [errors, exchanges]
+    end
+
+    private
+
+    def batch_message_initialize(message, op_type, continue_on_error, write_concern)
+      message.clear!.clear
+      @bson_empty ||= BSON::BSON_CODER.serialize({})
+      message.put_binary(@bson_empty.to_s)
+      message.unfinish!.array!(WRITE_COMMAND_ARG_KEY[op_type])
+    end
+
+    def batch_message_append(message, serialized_doc, write_concern)
+      message.push_doc!(serialized_doc)
+    end
+
+    def batch_message_send(message, op_type, batch_docs, write_concern, continue_on_error)
+      message.finish!
+      request = BSON::OrderedHash[op_type, @name, :bson, message]
+      request.merge!(:writeConcern => write_concern, :ordered => !continue_on_error)
+      instrument(:insert, :database => @db.name, :collection => @name, :documents => batch_docs) do
+        @db.command(request)
+      end
+    end
+
+    def batch_write_max_sizes(write_concern)
+      [MongoClient::COMMAND_HEADROOM, MongoClient::APPEND_HEADROOM, MongoClient::SERIALIZE_HEADROOM].collect{|h| @connection.max_bson_size + h}
+    end
+
+  end
+
+end
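For reference, CollectionCommandWriter#bulk_execute above relies on the private ordered_group_by_first helper to group adjacent operations of the same type into one batch while preserving order. A small illustration, derived from the implementation shown above (the documents are hypothetical):

# ordered_group_by_first groups only adjacent pairs with the same first element.
ops = [[:insert, {:ord => 0}], [:insert, {:ord => 1}],
       [:update, {:ord => 2}], [:insert, {:ord => 3}]]
# ordered_group_by_first(ops) =>
# [[:insert, [{:ord => 0}, {:ord => 1}]],
#  [:update, [{:ord => 2}]],
#  [:insert, [{:ord => 3}]]]

For unordered execution the operations are first sorted by type (sort_by_first_sym), so all inserts, updates, and deletes collapse into at most one group each.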
data/lib/mongo/connection.rb (new file)
@@ -0,0 +1,19 @@
+# Copyright (C) 2009-2013 MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+require 'mongo/connection/socket'
+require 'mongo/connection/node'
+require 'mongo/connection/pool'
+require 'mongo/connection/pool_manager'
+require 'mongo/connection/sharding_pool_manager'
data/lib/mongo/{util → connection}/node.rb
@@ -1,4 +1,4 @@
-# Copyright (C) 2013
+# Copyright (C) 2009-2013 MongoDB, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -181,6 +181,18 @@ module Mongo
       @max_message_size || max_bson_size * MESSAGE_SIZE_FACTOR
     end
 
+    def max_wire_version
+      @max_wire_version || 0
+    end
+
+    def min_wire_version
+      @min_wire_version || 0
+    end
+
+    def wire_version_feature?(feature)
+      min_wire_version <= feature && feature <= max_wire_version
+    end
+
     protected
 
     # Ensure that this node is a healthy member of a replica set.
@@ -215,6 +227,8 @@ module Mongo
     def update_max_sizes
       @max_bson_size = config['maxBsonObjectSize'] || DEFAULT_MAX_BSON_SIZE
       @max_message_size = config['maxMessageSizeBytes'] || @max_bson_size * MESSAGE_SIZE_FACTOR
+      @max_wire_version = config['maxWireVersion'] || 0
+      @min_wire_version = config['minWireVersion'] || 0
     end
   end
 end
data/lib/mongo/{util → connection}/pool.rb
@@ -1,4 +1,4 @@
-# Copyright (C) 2013
+# Copyright (C) 2009-2013 MongoDB, Inc.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -52,9 +52,6 @@ module Mongo
       # Condition variable for signal and wait
       @queue = ConditionVariable.new
 
-      # Operations to perform on a socket
-      @socket_ops = Hash.new { |h, k| h[k] = [] }
-
       @sockets = []
       @checked_out = []
       @ping_time = nil
@@ -186,8 +183,8 @@ module Mongo
       end
 
       # If any saved authentications exist, we want to apply those
-      # when creating new sockets.
-
+      # when creating new sockets and process logouts.
+      check_auths(socket)
 
       @sockets << socket
       @checked_out << socket
@@ -199,24 +196,26 @@ module Mongo
     # then we need a way to apply the authentication on each socket.
     # So we store the apply_authentication method, and this will be
    # applied right before the next use of each socket.
+    #
+    # @deprecated This method has been replaced by Pool#check_auths (private)
+    # and it isn't necessary to ever invoke this method directly.
    def authenticate_existing
      @connection_mutex.synchronize do
        @sockets.each do |socket|
-          @socket_ops[socket] << Proc.new do
-            @client.apply_saved_authentication(:socket => socket)
-          end
+          check_auths(socket)
        end
      end
    end

    # Store the logout op for each existing socket to be applied before
    # the next use of each socket.
-    def logout_existing(db)
+    #
+    # @deprecated This method has been replaced by Pool#check_auths (private)
+    # and it isn't necessary to ever invoke this method directly.
+    def logout_existing(database)
      @connection_mutex.synchronize do
        @sockets.each do |socket|
-          @socket_ops[socket] << Proc.new do
-            @client.db(db).issue_logout(:socket => socket)
-          end
+          check_auths(socket)
        end
      end
    end
@@ -292,12 +291,7 @@ module Mongo
        end
 
        if socket
-
-          # At the moment, we use this to lazily authenticate and
-          # logout existing socket connections.
-          @socket_ops[socket].reject! do |op|
-            op.call
-          end
+          check_auths(socket)
 
          if socket.closed?
            @checked_out.delete(socket)
@@ -317,6 +311,27 @@ module Mongo
 
    private
 
+    # Helper method to handle keeping track of auths/logouts for sockets.
+    #
+    # @param socket [Socket] The socket instance to be checked.
+    #
+    # @return [Socket] The authenticated socket instance.
+    def check_auths(socket)
+      # find and handle logouts
+      (socket.auths - @client.auths).each do |auth|
+        @client.issue_logout(auth[:source], :socket => socket)
+        socket.auths.delete(auth)
+      end
+
+      # find and handle new auths
+      (@client.auths - socket.auths).each do |auth|
+        @client.issue_authentication(auth, :socket => socket)
+        socket.auths.add(auth)
+      end
+
+      socket
+    end
+
    def close_sockets(sockets)
      sockets.each do |socket|
        @sockets.delete(socket)