mongo 1.3.0 → 1.12.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- checksums.yaml.gz.sig +0 -0
- data/{LICENSE.txt → LICENSE} +1 -1
- data/README.md +122 -271
- data/Rakefile +25 -209
- data/VERSION +1 -0
- data/bin/mongo_console +31 -9
- data/lib/mongo/bulk_write_collection_view.rb +387 -0
- data/lib/mongo/collection.rb +576 -269
- data/lib/mongo/collection_writer.rb +364 -0
- data/lib/mongo/connection/node.rb +249 -0
- data/lib/mongo/connection/pool.rb +340 -0
- data/lib/mongo/connection/pool_manager.rb +320 -0
- data/lib/mongo/connection/sharding_pool_manager.rb +67 -0
- data/lib/mongo/connection/socket/socket_util.rb +37 -0
- data/lib/mongo/connection/socket/ssl_socket.rb +95 -0
- data/lib/mongo/connection/socket/tcp_socket.rb +87 -0
- data/lib/mongo/connection/socket/unix_socket.rb +39 -0
- data/lib/mongo/connection/socket.rb +18 -0
- data/lib/mongo/connection.rb +7 -875
- data/lib/mongo/cursor.rb +403 -117
- data/lib/mongo/db.rb +444 -243
- data/lib/mongo/exception.rb +145 -0
- data/lib/mongo/functional/authentication.rb +455 -0
- data/lib/mongo/functional/logging.rb +85 -0
- data/lib/mongo/functional/read_preference.rb +183 -0
- data/lib/mongo/functional/scram.rb +556 -0
- data/lib/mongo/functional/uri_parser.rb +409 -0
- data/lib/mongo/functional/write_concern.rb +66 -0
- data/lib/mongo/functional.rb +20 -0
- data/lib/mongo/gridfs/grid.rb +30 -24
- data/lib/mongo/gridfs/grid_ext.rb +6 -10
- data/lib/mongo/gridfs/grid_file_system.rb +38 -20
- data/lib/mongo/gridfs/grid_io.rb +84 -75
- data/lib/mongo/gridfs.rb +18 -0
- data/lib/mongo/legacy.rb +140 -0
- data/lib/mongo/mongo_client.rb +697 -0
- data/lib/mongo/mongo_replica_set_client.rb +535 -0
- data/lib/mongo/mongo_sharded_client.rb +159 -0
- data/lib/mongo/networking.rb +372 -0
- data/lib/mongo/{util → utils}/conversions.rb +29 -8
- data/lib/mongo/{util → utils}/core_ext.rb +28 -18
- data/lib/mongo/{util → utils}/server_version.rb +4 -6
- data/lib/mongo/{util → utils}/support.rb +29 -31
- data/lib/mongo/utils/thread_local_variable_manager.rb +25 -0
- data/lib/mongo/utils.rb +19 -0
- data/lib/mongo.rb +51 -50
- data/mongo.gemspec +29 -32
- data/test/functional/authentication_test.rb +39 -0
- data/test/functional/bulk_api_stress_test.rb +133 -0
- data/test/functional/bulk_write_collection_view_test.rb +1198 -0
- data/test/functional/client_test.rb +627 -0
- data/test/functional/collection_test.rb +2175 -0
- data/test/functional/collection_writer_test.rb +83 -0
- data/test/{conversions_test.rb → functional/conversions_test.rb} +47 -3
- data/test/functional/cursor_fail_test.rb +57 -0
- data/test/functional/cursor_message_test.rb +56 -0
- data/test/functional/cursor_test.rb +683 -0
- data/test/functional/db_api_test.rb +835 -0
- data/test/functional/db_connection_test.rb +25 -0
- data/test/functional/db_test.rb +348 -0
- data/test/functional/grid_file_system_test.rb +285 -0
- data/test/{grid_io_test.rb → functional/grid_io_test.rb} +72 -11
- data/test/{grid_test.rb → functional/grid_test.rb} +88 -15
- data/test/functional/pool_test.rb +136 -0
- data/test/functional/safe_test.rb +98 -0
- data/test/functional/ssl_test.rb +29 -0
- data/test/functional/support_test.rb +62 -0
- data/test/functional/timeout_test.rb +60 -0
- data/test/functional/uri_test.rb +446 -0
- data/test/functional/write_concern_test.rb +118 -0
- data/test/helpers/general.rb +50 -0
- data/test/helpers/test_unit.rb +476 -0
- data/test/replica_set/authentication_test.rb +37 -0
- data/test/replica_set/basic_test.rb +189 -0
- data/test/replica_set/client_test.rb +393 -0
- data/test/replica_set/connection_test.rb +138 -0
- data/test/replica_set/count_test.rb +66 -0
- data/test/replica_set/cursor_test.rb +220 -0
- data/test/replica_set/insert_test.rb +157 -0
- data/test/replica_set/max_values_test.rb +151 -0
- data/test/replica_set/pinning_test.rb +105 -0
- data/test/replica_set/query_test.rb +73 -0
- data/test/replica_set/read_preference_test.rb +219 -0
- data/test/replica_set/refresh_test.rb +211 -0
- data/test/replica_set/replication_ack_test.rb +95 -0
- data/test/replica_set/ssl_test.rb +32 -0
- data/test/sharded_cluster/basic_test.rb +203 -0
- data/test/shared/authentication/basic_auth_shared.rb +260 -0
- data/test/shared/authentication/bulk_api_auth_shared.rb +249 -0
- data/test/shared/authentication/gssapi_shared.rb +176 -0
- data/test/shared/authentication/sasl_plain_shared.rb +96 -0
- data/test/shared/authentication/scram_shared.rb +92 -0
- data/test/shared/ssl_shared.rb +235 -0
- data/test/test_helper.rb +53 -94
- data/test/threading/basic_test.rb +120 -0
- data/test/tools/mongo_config.rb +708 -0
- data/test/tools/mongo_config_test.rb +160 -0
- data/test/unit/client_test.rb +381 -0
- data/test/unit/collection_test.rb +89 -53
- data/test/unit/connection_test.rb +282 -32
- data/test/unit/cursor_test.rb +206 -8
- data/test/unit/db_test.rb +55 -13
- data/test/unit/grid_test.rb +43 -16
- data/test/unit/mongo_sharded_client_test.rb +48 -0
- data/test/unit/node_test.rb +93 -0
- data/test/unit/pool_manager_test.rb +111 -0
- data/test/unit/read_pref_test.rb +406 -0
- data/test/unit/read_test.rb +159 -0
- data/test/unit/safe_test.rb +69 -36
- data/test/unit/sharding_pool_manager_test.rb +84 -0
- data/test/unit/write_concern_test.rb +175 -0
- data.tar.gz.sig +3 -0
- metadata +227 -216
- metadata.gz.sig +0 -0
- data/docs/CREDITS.md +0 -123
- data/docs/FAQ.md +0 -116
- data/docs/GridFS.md +0 -158
- data/docs/HISTORY.md +0 -244
- data/docs/RELEASES.md +0 -33
- data/docs/REPLICA_SETS.md +0 -72
- data/docs/TUTORIAL.md +0 -247
- data/docs/WRITE_CONCERN.md +0 -28
- data/lib/mongo/exceptions.rb +0 -71
- data/lib/mongo/gridfs/grid_io_fix.rb +0 -38
- data/lib/mongo/repl_set_connection.rb +0 -342
- data/lib/mongo/test.rb +0 -20
- data/lib/mongo/util/pool.rb +0 -177
- data/lib/mongo/util/uri_parser.rb +0 -185
- data/test/async/collection_test.rb +0 -224
- data/test/async/connection_test.rb +0 -24
- data/test/async/cursor_test.rb +0 -162
- data/test/async/worker_pool_test.rb +0 -99
- data/test/auxillary/1.4_features.rb +0 -166
- data/test/auxillary/authentication_test.rb +0 -68
- data/test/auxillary/autoreconnect_test.rb +0 -41
- data/test/auxillary/fork_test.rb +0 -30
- data/test/auxillary/repl_set_auth_test.rb +0 -58
- data/test/auxillary/slave_connection_test.rb +0 -36
- data/test/auxillary/threaded_authentication_test.rb +0 -101
- data/test/bson/binary_test.rb +0 -15
- data/test/bson/bson_test.rb +0 -649
- data/test/bson/byte_buffer_test.rb +0 -208
- data/test/bson/hash_with_indifferent_access_test.rb +0 -38
- data/test/bson/json_test.rb +0 -17
- data/test/bson/object_id_test.rb +0 -154
- data/test/bson/ordered_hash_test.rb +0 -204
- data/test/bson/timestamp_test.rb +0 -24
- data/test/collection_test.rb +0 -910
- data/test/connection_test.rb +0 -309
- data/test/cursor_fail_test.rb +0 -75
- data/test/cursor_message_test.rb +0 -43
- data/test/cursor_test.rb +0 -483
- data/test/db_api_test.rb +0 -726
- data/test/db_connection_test.rb +0 -15
- data/test/db_test.rb +0 -287
- data/test/grid_file_system_test.rb +0 -243
- data/test/load/resque/load.rb +0 -21
- data/test/load/resque/processor.rb +0 -26
- data/test/load/thin/load.rb +0 -24
- data/test/load/unicorn/load.rb +0 -23
- data/test/load/unicorn/unicorn.rb +0 -29
- data/test/replica_sets/connect_test.rb +0 -94
- data/test/replica_sets/connection_string_test.rb +0 -32
- data/test/replica_sets/count_test.rb +0 -35
- data/test/replica_sets/insert_test.rb +0 -53
- data/test/replica_sets/pooled_insert_test.rb +0 -55
- data/test/replica_sets/query_secondaries.rb +0 -96
- data/test/replica_sets/query_test.rb +0 -51
- data/test/replica_sets/replication_ack_test.rb +0 -66
- data/test/replica_sets/rs_test_helper.rb +0 -27
- data/test/safe_test.rb +0 -68
- data/test/support/hash_with_indifferent_access.rb +0 -186
- data/test/support/keys.rb +0 -45
- data/test/support_test.rb +0 -18
- data/test/threading/threading_with_large_pool_test.rb +0 -90
- data/test/threading_test.rb +0 -87
- data/test/tools/auth_repl_set_manager.rb +0 -14
- data/test/tools/load.rb +0 -58
- data/test/tools/repl_set_manager.rb +0 -266
- data/test/tools/sharding_manager.rb +0 -202
- data/test/tools/test.rb +0 -4
- data/test/unit/pool_test.rb +0 -9
- data/test/unit/repl_set_connection_test.rb +0 -59
- data/test/uri_test.rb +0 -91
@@ -0,0 +1,364 @@
|
|
1
|
+
# Copyright (C) 2009-2013 MongoDB, Inc.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
module Mongo
|
16
|
+
|
17
|
+
class CollectionWriter
|
18
|
+
include Mongo::Logging
|
19
|
+
include Mongo::WriteConcern
|
20
|
+
|
21
|
+
OPCODE = {
|
22
|
+
:insert => Mongo::Constants::OP_INSERT,
|
23
|
+
:update => Mongo::Constants::OP_UPDATE,
|
24
|
+
:delete => Mongo::Constants::OP_DELETE
|
25
|
+
}
|
26
|
+
WRITE_COMMAND_ARG_KEY = {
|
27
|
+
:insert => :documents,
|
28
|
+
:update => :updates,
|
29
|
+
:delete => :deletes
|
30
|
+
}
|
31
|
+
|
32
|
+
# Wires writer state up from the target collection: the collection itself,
# its name, its db, the shared connection, and that connection's logger.
def initialize(collection)
  @collection = collection
  @name       = collection.name
  @db         = collection.db
  @connection = @db.connection
  @logger     = @connection.logger
  # Client-wide default; refreshed from the connection before each batch write.
  @max_write_batch_size = Mongo::MongoClient::DEFAULT_MAX_WRITE_BATCH_SIZE
end
|
40
|
+
|
41
|
+
# common implementation only for new batch write commands (insert, update, delete) and old batch insert
|
42
|
+
# Common incremental batch writer for the new write commands
# (insert/update/delete) and the old batch insert.  Packs serialized docs
# into one message until the byte budget or the server's batch-size cap is
# hit, sends the batch, and repeats.  Serialization failures are collected
# per-document; command failures abort via catch/throw unless
# continue_on_error is set.
#
# @return [Array] [error_docs, errors, write_concern_errors, exchanges]
def batch_write_incremental(op_type, documents, check_keys=true, opts={})
  raise Mongo::OperationFailure, "Request contains no documents" if documents.empty?
  write_concern = get_write_concern(opts, @collection)
  max_message_size, max_append_size, max_serialize_size = batch_write_max_sizes(write_concern)
  ordered = opts[:ordered]
  # bulk-API semantics: an explicit :ordered => false implies both flags
  continue_on_error = !!opts[:continue_on_error] || ordered == false
  collect_on_error = !!opts[:collect_on_error] || ordered == false
  error_docs = [] # docs with serialization errors
  errors = []
  write_concern_errors = []
  exchanges = []
  serialized_doc = nil
  message = BSON::ByteBuffer.new("", max_message_size)
  @max_write_batch_size = @collection.db.connection.max_write_batch_size
  docs = documents.dup
  catch(:error) do
    # process documents a batch at a time
    until docs.empty? || (!errors.empty? && !collect_on_error && !continue_on_error)
      batch_docs = []
      batch_message_initialize(message, op_type, continue_on_error, write_concern)
      until docs.empty? || batch_docs.size >= @max_write_batch_size
        begin
          doc = docs.first
          doc = doc[:d] if op_type == :insert && !ordered.nil? # check_keys for :update outside of serialize
          # memoized: a doc deferred by the byte-budget check below is not
          # serialized a second time on the next pass
          serialized_doc ||= BSON::BSON_CODER.serialize(doc, check_keys, true, max_serialize_size)
        rescue BSON::InvalidDocument, BSON::InvalidKeyName, BSON::InvalidStringEncoding => ex
          bulk_message = "Bulk write error - #{ex.message} - examine result for complete information"
          ex = BulkWriteError.new(bulk_message, Mongo::ErrorCode::INVALID_BSON,
                                  {:op_type => op_type, :serialize => doc, :ord => docs.first[:ord], :error => ex}) unless ordered.nil?
          error_docs << docs.shift
          errors << ex
          next if collect_on_error
          throw(:error) if batch_docs.empty?
          break # defer exit and send batch
        end
        break if message.size + serialized_doc.size > max_append_size
        batch_docs << docs.shift
        batch_message_append(message, serialized_doc, write_concern)
        serialized_doc = nil
      end
      begin
        response = batch_message_send(message, op_type, batch_docs, write_concern, continue_on_error) unless batch_docs.empty?
        exchanges << {:op_type => op_type, :batch => batch_docs, :opts => opts, :response => response}
      rescue Mongo::WriteConcernError => ex
        write_concern_errors << ex
        exchanges << {:op_type => op_type, :batch => batch_docs, :opts => opts, :response => ex.result}
      rescue Mongo::OperationFailure => ex
        errors << ex
        exchanges << {:op_type => op_type, :batch => batch_docs, :opts => opts, :response => ex.result}
        throw(:error) unless continue_on_error
      end
    end
  end
  [error_docs, errors, write_concern_errors, exchanges]
end
|
96
|
+
|
97
|
+
# Writes documents in adaptively sized partitions: grows the batch
# (~2**(1/10) multiplicative increase) on success, halves it on
# serialization errors to isolate a bad document.
#
# @param op_type [Symbol] :insert, :update or :delete
# @param documents [Array] docs (or bulk-op specs) to write
# @param check_keys [Boolean] validate key names during serialization
# @param opts [Hash] :ordered / :continue_on_error / :collect_on_error plus write concern
#
# @return [Array] [error_docs, errors, write_concern_errors, exchanges]
def batch_write_partition(op_type, documents, check_keys, opts)
  raise Mongo::OperationFailure, "Request contains no documents" if documents.empty?
  write_concern = get_write_concern(opts, @collection)
  ordered = opts[:ordered]
  continue_on_error = !!opts[:continue_on_error] || ordered == false # continue_on_error default false
  collect_on_error = !!opts[:collect_on_error] # collect_on_error default false
  error_docs = [] # docs with serialization errors
  errors = []
  write_concern_errors = []
  exchanges = []
  @max_write_batch_size = @collection.db.connection.max_write_batch_size
  @write_batch_size = [documents.size, @max_write_batch_size].min
  docs = documents.dup
  until docs.empty?
    batch = docs.take(@write_batch_size)
    begin
      batch_to_send = batch #(op_type == :insert && !ordered.nil?) ? batch.collect{|doc|doc[:d]} : batch
      if @collection.db.connection.use_write_command?(write_concern) # TODO - polymorphic send_write including legacy insert
        response = send_bulk_write_command(op_type, batch_to_send, check_keys, opts)
      else
        response = send_write_operation(op_type, nil, batch_to_send, check_keys, opts, write_concern)
      end
      exchanges << {:op_type => op_type, :batch => batch, :opts => opts, :response => response}
      docs = docs.drop(batch.size)
      @write_batch_size = [(@write_batch_size*1097) >> 10, @write_batch_size+1].max unless docs.empty? # 2**(1/10) multiplicative increase
      @write_batch_size = @max_write_batch_size if @write_batch_size > @max_write_batch_size
    rescue BSON::InvalidDocument, BSON::InvalidKeyName, BSON::InvalidStringEncoding => ex
      if @write_batch_size > 1 # decrease batch size to isolate the bad document
        @write_batch_size = (@write_batch_size+1) >> 1 # 2**(-1) multiplicative decrease
        next
      end
      # error on a single document
      bulk_message = "Bulk write error - #{ex.message} - examine result for complete information"
      ex = BulkWriteError.new(bulk_message, Mongo::ErrorCode::INVALID_BSON,
                              {:op_type => op_type, :batch => batch, :ord => batch.first[:ord], :opts => opts, :error => ex}) unless ordered.nil?
      error_docs << docs.shift
      next if collect_on_error
      errors << ex
      break unless continue_on_error
    rescue Mongo::WriteConcernError => ex
      write_concern_errors << ex
      # BUG FIX: was `:batch => batch_docs`, a variable that does not exist in
      # this method (it belongs to batch_write_incremental) — a write-concern
      # error here raised NameError instead of recording the exchange.
      exchanges << {:op_type => op_type, :batch => batch, :opts => opts, :response => ex.result}
      docs = docs.drop(batch.size)
    rescue Mongo::OperationFailure => ex
      errors << ex
      exchanges << {:op_type => op_type, :batch => batch, :opts => opts, :response => ex.result}
      docs = docs.drop(batch.size)
      break if !continue_on_error && !collect_on_error
    end
  end
  [error_docs, errors, write_concern_errors, exchanges]
end
|
149
|
+
|
150
|
+
alias :batch_write :batch_write_incremental
|
151
|
+
|
152
|
+
# Builds and issues one bulk write command from pre-batched documents.
# Insert payloads arriving from the bulk API (marked by :ordered in opts)
# are unwrapped from their {:d => doc, :ord => n} envelopes, and key names
# are validated here because the command path skips per-document
# serialization checks.
def send_bulk_write_command(op_type, documents, check_keys, opts, collection_name=@name)
  if op_type == :insert
    documents = documents.collect { |doc| doc[:d] } if opts.key?(:ordered)
    documents.each do |doc|
      # TODO - @pk_factory.create_pk(doc)
      next unless check_keys
      doc.each_key do |key|
        key = key.to_s
        raise BSON::InvalidKeyName.new("key #{key} must not start with '$'") if key.start_with?('$')
        raise BSON::InvalidKeyName.new("key #{key} must not contain '.'") if key.include?('.')
      end
    end
  end
  #elsif op_type == :update # TODO - check keys
  #elsif op_type == :delete
  #else
  #  raise ArgumentError, "Write operation type must be :insert, :update or :delete"
  request = BSON::OrderedHash[op_type, collection_name].merge!(
    Mongo::CollectionWriter::WRITE_COMMAND_ARG_KEY[op_type] => documents,
    :writeConcern => get_write_concern(opts, @collection),
    :ordered => opts[:ordered] || !opts[:continue_on_error]
  )
  @db.command(request)
end
|
177
|
+
|
178
|
+
private
|
179
|
+
|
180
|
+
# Sorts [key, payload] pairs by key, comparing keys as strings and
# restoring them to symbols afterwards (so unordered bulk ops group
# deterministically by write type).
def sort_by_first_sym(pairs)
  pairs
    .map { |first, rest| [first.to_s, rest] }
    .sort { |x, y| x.first <=> y.first }
    .map { |first, rest| [first.to_sym, rest] }
end
|
185
|
+
|
186
|
+
# Collapses consecutive pairs sharing the same first element into
# [first, [seconds...]] groups, preserving encounter order — like
# Enumerable#chunk, but eagerly building the grouped arrays.
def ordered_group_by_first(pairs)
  groups = []
  previous = nil
  pairs.each do |pair|
    key = pair.first
    groups << [key, []] if previous != key
    groups.last.last << pair.last
    previous = key
  end
  groups
end
|
195
|
+
|
196
|
+
end
|
197
|
+
|
198
|
+
class CollectionOperationWriter < CollectionWriter
|
199
|
+
# Writer that issues legacy wire-protocol write operations
# (OP_INSERT / OP_UPDATE / OP_DELETE).  All state comes from the base class.
def initialize(collection)
  super
end
|
202
|
+
|
203
|
+
# Serializes and sends one legacy wire-protocol write.  Message layout:
# flags int (continue-on-error for inserts), full namespace cstring,
# op-specific flags (update/delete), optional selector, then the doc(s).
# Uses GLE (getlasterror) confirmation when the write concern asks for it.
def send_write_operation(op_type, selector, doc_or_docs, check_keys, opts, write_concern, collection_name=@name)
  message = BSON::ByteBuffer.new("", @connection.max_message_size)
  message.put_int((op_type == :insert && !!opts[:continue_on_error]) ? 1 : 0)
  BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{collection_name}")
  case op_type
  when :update
    flags = 0
    flags += 1 if opts[:upsert]
    flags += 2 if opts[:multi]
    message.put_int(flags)
  when :delete
    flags = 0
    flags += 1 if opts[:limit] && opts[:limit] != 0
    message.put_int(flags)
  end
  message.put_binary(BSON::BSON_CODER.serialize(selector, false, true, @connection.max_bson_size).to_s) if selector
  [doc_or_docs].flatten(1).compact.each do |document|
    message.put_binary(BSON::BSON_CODER.serialize(document, check_keys, true, @connection.max_bson_size).to_s)
    if message.size > @connection.max_message_size
      raise BSON::InvalidDocument, "Message is too large. This message is limited to #{@connection.max_message_size} bytes."
    end
  end
  instrument(op_type, :database => @db.name, :collection => collection_name, :selector => selector, :documents => doc_or_docs) do
    op_code = OPCODE[op_type]
    if Mongo::WriteConcern.gle?(write_concern)
      @connection.send_message_with_gle(op_code, message, @db.name, nil, write_concern)
    else
      @connection.send_message(op_code, message)
    end
  end
end
|
233
|
+
|
234
|
+
# Executes bulk ops one document at a time over the legacy protocol —
# single writes keep the error index unambiguous, which batch inserts
# cannot guarantee.
#
# @return [Array] [errors, write_concern_errors, exchanges]
def bulk_execute(ops, options, opts = {})
  write_concern = get_write_concern(opts, @collection)
  errors = []
  write_concern_errors = []
  exchanges = []
  ops.each do |op_type, doc|
    # assign a primary key up front so a retried insert is stable
    doc = {:d => @collection.pk_factory.create_pk(doc[:d]), :ord => doc[:ord]} if op_type == :insert
    doc_opts = doc.merge(opts)
    d = doc_opts.delete(:d)
    q = doc_opts.delete(:q)
    u = doc_opts.delete(:u)
    begin # use single and NOT batch inserts since there no index for an error
      response = @collection.operation_writer.send_write_operation(op_type, q, d || u, check_keys = false, doc_opts, write_concern)
      exchanges << {:op_type => op_type, :batch => [doc], :opts => opts, :response => response}
    rescue BSON::InvalidDocument, BSON::InvalidKeyName, BSON::InvalidStringEncoding => ex
      bulk_message = "Bulk write error - #{ex.message} - examine result for complete information"
      ex = BulkWriteError.new(bulk_message, Mongo::ErrorCode::INVALID_BSON,
                              {:op_type => op_type, :serialize => doc, :ord => doc[:ord], :error => ex})
      errors << ex
      break if options[:ordered]
    rescue Mongo::WriteConcernError => ex
      write_concern_errors << ex
      exchanges << {:op_type => op_type, :batch => [doc], :opts => opts, :response => ex.result}
    rescue Mongo::OperationFailure => ex
      errors << ex
      exchanges << {:op_type => op_type, :batch => [doc], :opts => opts, :response => ex.result}
      # replication-only failures ("norepl") don't stop an ordered bulk
      break if options[:ordered] && ex.result["err"] != "norepl"
    end
  end
  [errors, write_concern_errors, exchanges]
end
|
265
|
+
|
266
|
+
private
|
267
|
+
|
268
|
+
# Resets the buffer and writes the legacy OP_INSERT header:
# continue-on-error flag followed by the full collection namespace.
def batch_message_initialize(message, op_type, continue_on_error, write_concern)
  message.clear!.clear
  flag = continue_on_error ? 1 : 0
  message.put_int(flag)
  BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{@name}")
end
|
273
|
+
|
274
|
+
# Appends one pre-serialized BSON document to the outgoing legacy message.
def batch_message_append(message, serialized_doc, write_concern)
  bytes = serialized_doc.to_s
  message.put_binary(bytes)
end
|
277
|
+
|
278
|
+
# Sends the assembled OP_INSERT message, with getlasterror confirmation
# when the write concern requires acknowledgement.
def batch_message_send(message, op_type, batch_docs, write_concern, continue_on_error)
  instrument(:insert, :database => @db.name, :collection => @name, :documents => batch_docs) do
    unless Mongo::WriteConcern.gle?(write_concern)
      @connection.send_message(Mongo::Constants::OP_INSERT, message)
    else
      @connection.send_message_with_gle(Mongo::Constants::OP_INSERT, message, @db.name, nil, write_concern)
    end
  end
end
|
287
|
+
|
288
|
+
# Legacy ops: whole messages and appends are bounded by the connection's
# max message size; individual documents by its max BSON size.
def batch_write_max_sizes(write_concern)
  max_message = @connection.max_message_size
  [max_message, max_message, @connection.max_bson_size]
end
|
291
|
+
|
292
|
+
end
|
293
|
+
|
294
|
+
class CollectionCommandWriter < CollectionWriter
|
295
|
+
# Writer that issues the MongoDB 2.6+ write commands (insert/update/delete
# as db commands).  All state comes from the base class.
def initialize(collection)
  super
end
|
298
|
+
|
299
|
+
# Builds and runs one write command for a single logical write.  The
# op-specific argument array mirrors the server's command schema:
# insert => docs, update => [{:q,:u,:multi,(:upsert)}], delete => [{:q,:limit}].
def send_write_command(op_type, selector, doc_or_docs, check_keys, opts, write_concern, collection_name=@name)
  case op_type
  when :insert
    argument = [doc_or_docs].flatten(1).compact
  when :update
    argument = [{:q => selector, :u => doc_or_docs, :multi => !!opts[:multi]}]
    argument.first.merge!(:upsert => opts[:upsert]) if opts[:upsert]
  when :delete
    argument = [{:q => selector, :limit => (opts[:limit] || 0)}]
  else
    raise ArgumentError, "Write operation type must be :insert, :update or :delete"
  end
  request = BSON::OrderedHash[op_type, collection_name, WRITE_COMMAND_ARG_KEY[op_type], argument]
  request.merge!(:writeConcern => write_concern, :ordered => !opts[:continue_on_error])
  request.merge!(opts)
  instrument(op_type, :database => @db.name, :collection => collection_name, :selector => selector, :documents => doc_or_docs) do
    @db.command(request)
  end
end
|
317
|
+
|
318
|
+
# Executes bulk ops via write commands: unordered bulks are regrouped by
# write type for fewer round trips, then each run of same-typed ops is
# batch-written.
#
# @return [Array] [errors, write_concern_errors, exchanges]
def bulk_execute(ops, options, opts = {})
  errors = []
  write_concern_errors = []
  exchanges = []
  ops = sort_by_first_sym(ops) if options[:ordered] == false # sort by write-type
  ordered_group_by_first(ops).each do |op_type, documents|
    documents.collect! { |doc| {:d => @collection.pk_factory.create_pk(doc[:d]), :ord => doc[:ord]} } if op_type == :insert
    error_docs, batch_errors, batch_write_concern_errors, batch_exchanges =
      batch_write(op_type, documents, check_keys = false, opts.merge(:ordered => options[:ordered]))
    errors.concat(batch_errors)
    write_concern_errors.concat(batch_write_concern_errors)
    exchanges.concat(batch_exchanges)
    break if options[:ordered] && !batch_errors.empty?
  end
  [errors, write_concern_errors, exchanges]
end
|
334
|
+
|
335
|
+
private
|
336
|
+
|
337
|
+
# Resets the buffer into write-command form: an (initially empty) command
# document reopened with the op-specific array key ready for doc pushes.
def batch_message_initialize(message, op_type, continue_on_error, write_concern)
  message.clear!.clear
  @bson_empty ||= BSON::BSON_CODER.serialize({}) # cached empty doc skeleton
  message.put_binary(@bson_empty.to_s)
  message.unfinish!.array!(WRITE_COMMAND_ARG_KEY[op_type])
end
|
343
|
+
|
344
|
+
# Pushes one pre-serialized document into the command's open BSON array.
def batch_message_append(message, serialized_doc, write_concern)
  message.push_doc!(serialized_doc)
end
|
347
|
+
|
348
|
+
# Finalizes the raw BSON command built in the buffer, attaches write
# concern and ordering, and runs it as a db command.
def batch_message_send(message, op_type, batch_docs, write_concern, continue_on_error)
  message.finish!
  request = BSON::OrderedHash[op_type, @name, :bson, message]
  request.merge!(:writeConcern => write_concern, :ordered => !continue_on_error)
  instrument(:insert, :database => @db.name, :collection => @name, :documents => batch_docs) do
    @db.command(request)
  end
end
|
356
|
+
|
357
|
+
# Write commands wrap documents in a command envelope, so each limit is the
# max BSON size plus the corresponding client headroom allowance.
def batch_write_max_sizes(write_concern)
  base = @connection.max_bson_size
  [MongoClient::COMMAND_HEADROOM, MongoClient::APPEND_HEADROOM, MongoClient::SERIALIZE_HEADROOM].collect { |headroom| base + headroom }
end
|
360
|
+
|
361
|
+
end
|
362
|
+
|
363
|
+
end
|
364
|
+
|
@@ -0,0 +1,249 @@
|
|
1
|
+
# Copyright (C) 2009-2013 MongoDB, Inc.
|
2
|
+
#
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
|
+
# you may not use this file except in compliance with the License.
|
5
|
+
# You may obtain a copy of the License at
|
6
|
+
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8
|
+
#
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12
|
+
# See the License for the specific language governing permissions and
|
13
|
+
# limitations under the License.
|
14
|
+
|
15
|
+
module Mongo
|
16
|
+
class Node
|
17
|
+
|
18
|
+
attr_accessor :host, :port, :address, :client, :socket, :last_state
|
19
|
+
|
20
|
+
# One replica-set/mongos member as seen by a client: normalized host/port,
# lazily established socket, and cached ismaster config.
def initialize(client, host_port)
  @client  = client
  @manager = client.local_manager
  @host, @port = Support.normalize_seeds(host_port)
  @address = "#{@host}:#{@port}"
  @config  = nil
  @socket  = nil
  # re-entrant lock guarding connect/set_config
  @node_mutex = Monitor.new
end
|
29
|
+
|
30
|
+
# Nodes are equal when they are both Nodes and share the same
# "host:port" address.
def eql?(other)
  other.is_a?(Node) && @address == other.address
end
alias :== :eql?
|
34
|
+
|
35
|
+
# Matches this node against a "host:port" string; anything other than a
# String never matches.
def =~(other)
  return false unless other.is_a?(String)
  h, p = Support.normalize_seeds(other)
  h == @host && p == @port
end
|
43
|
+
|
44
|
+
# Human-readable "host:port" used in log and error messages.
def host_string
  address
end
|
47
|
+
|
48
|
+
# Cached ismaster document for this node, (re)connecting and fetching it
# on demand.  Returns nil when the node cannot be reached.
def config
  connect unless connected?
  set_config if !@config && connected?
  @config
end
|
53
|
+
|
54
|
+
def inspect
|
55
|
+
"<Mongo::Node:0x#{self.object_id.to_s(16)} @host=#{@host} @port=#{@port}>"
|
56
|
+
end
|
57
|
+
|
58
|
+
# Create a connection to the provided node,
|
59
|
+
# and, if successful, return the socket. Otherwise,
|
60
|
+
# return nil.
|
61
|
+
# Create a connection to this node and, if successful, store the socket.
# On any connection/timeout/IO failure the error is logged at debug level
# and the node is closed (socket and config cleared).
def connect
  @node_mutex.synchronize do
    begin
      @socket = @client.socket_class.new(@host, @port,
                                         @client.op_timeout,
                                         @client.connect_timeout,
                                         @client.socket_opts)
    rescue ConnectionTimeoutError, OperationTimeout, ConnectionFailure, OperationFailure,
           SocketError, SystemCallError, IOError => ex
      @client.log(:debug, "Failed connection to #{host_string} with #{ex.class}, #{ex.message}.")
      close
    end
  end
end
|
75
|
+
|
76
|
+
# This should only be called within a mutex
|
77
|
+
# Tears down the socket (if still open) and invalidates the cached config.
# This should only be called within a mutex.
def close
  @socket.close if @socket && !@socket.closed?
  @socket = nil
  @config = nil
end
|
84
|
+
|
85
|
+
# True when a socket exists and has not been closed.
def connected?
  !@socket.nil? && !@socket.closed?
end
|
88
|
+
|
89
|
+
# Pings the server over this node's socket.  Returns true when the ping
# succeeds, false-ish for a bad reply, nil when the command or connection
# fails outright.
def active?
  begin
    result = @client['admin'].command({:ping => 1}, :socket => usable_socket)
  rescue OperationFailure, SocketError, SystemCallError, IOError
    return nil
  end
  result['ok'] == 1
end
|
97
|
+
|
98
|
+
# Get the configuration for the provided node as returned by the
|
99
|
+
# ismaster command. Additionally, check that the replica set name
|
100
|
+
# matches with the name provided.
|
101
|
+
# Get the configuration for this node as returned by the ismaster command,
# recording the previous primary/other state, refreshing size/version
# limits, and — for replica set members — validating membership and set
# name.  Any failure logs a warning and closes the node.
def set_config
  @node_mutex.synchronize do
    begin
      # remember the previous state so callers can detect transitions
      @last_state = (@config['ismaster'] ? :primary : :other) if @config

      if @client.connect_timeout
        Timeout::timeout(@client.connect_timeout, OperationTimeout) do
          @config = @client['admin'].command({:ismaster => 1}, :socket => usable_socket)
        end
      else
        @config = @client['admin'].command({:ismaster => 1}, :socket => usable_socket)
      end

      update_max_sizes

      @client.log(:warn, "#{config['msg']}") if @config['msg']

      unless @client.mongos?
        check_set_membership(@config)
        check_set_name(@config)
      end
    rescue ConnectionFailure, OperationFailure, OperationTimeout, SocketError, SystemCallError, IOError => ex
      @client.log(:warn, "Attempted connection to node #{host_string} raised " +
                         "#{ex.class}: #{ex.message}")
      # Socket may already be nil from issuing command
      close
    end
  end
end
|
134
|
+
|
135
|
+
# Return a list of replica set nodes from the config.
|
136
|
+
# Note: this excludes arbiters.
|
137
|
+
# Return a list of replica set nodes from the config (hosts plus passives;
# arbiters are excluded).  A mongos reports itself.
def node_list
  members = []
  members += config['hosts'] if config['hosts']
  members += config['passives'] if config['passives']
  members += ["#{@host}:#{@port}"] if @client.mongos?
  members
end
|
144
|
+
|
145
|
+
# Normalized [host, port] pairs for the set's arbiters ([] when none).
def arbiters
  return [] unless config['arbiters']
  config['arbiters'].map { |arbiter| Support.normalize_seeds(arbiter) }
end
|
151
|
+
|
152
|
+
# True when ismaster reports this node as primary (boolean true or the
# legacy integer 1).
def primary?
  [true, 1].include?(config['ismaster'])
end
|
155
|
+
|
156
|
+
# True when ismaster reports this node as a secondary (boolean true or the
# legacy integer 1).
def secondary?
  [true, 1].include?(config['secondary'])
end
|
159
|
+
|
160
|
+
# Replica set tags advertised by this node ({} when none are set).
def tags
  config['tags'] || {}
end
|
163
|
+
|
164
|
+
# [host, port] pair for this node.
def host_port
  [@host, @port]
end
|
167
|
+
|
168
|
+
# Hash nodes by address so Hash/Set membership agrees with eql?.
def hash
  address.hash
end
|
171
|
+
|
172
|
+
# Truthy when the node is connected and an ismaster config is available
# (returns the config itself, preserving legacy truthiness).
def healthy?
  connected? && config
end
|
175
|
+
|
176
|
+
# Max BSON document size advertised by this node, falling back to the
# driver default when no config has been fetched yet.
def max_bson_size
  @max_bson_size || DEFAULT_MAX_BSON_SIZE
end
|
179
|
+
|
180
|
+
# Max wire message size advertised by this node; derived from the BSON
# limit when the server did not report one.
def max_message_size
  @max_message_size || max_bson_size * MESSAGE_SIZE_FACTOR
end
|
183
|
+
|
184
|
+
# Highest wire-protocol version this node supports (0 when unreported).
def max_wire_version
  @max_wire_version || 0
end
|
187
|
+
|
188
|
+
# Lowest wire-protocol version this node supports (0 when unreported).
def min_wire_version
  @min_wire_version || 0
end
|
191
|
+
|
192
|
+
# True when +feature+'s required wire version falls within this node's
# supported [min, max] range.
def wire_version_feature?(feature)
  (min_wire_version..max_wire_version).cover?(feature)
end
|
195
|
+
|
196
|
+
# Max documents per batch write advertised by this node, falling back to
# the client-wide default when no config has been fetched yet.
def max_write_batch_size
  @max_write_batch_size || Mongo::MongoClient::DEFAULT_MAX_WRITE_BATCH_SIZE
end
|
199
|
+
|
200
|
+
protected
|
201
|
+
|
202
|
+
# Ensure that this node is a healthy member of a replica set.
|
203
|
+
# Ensure that this node is a healthy member of a replica set: the config
# must list hosts, and a single-node set must be primary or secondary.
def check_set_membership(config)
  unless config.has_key?('hosts')
    raise ConnectionFailure,
          "Will not connect to #{host_string} because it's not a member " +
          "of a replica set."
  end
  if config['hosts'].length == 1 && !config['ismaster'] && !config['secondary']
    raise ConnectionFailure,
          "Attempting to connect to an unhealthy, single-node replica set."
  end
end
|
214
|
+
|
215
|
+
# Ensure that this node is part of a replica set of the expected name.
|
216
|
+
# Ensure that this node is part of a replica set of the expected name.
# Old servers that omit setName only get a warning; a mismatching name
# raises ReplicaSetConnectionError.
def check_set_name(config)
  return unless @client.replica_set_name
  if !config['setName']
    @client.log(:warn, "Could not verify replica set name for member #{host_string} " +
                       "because ismaster does not return name in this version of MongoDB")
  elsif @client.replica_set_name != config['setName']
    message = "Attempting to connect to replica set '#{config['setName']}' on member #{host_string} " +
              "but expected '#{@client.replica_set_name}'"
    raise ReplicaSetConnectionError, message
  end
end
|
228
|
+
|
229
|
+
private
|
230
|
+
|
231
|
+
# Returns the node's socket, reconnecting first when the socket was
# inherited across a fork (its pid no longer matches this process).
def usable_socket
  if @socket && @socket.pid != Process.pid
    @socket.close
    @socket = nil
    connect
  else
    @socket
  end
end
|
240
|
+
|
241
|
+
# Refreshes size/version limits from the freshly fetched ismaster config,
# substituting driver defaults for anything the server did not report.
def update_max_sizes
  @max_bson_size        = config['maxBsonObjectSize'] || DEFAULT_MAX_BSON_SIZE
  @max_message_size     = config['maxMessageSizeBytes'] || @max_bson_size * MESSAGE_SIZE_FACTOR
  @max_wire_version     = config['maxWireVersion'] || 0
  @min_wire_version     = config['minWireVersion'] || 0
  @max_write_batch_size = config['maxWriteBatchSize'] || Mongo::MongoClient::DEFAULT_MAX_WRITE_BATCH_SIZE
end
|
248
|
+
end
|
249
|
+
end
|