mongo 1.10.0-java

Files changed (116)
  1. checksums.yaml +7 -0
  2. checksums.yaml.gz.sig +0 -0
  3. data.tar.gz.sig +0 -0
  4. data/LICENSE +190 -0
  5. data/README.md +149 -0
  6. data/Rakefile +31 -0
  7. data/VERSION +1 -0
  8. data/bin/mongo_console +43 -0
  9. data/ext/jsasl/target/jsasl.jar +0 -0
  10. data/lib/mongo.rb +90 -0
  11. data/lib/mongo/bulk_write_collection_view.rb +380 -0
  12. data/lib/mongo/collection.rb +1164 -0
  13. data/lib/mongo/collection_writer.rb +364 -0
  14. data/lib/mongo/connection.rb +19 -0
  15. data/lib/mongo/connection/node.rb +239 -0
  16. data/lib/mongo/connection/pool.rb +347 -0
  17. data/lib/mongo/connection/pool_manager.rb +325 -0
  18. data/lib/mongo/connection/sharding_pool_manager.rb +67 -0
  19. data/lib/mongo/connection/socket.rb +18 -0
  20. data/lib/mongo/connection/socket/socket_util.rb +37 -0
  21. data/lib/mongo/connection/socket/ssl_socket.rb +95 -0
  22. data/lib/mongo/connection/socket/tcp_socket.rb +86 -0
  23. data/lib/mongo/connection/socket/unix_socket.rb +39 -0
  24. data/lib/mongo/cursor.rb +719 -0
  25. data/lib/mongo/db.rb +735 -0
  26. data/lib/mongo/exception.rb +88 -0
  27. data/lib/mongo/functional.rb +21 -0
  28. data/lib/mongo/functional/authentication.rb +318 -0
  29. data/lib/mongo/functional/logging.rb +85 -0
  30. data/lib/mongo/functional/read_preference.rb +174 -0
  31. data/lib/mongo/functional/sasl_java.rb +48 -0
  32. data/lib/mongo/functional/uri_parser.rb +374 -0
  33. data/lib/mongo/functional/write_concern.rb +66 -0
  34. data/lib/mongo/gridfs.rb +18 -0
  35. data/lib/mongo/gridfs/grid.rb +112 -0
  36. data/lib/mongo/gridfs/grid_ext.rb +53 -0
  37. data/lib/mongo/gridfs/grid_file_system.rb +163 -0
  38. data/lib/mongo/gridfs/grid_io.rb +484 -0
  39. data/lib/mongo/legacy.rb +140 -0
  40. data/lib/mongo/mongo_client.rb +702 -0
  41. data/lib/mongo/mongo_replica_set_client.rb +523 -0
  42. data/lib/mongo/mongo_sharded_client.rb +159 -0
  43. data/lib/mongo/networking.rb +370 -0
  44. data/lib/mongo/utils.rb +19 -0
  45. data/lib/mongo/utils/conversions.rb +110 -0
  46. data/lib/mongo/utils/core_ext.rb +70 -0
  47. data/lib/mongo/utils/server_version.rb +69 -0
  48. data/lib/mongo/utils/support.rb +80 -0
  49. data/lib/mongo/utils/thread_local_variable_manager.rb +25 -0
  50. data/mongo.gemspec +36 -0
  51. data/test/functional/authentication_test.rb +35 -0
  52. data/test/functional/bulk_api_stress_test.rb +133 -0
  53. data/test/functional/bulk_write_collection_view_test.rb +1129 -0
  54. data/test/functional/client_test.rb +565 -0
  55. data/test/functional/collection_test.rb +2073 -0
  56. data/test/functional/collection_writer_test.rb +83 -0
  57. data/test/functional/conversions_test.rb +163 -0
  58. data/test/functional/cursor_fail_test.rb +63 -0
  59. data/test/functional/cursor_message_test.rb +57 -0
  60. data/test/functional/cursor_test.rb +625 -0
  61. data/test/functional/db_api_test.rb +819 -0
  62. data/test/functional/db_connection_test.rb +27 -0
  63. data/test/functional/db_test.rb +344 -0
  64. data/test/functional/grid_file_system_test.rb +285 -0
  65. data/test/functional/grid_io_test.rb +252 -0
  66. data/test/functional/grid_test.rb +273 -0
  67. data/test/functional/pool_test.rb +62 -0
  68. data/test/functional/safe_test.rb +98 -0
  69. data/test/functional/ssl_test.rb +29 -0
  70. data/test/functional/support_test.rb +62 -0
  71. data/test/functional/timeout_test.rb +58 -0
  72. data/test/functional/uri_test.rb +330 -0
  73. data/test/functional/write_concern_test.rb +118 -0
  74. data/test/helpers/general.rb +50 -0
  75. data/test/helpers/test_unit.rb +317 -0
  76. data/test/replica_set/authentication_test.rb +35 -0
  77. data/test/replica_set/basic_test.rb +174 -0
  78. data/test/replica_set/client_test.rb +341 -0
  79. data/test/replica_set/complex_connect_test.rb +77 -0
  80. data/test/replica_set/connection_test.rb +138 -0
  81. data/test/replica_set/count_test.rb +64 -0
  82. data/test/replica_set/cursor_test.rb +212 -0
  83. data/test/replica_set/insert_test.rb +140 -0
  84. data/test/replica_set/max_values_test.rb +145 -0
  85. data/test/replica_set/pinning_test.rb +55 -0
  86. data/test/replica_set/query_test.rb +73 -0
  87. data/test/replica_set/read_preference_test.rb +214 -0
  88. data/test/replica_set/refresh_test.rb +175 -0
  89. data/test/replica_set/replication_ack_test.rb +94 -0
  90. data/test/replica_set/ssl_test.rb +32 -0
  91. data/test/sharded_cluster/basic_test.rb +197 -0
  92. data/test/shared/authentication/basic_auth_shared.rb +286 -0
  93. data/test/shared/authentication/bulk_api_auth_shared.rb +259 -0
  94. data/test/shared/authentication/gssapi_shared.rb +164 -0
  95. data/test/shared/authentication/sasl_plain_shared.rb +96 -0
  96. data/test/shared/ssl_shared.rb +235 -0
  97. data/test/test_helper.rb +56 -0
  98. data/test/threading/basic_test.rb +120 -0
  99. data/test/tools/mongo_config.rb +608 -0
  100. data/test/tools/mongo_config_test.rb +160 -0
  101. data/test/unit/client_test.rb +347 -0
  102. data/test/unit/collection_test.rb +166 -0
  103. data/test/unit/connection_test.rb +325 -0
  104. data/test/unit/cursor_test.rb +299 -0
  105. data/test/unit/db_test.rb +136 -0
  106. data/test/unit/grid_test.rb +76 -0
  107. data/test/unit/mongo_sharded_client_test.rb +48 -0
  108. data/test/unit/node_test.rb +93 -0
  109. data/test/unit/pool_manager_test.rb +142 -0
  110. data/test/unit/read_pref_test.rb +115 -0
  111. data/test/unit/read_test.rb +159 -0
  112. data/test/unit/safe_test.rb +158 -0
  113. data/test/unit/sharding_pool_manager_test.rb +84 -0
  114. data/test/unit/write_concern_test.rb +175 -0
  115. metadata +260 -0
  116. metadata.gz.sig +0 -0
data/lib/mongo/collection_writer.rb
@@ -0,0 +1,364 @@
+ # Copyright (C) 2009-2013 MongoDB, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ module Mongo
+
+   class CollectionWriter
+     include Mongo::Logging
+     include Mongo::WriteConcern
+
+     OPCODE = {
+       :insert => Mongo::Constants::OP_INSERT,
+       :update => Mongo::Constants::OP_UPDATE,
+       :delete => Mongo::Constants::OP_DELETE
+     }
+     WRITE_COMMAND_ARG_KEY = {
+       :insert => :documents,
+       :update => :updates,
+       :delete => :deletes
+     }
+
+     def initialize(collection)
+       @collection = collection
+       @name = @collection.name
+       @db = @collection.db
+       @connection = @db.connection
+       @logger = @connection.logger
+       @max_write_batch_size = Mongo::MongoClient::DEFAULT_MAX_WRITE_BATCH_SIZE
+     end
+
+     # common implementation only for new batch write commands (insert, update, delete) and old batch insert
+     def batch_write_incremental(op_type, documents, check_keys=true, opts={})
+       raise Mongo::OperationFailure, "Request contains no documents" if documents.empty?
+       write_concern = get_write_concern(opts, @collection)
+       max_message_size, max_append_size, max_serialize_size = batch_write_max_sizes(write_concern)
+       ordered = opts[:ordered]
+       continue_on_error = !!opts[:continue_on_error] || ordered == false
+       collect_on_error = !!opts[:collect_on_error] || ordered == false
+       error_docs = [] # docs with serialization errors
+       errors = []
+       write_concern_errors = []
+       exchanges = []
+       serialized_doc = nil
+       message = BSON::ByteBuffer.new("", max_message_size)
+       @max_write_batch_size = @collection.db.connection.max_write_batch_size
+       docs = documents.dup
+       catch(:error) do
+         until docs.empty? || (!errors.empty? && !collect_on_error) # process documents a batch at a time
+           batch_docs = []
+           batch_message_initialize(message, op_type, continue_on_error, write_concern)
+           while !docs.empty? && batch_docs.size < @max_write_batch_size
+             begin
+               doc = docs.first
+               doc = doc[:d] if op_type == :insert && !ordered.nil? #check_keys for :update outside of serialize
+               serialized_doc ||= BSON::BSON_CODER.serialize(doc, check_keys, true, max_serialize_size)
+             rescue BSON::InvalidDocument, BSON::InvalidKeyName, BSON::InvalidStringEncoding => ex
+               bulk_message = "Bulk write error - #{ex.message} - examine result for complete information"
+               ex = BulkWriteError.new(bulk_message, Mongo::ErrorCode::INVALID_BSON,
+                                       {:op_type => op_type, :serialize => doc, :ord => docs.first[:ord], :error => ex}) unless ordered.nil?
+               error_docs << docs.shift
+               errors << ex
+               next if collect_on_error
+               throw(:error) if batch_docs.empty?
+               break # defer exit and send batch
+             end
+             break if message.size + serialized_doc.size > max_append_size
+             batch_docs << docs.shift
+             batch_message_append(message, serialized_doc, write_concern)
+             serialized_doc = nil
+           end
+           begin
+             response = batch_message_send(message, op_type, batch_docs, write_concern, continue_on_error) if batch_docs.size > 0
+             exchanges << {:op_type => op_type, :batch => batch_docs, :opts => opts, :response => response}
+           rescue Mongo::WriteConcernError => ex
+             write_concern_errors << ex
+             exchanges << {:op_type => op_type, :batch => batch_docs, :opts => opts, :response => ex.result}
+           rescue Mongo::OperationFailure => ex
+             errors << ex
+             exchanges << {:op_type => op_type, :batch => batch_docs, :opts => opts, :response => ex.result}
+             throw(:error) unless continue_on_error
+           end
+         end
+       end
+       [error_docs, errors, write_concern_errors, exchanges]
+     end
+
+     def batch_write_partition(op_type, documents, check_keys, opts)
+       raise Mongo::OperationFailure, "Request contains no documents" if documents.empty?
+       write_concern = get_write_concern(opts, @collection)
+       ordered = opts[:ordered]
+       continue_on_error = !!opts[:continue_on_error] || ordered == false # continue_on_error default false
+       collect_on_error = !!opts[:collect_on_error] # collect_on_error default false
+       error_docs = [] # docs with serialization errors
+       errors = []
+       write_concern_errors = []
+       exchanges = []
+       @max_write_batch_size = @collection.db.connection.max_write_batch_size
+       @write_batch_size = [documents.size, @max_write_batch_size].min
+       docs = documents.dup
+       until docs.empty?
+         batch = docs.take(@write_batch_size)
+         begin
+           batch_to_send = batch #(op_type == :insert && !ordered.nil?) ? batch.collect{|doc|doc[:d]} : batch
+           if @collection.db.connection.use_write_command?(write_concern) # TODO - polymorphic send_write including legacy insert
+             response = send_bulk_write_command(op_type, batch_to_send, check_keys, opts)
+           else
+             response = send_write_operation(op_type, nil, batch_to_send, check_keys, opts, write_concern)
+           end
+           exchanges << {:op_type => op_type, :batch => batch, :opts => opts, :response => response}
+           docs = docs.drop(batch.size)
+           @write_batch_size = [(@write_batch_size*1097) >> 10, @write_batch_size+1].max unless docs.empty? # 2**(1/10) multiplicative increase
+           @write_batch_size = @max_write_batch_size if @write_batch_size > @max_write_batch_size
+         rescue BSON::InvalidDocument, BSON::InvalidKeyName, BSON::InvalidStringEncoding => ex
+           if @write_batch_size > 1 # decrease batch size
+             @write_batch_size = (@write_batch_size+1) >> 1 # 2**(-1) multiplicative decrease
+             next
+           end
+           # error on a single document
+           bulk_message = "Bulk write error - #{ex.message} - examine result for complete information"
+           ex = BulkWriteError.new(bulk_message, Mongo::ErrorCode::INVALID_BSON,
+                                   {:op_type => op_type, :batch => batch, :ord => batch.first[:ord], :opts => opts, :error => ex}) unless ordered.nil?
+           error_docs << docs.shift
+           next if collect_on_error
+           errors << ex
+           break unless continue_on_error
+         rescue Mongo::WriteConcernError => ex
+           write_concern_errors << ex
+           exchanges << {:op_type => op_type, :batch => batch, :opts => opts, :response => ex.result}
+           docs = docs.drop(batch.size)
+         rescue Mongo::OperationFailure => ex
+           errors << ex
+           exchanges << {:op_type => op_type, :batch => batch, :opts => opts, :response => ex.result}
+           docs = docs.drop(batch.size)
+           break if !continue_on_error && !collect_on_error
+         end
+       end
+       [error_docs, errors, write_concern_errors, exchanges]
+     end
+
+     alias :batch_write :batch_write_incremental
+
+     def send_bulk_write_command(op_type, documents, check_keys, opts, collection_name=@name)
+       if op_type == :insert
+         documents = documents.collect{|doc| doc[:d]} if opts.key?(:ordered)
+         documents.each do |doc|
+           # TODO - @pk_factory.create_pk(doc)
+           if check_keys
+             doc.each_key do |key|
+               key = key.to_s
+               raise BSON::InvalidKeyName.new("key #{key} must not start with '$'") if key[0] == ?$
+               raise BSON::InvalidKeyName.new("key #{key} must not contain '.'") if key.include? ?.
+             end
+           end
+         end
+       #elsif op_type == :update # TODO - check keys
+       #elsif op_type == :delete
+       #else
+       #  raise ArgumentError, "Write operation type must be :insert, :update or :delete"
+       end
+       request = BSON::OrderedHash[op_type, collection_name].merge!(
+         Mongo::CollectionWriter::WRITE_COMMAND_ARG_KEY[op_type] => documents,
+         :writeConcern => get_write_concern(opts, @collection),
+         :ordered => opts[:ordered] || !opts[:continue_on_error]
+       )
+       @db.command(request)
+     end
+
+     private
+
+     def sort_by_first_sym(pairs)
+       pairs = pairs.collect{|first, rest| [first.to_s, rest]} #stringify_first
+       pairs = pairs.sort{|x,y| x.first <=> y.first }
+       pairs.collect{|first, rest| [first.to_sym, rest]} #symbolize_first
+     end
+
+     def ordered_group_by_first(pairs)
+       pairs.inject([[], nil]) do |memo, pair|
+         result, previous_value = memo
+         current_value = pair.first
+         result << [current_value, []] if previous_value != current_value
+         result.last.last << pair.last
+         [result, current_value]
+       end.first
+     end
+
+   end
+
+   class CollectionOperationWriter < CollectionWriter
+     def initialize(collection)
+       super(collection)
+     end
+
+     def send_write_operation(op_type, selector, doc_or_docs, check_keys, opts, write_concern, collection_name=@name)
+       message = BSON::ByteBuffer.new("", @connection.max_message_size)
+       message.put_int((op_type == :insert && !!opts[:continue_on_error]) ? 1 : 0)
+       BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{collection_name}")
+       if op_type == :update
+         update_options = 0
+         update_options += 1 if opts[:upsert]
+         update_options += 2 if opts[:multi]
+         message.put_int(update_options)
+       elsif op_type == :delete
+         delete_options = 0
+         delete_options += 1 if opts[:limit] && opts[:limit] != 0
+         message.put_int(delete_options)
+       end
+       message.put_binary(BSON::BSON_CODER.serialize(selector, false, true, @connection.max_bson_size).to_s) if selector
+       [doc_or_docs].flatten(1).compact.each do |document|
+         message.put_binary(BSON::BSON_CODER.serialize(document, check_keys, true, @connection.max_bson_size).to_s)
+         if message.size > @connection.max_message_size
+           raise BSON::InvalidDocument, "Message is too large. This message is limited to #{@connection.max_message_size} bytes."
+         end
+       end
+       instrument(op_type, :database => @db.name, :collection => collection_name, :selector => selector, :documents => doc_or_docs) do
+         op_code = OPCODE[op_type]
+         if Mongo::WriteConcern.gle?(write_concern)
+           @connection.send_message_with_gle(op_code, message, @db.name, nil, write_concern)
+         else
+           @connection.send_message(op_code, message)
+         end
+       end
+     end
+
+     def bulk_execute(ops, options, opts = {})
+       write_concern = get_write_concern(opts, @collection)
+       errors = []
+       write_concern_errors = []
+       exchanges = []
+       ops.each do |op_type, doc|
+         doc = {:d => @collection.pk_factory.create_pk(doc[:d]), :ord => doc[:ord]} if op_type == :insert
+         doc_opts = doc.merge(opts)
+         d = doc_opts.delete(:d)
+         q = doc_opts.delete(:q)
+         u = doc_opts.delete(:u)
+         begin # use single and NOT batch inserts since there is no index for an error
+           response = @collection.operation_writer.send_write_operation(op_type, q, d || u, check_keys = false, doc_opts, write_concern)
+           exchanges << {:op_type => op_type, :batch => [doc], :opts => opts, :response => response}
+         rescue BSON::InvalidDocument, BSON::InvalidKeyName, BSON::InvalidStringEncoding => ex
+           bulk_message = "Bulk write error - #{ex.message} - examine result for complete information"
+           ex = BulkWriteError.new(bulk_message, Mongo::ErrorCode::INVALID_BSON,
+                                   {:op_type => op_type, :serialize => doc, :ord => doc[:ord], :error => ex})
+           errors << ex
+           break if options[:ordered]
+         rescue Mongo::WriteConcernError => ex
+           write_concern_errors << ex
+           exchanges << {:op_type => op_type, :batch => [doc], :opts => opts, :response => ex.result}
+         rescue Mongo::OperationFailure => ex
+           errors << ex
+           exchanges << {:op_type => op_type, :batch => [doc], :opts => opts, :response => ex.result}
+           break if options[:ordered] && ex.result["err"] != "norepl"
+         end
+       end
+       [errors, write_concern_errors, exchanges]
+     end
+
+     private
+
+     def batch_message_initialize(message, op_type, continue_on_error, write_concern)
+       message.clear!.clear
+       message.put_int(continue_on_error ? 1 : 0)
+       BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{@name}")
+     end
+
+     def batch_message_append(message, serialized_doc, write_concern)
+       message.put_binary(serialized_doc.to_s)
+     end
+
+     def batch_message_send(message, op_type, batch_docs, write_concern, continue_on_error)
+       instrument(:insert, :database => @db.name, :collection => @name, :documents => batch_docs) do
+         if Mongo::WriteConcern.gle?(write_concern)
+           @connection.send_message_with_gle(Mongo::Constants::OP_INSERT, message, @db.name, nil, write_concern)
+         else
+           @connection.send_message(Mongo::Constants::OP_INSERT, message)
+         end
+       end
+     end
+
+     def batch_write_max_sizes(write_concern)
+       [@connection.max_message_size, @connection.max_message_size, @connection.max_bson_size]
+     end
+
+   end
+
+   class CollectionCommandWriter < CollectionWriter
+     def initialize(collection)
+       super(collection)
+     end
+
+     def send_write_command(op_type, selector, doc_or_docs, check_keys, opts, write_concern, collection_name=@name)
+       if op_type == :insert
+         argument = [doc_or_docs].flatten(1).compact
+       elsif op_type == :update
+         argument = [{:q => selector, :u => doc_or_docs, :multi => !!opts[:multi]}]
+         argument.first.merge!(:upsert => opts[:upsert]) if opts[:upsert]
+       elsif op_type == :delete
+         argument = [{:q => selector, :limit => (opts[:limit] || 0)}]
+       else
+         raise ArgumentError, "Write operation type must be :insert, :update or :delete"
+       end
+       request = BSON::OrderedHash[op_type, collection_name, WRITE_COMMAND_ARG_KEY[op_type], argument]
+       request.merge!(:writeConcern => write_concern, :ordered => !opts[:continue_on_error])
+       request.merge!(opts)
+       instrument(op_type, :database => @db.name, :collection => collection_name, :selector => selector, :documents => doc_or_docs) do
+         @db.command(request)
+       end
+     end
+
+     def bulk_execute(ops, options, opts = {})
+       errors = []
+       write_concern_errors = []
+       exchanges = []
+       ops = (options[:ordered] == false) ? sort_by_first_sym(ops) : ops # sort by write-type
+       ordered_group_by_first(ops).each do |op_type, documents|
+         documents.collect! {|doc| {:d => @collection.pk_factory.create_pk(doc[:d]), :ord => doc[:ord]} } if op_type == :insert
+         error_docs, batch_errors, batch_write_concern_errors, batch_exchanges =
+           batch_write(op_type, documents, check_keys = false, opts.merge(:ordered => options[:ordered]))
+         errors += batch_errors
+         write_concern_errors += batch_write_concern_errors
+         exchanges += batch_exchanges
+         break if options[:ordered] && !batch_errors.empty?
+       end
+       [errors, write_concern_errors, exchanges]
+     end
+
+     private
+
+     def batch_message_initialize(message, op_type, continue_on_error, write_concern)
+       message.clear!.clear
+       @bson_empty ||= BSON::BSON_CODER.serialize({})
+       message.put_binary(@bson_empty.to_s)
+       message.unfinish!.array!(WRITE_COMMAND_ARG_KEY[op_type])
+     end
+
+     def batch_message_append(message, serialized_doc, write_concern)
+       message.push_doc!(serialized_doc)
+     end
+
+     def batch_message_send(message, op_type, batch_docs, write_concern, continue_on_error)
+       message.finish!
+       request = BSON::OrderedHash[op_type, @name, :bson, message]
+       request.merge!(:writeConcern => write_concern, :ordered => !continue_on_error)
+       instrument(:insert, :database => @db.name, :collection => @name, :documents => batch_docs) do
+         @db.command(request)
+       end
+     end
+
+     def batch_write_max_sizes(write_concern)
+       [MongoClient::COMMAND_HEADROOM, MongoClient::APPEND_HEADROOM, MongoClient::SERIALIZE_HEADROOM].collect{|h| @connection.max_bson_size + h}
+     end
+
+   end
+
+ end
+
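For orientation, the writer classes above sit behind the driver's bulk-write machinery; applications normally reach them through Collection's bulk-op API rather than instantiating a CollectionWriter directly. A minimal usage sketch follows (host, database, and collection names are illustrative; the bulk-op calls follow the 1.x driver's documented API, so treat the exact method names as an assumption if you are on a different release):

  require 'mongo'

  # Illustrative setup - assumes a mongod listening on localhost:27017.
  client = Mongo::MongoClient.new('localhost', 27017)
  coll   = client['test']['people']

  bulk = coll.initialize_ordered_bulk_op        # ordered: stop at the first error
  bulk.insert({'_id' => 1, 'name' => 'Ada'})
  bulk.insert({'_id' => 2, 'name' => 'Grace'})
  bulk.find({'_id' => 1}).update({'$set' => {'active' => true}})

  begin
    bulk.execute    # routed through CollectionCommandWriter (write commands) or
                    # CollectionOperationWriter (legacy opcodes), per server support
  rescue Mongo::BulkWriteError => ex
    p ex.result     # aggregates the per-batch exchanges and errors collected above
  end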
data/lib/mongo/connection.rb
@@ -0,0 +1,19 @@
+ # Copyright (C) 2009-2013 MongoDB, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ require 'mongo/connection/socket'
+ require 'mongo/connection/node'
+ require 'mongo/connection/pool'
+ require 'mongo/connection/pool_manager'
+ require 'mongo/connection/sharding_pool_manager'
data/lib/mongo/connection/node.rb
@@ -0,0 +1,239 @@
+ # Copyright (C) 2009-2013 MongoDB, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ module Mongo
+   class Node
+
+     attr_accessor :host, :port, :address, :client, :socket, :last_state
+
+     def initialize(client, host_port)
+       @client = client
+       @manager = @client.local_manager
+       @host, @port = Support.normalize_seeds(host_port)
+       @address = "#{@host}:#{@port}"
+       @config = nil
+       @socket = nil
+       @node_mutex = Mutex.new
+     end
+
+     def eql?(other)
+       (other.is_a?(Node) && @address == other.address)
+     end
+     alias :== :eql?
+
+     def =~(other)
+       if other.is_a?(String)
+         h, p = Support.normalize_seeds(other)
+         h == @host && p == @port
+       else
+         false
+       end
+     end
+
+     def host_string
+       address
+     end
+
+     def config
+       connect unless connected?
+       set_config unless @config || !connected?
+       @config
+     end
+
+     def inspect
+       "<Mongo::Node:0x#{self.object_id.to_s(16)} @host=#{@host} @port=#{@port}>"
+     end
+
+     # Create a connection to the provided node,
+     # and, if successful, return the socket. Otherwise,
+     # return nil.
+     def connect
+       @node_mutex.synchronize do
+         begin
+           @socket = @client.socket_class.new(@host, @port,
+                                              @client.op_timeout,
+                                              @client.connect_timeout,
+                                              @client.socket_opts)
+         rescue ConnectionTimeoutError, OperationTimeout, ConnectionFailure, OperationFailure,
+                SocketError, SystemCallError, IOError => ex
+           @client.log(:debug, "Failed connection to #{host_string} with #{ex.class}, #{ex.message}.")
+           close
+         end
+       end
+     end
+
+     # This should only be called within a mutex
+     def close
+       if @socket && !@socket.closed?
+         @socket.close
+       end
+       @socket = nil
+       @config = nil
+     end
+
+     def connected?
+       @socket != nil && !@socket.closed?
+     end
+
+     def active?
+       begin
+         result = @client['admin'].command({:ping => 1}, :socket => @socket)
+       rescue OperationFailure, SocketError, SystemCallError, IOError
+         return nil
+       end
+       result['ok'] == 1
+     end
+
+     # Get the configuration for the provided node as returned by the
+     # ismaster command. Additionally, check that the replica set name
+     # matches with the name provided.
+     def set_config
+       @node_mutex.synchronize do
+         begin
+           if @config
+             @last_state = @config['ismaster'] ? :primary : :other
+           end
+
+           if @client.connect_timeout
+             Timeout::timeout(@client.connect_timeout, OperationTimeout) do
+               @config = @client['admin'].command({:ismaster => 1}, :socket => @socket)
+             end
+           else
+             @config = @client['admin'].command({:ismaster => 1}, :socket => @socket)
+           end
+
+           update_max_sizes
+
+           if @config['msg']
+             @client.log(:warn, "#{config['msg']}")
+           end
+
+           unless @client.mongos?
+             check_set_membership(@config)
+             check_set_name(@config)
+           end
+         rescue ConnectionFailure, OperationFailure, OperationTimeout, SocketError, SystemCallError, IOError => ex
+           @client.log(:warn, "Attempted connection to node #{host_string} raised " +
+                              "#{ex.class}: #{ex.message}")
+           # Socket may already be nil from issuing command
+           close
+         end
+       end
+     end
+
+     # Return a list of replica set nodes from the config.
+     # Note: this excludes arbiters.
+     def node_list
+       nodes = []
+       nodes += config['hosts'] if config['hosts']
+       nodes += config['passives'] if config['passives']
+       nodes += ["#{@host}:#{@port}"] if @client.mongos?
+       nodes
+     end
+
+     def arbiters
+       return [] unless config['arbiters']
+       config['arbiters'].map do |arbiter|
+         Support.normalize_seeds(arbiter)
+       end
+     end
+
+     def primary?
+       config['ismaster'] == true || config['ismaster'] == 1
+     end
+
+     def secondary?
+       config['secondary'] == true || config['secondary'] == 1
+     end
+
+     def tags
+       config['tags'] || {}
+     end
+
+     def host_port
+       [@host, @port]
+     end
+
+     def hash
+       address.hash
+     end
+
+     def healthy?
+       connected? && config
+     end
+
+     def max_bson_size
+       @max_bson_size || DEFAULT_MAX_BSON_SIZE
+     end
+
+     def max_message_size
+       @max_message_size || max_bson_size * MESSAGE_SIZE_FACTOR
+     end
+
+     def max_wire_version
+       @max_wire_version || 0
+     end
+
+     def min_wire_version
+       @min_wire_version || 0
+     end
+
+     def wire_version_feature?(feature)
+       min_wire_version <= feature && feature <= max_wire_version
+     end
+
+     def max_write_batch_size
+       @max_write_batch_size || Mongo::MongoClient::DEFAULT_MAX_WRITE_BATCH_SIZE
+     end
+
+     protected
+
+     # Ensure that this node is a healthy member of a replica set.
+     def check_set_membership(config)
+       if !config.has_key?('hosts')
+         message = "Will not connect to #{host_string} because it's not a member " +
+                   "of a replica set."
+         raise ConnectionFailure, message
+       elsif config['hosts'].length == 1 && !config['ismaster'] &&
+             !config['secondary']
+         message = "Attempting to connect to an unhealthy, single-node replica set."
+         raise ConnectionFailure, message
+       end
+     end
+
+     # Ensure that this node is part of a replica set of the expected name.
+     def check_set_name(config)
+       if @client.replica_set_name
+         if !config['setName']
+           @client.log(:warn, "Could not verify replica set name for member #{host_string} " +
+                              "because ismaster does not return name in this version of MongoDB")
+         elsif @client.replica_set_name != config['setName']
+           message = "Attempting to connect to replica set '#{config['setName']}' on member #{host_string} " +
+                     "but expected '#{@client.replica_set_name}'"
+           raise ReplicaSetConnectionError, message
+         end
+       end
+     end
+
+     private
+
+     def update_max_sizes
+       @max_bson_size = config['maxBsonObjectSize'] || DEFAULT_MAX_BSON_SIZE
+       @max_message_size = config['maxMessageSizeBytes'] || @max_bson_size * MESSAGE_SIZE_FACTOR
+       @max_wire_version = config['maxWireVersion'] || 0
+       @min_wire_version = config['minWireVersion'] || 0
+       @max_write_batch_size = config['maxWriteBatchSize'] || Mongo::MongoClient::DEFAULT_MAX_WRITE_BATCH_SIZE
+     end
+   end
+ end
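Node itself is internal plumbing, but the ismaster-based checks above (check_set_membership, check_set_name) and the limits captured by update_max_sizes surface through the public client classes. A minimal sketch, assuming placeholder hostnames and replica set name (the option names follow the 1.x MongoReplicaSetClient API; verify against your release):

  require 'mongo'

  # Placeholder seed list and set name, for illustration only.
  client = Mongo::MongoReplicaSetClient.new(
    ['db1.example.com:27017', 'db2.example.com:27017'],
    :name            => 'rs_example',  # compared against ismaster's setName in Node#check_set_name
    :connect_timeout => 5,             # bounds the ismaster call in Node#set_config
    :op_timeout      => 10
  )

  client.connected?      # true once healthy members have been discovered
  client.max_bson_size   # limit propagated from Node#update_max_sizes
  client.close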