mongo 1.10.1 → 1.10.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 5c4b605a3e5313f94d6b6ea757c823eff8474def
- data.tar.gz: 91e2b48a46fda17e05c59dbec5a2fd8f6ea7ada1
+ metadata.gz: 1a1eba07acbd5cbdc1ea149bee675c8cb19414fc
+ data.tar.gz: 94ede5709c9ed0d5aef5e05e19f6a4c90f5cc1d1
  SHA512:
- metadata.gz: 12c90d51b5f44b11721f85e043565238dbfa483301c7448df034516a4facd51aa3c486feb2a4b60e822c9f10f154a2d2ea92bf2aebefa6a33ea8e1c3bf78258f
- data.tar.gz: 9d3ffaa70dec92c43b5dcc43cc14183a9a7537ed0e2f1526f63e4ff9b2e648e99fff7b280828a0bb0e31d1740df86ba76320c33a24a2181a635028d8b8cae6bc
+ metadata.gz: 007196c1e574ed834d0cdf44cde761220a531367d303aa1f074955ac112af6cf16a43943f466a00f1b7e955902e702d0ca99c67a33571d9341095b7258d4ef57
+ data.tar.gz: b8fcc33bf2f9119ada0e8b5c3010ad9246dd7cc32daa6240980955dd680ddd4b8474e92f3ea0fcbe9178042e6536e324d42d6a476c9ef042563f2509d35068c2
Binary file
data.tar.gz.sig CHANGED
Binary file
data/VERSION CHANGED
@@ -1 +1 @@
- 1.10.1
+ 1.10.2
@@ -294,9 +294,16 @@ module Mongo
  elsif op_type == :update
  n_upserted = 0
  if (upserted = response.fetch("upserted", nil)) # assignment
- upserted = [{"_id" => upserted}] if upserted.class == BSON::ObjectId # OP_UPDATE non-array
+ upserted = [{"_id" => upserted}] if upserted.class != Array # OP_UPDATE non-array
  n_upserted = upserted.size
  concat(result, "upserted", merge_indexes(upserted, exchange))
+ elsif (response["updatedExisting"] == false && n == 1)
+ # workaround for DRIVERS-151 (non-ObjectID _id fields in pre-2.6 servers)
+ op = exchange[:batch][0]
+ missing_id = op[:u].fetch(:_id, op[:q][:_id]) # _id in update document takes precedence
+ upserted = [ { "_id" => missing_id, "index" => 0 } ]
+ n_upserted = n
+ concat(result, "upserted", merge_indexes(upserted, exchange))
  end
  tally(result, "nUpserted", n_upserted) if n_upserted > 0
  tally(result, "nMatched", n - n_upserted)
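Note on the DRIVERS-151 workaround in the hunk above: when a pre-2.6 server acknowledges an upsert but does not report the upserted _id (which happens when the client supplied its own, non-ObjectId _id), the id can be recovered from the operation that was sent. A minimal sketch of that recovery, assuming an op hash shaped like the driver's batch entries (:q selector, :u update document); the helper name is illustrative, not the driver's API:

    # Hypothetical helper: recover the upserted _id when the server omits it.
    def recover_upserted_id(op)
      # An _id set explicitly in the update/replacement document takes
      # precedence; otherwise fall back to the _id in the query selector.
      op[:u].fetch(:_id, op[:q][:_id])
    end

    op = { :q => { :_id => 3 }, :u => { "$set" => { :b => 3 } } }
    recover_upserted_id(op) # => 3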
@@ -17,6 +17,11 @@ module Mongo
  include ThreadLocalVariableManager

  attr_reader :client,
+ :hosts,
+ :pools,
+ :secondaries,
+ :secondary_pools,
+ :arbiters,
  :primary,
  :primary_pool,
  :seeds,
@@ -36,12 +41,12 @@ module Mongo
  @client = client
  @seeds = seeds

+ initialize_immutable_state
+ initialize_mutable_state
+
  @pools = Set.new
  @primary = nil
  @primary_pool = nil
- @secondaries = Set.new
- @secondary_pools = []
- @hosts = Set.new
  @members = Set.new
  @refresh_required = false
  @max_bson_size = DEFAULT_MAX_BSON_SIZE
@@ -70,6 +75,7 @@ module Mongo
  thread_local[:locks][:connecting_manager] = false
  end
  end
+ clone_state
  end

  def refresh!(additional_seeds)
@@ -140,47 +146,6 @@ module Mongo
  read_pool.host_port
  end

- def hosts
- @connect_mutex.synchronize do
- @hosts.nil? ? nil : @hosts.clone
- end
- end
-
- def pools
- @connect_mutex.synchronize do
- @pools.nil? ? nil : @pools.clone
- end
- end
-
- def secondaries
- @connect_mutex.synchronize do
- @secondaries.nil? ? nil : @secondaries.clone
- end
- end
-
- def secondary_pools
- @connect_mutex.synchronize do
- @secondary_pools.nil? ? nil : @secondary_pools.clone
- end
- end
-
- def arbiters
- @connect_mutex.synchronize do
- @arbiters.nil? ? nil : @arbiters.clone
- end
- end
-
- def state_snapshot
- @connect_mutex.synchronize do
- { :pools => @pools.nil? ? nil : @pools.clone,
- :secondaries => @secondaries.nil? ? nil : @secondaries.clone,
- :secondary_pools => @secondary_pools.nil? ? nil : @secondary_pools.clone,
- :hosts => @hosts.nil? ? nil : @hosts.clone,
- :arbiters => @arbiters.nil? ? nil : @arbiters.clone
- }
- end
- end
-
  private

  def update_max_sizes
@@ -203,7 +168,7 @@ module Mongo

  # For any existing members, close and remove any that are unhealthy or already closed.
  def disconnect_old_members
- @pools.reject! {|pool| !pool.healthy? }
+ @pools_mutable.reject! {|pool| !pool.healthy? }
  @members.reject! {|node| !node.healthy? }
  end

@@ -243,13 +208,13 @@ module Mongo
  def initialize_pools(members)
  @primary_pool = nil
  @primary = nil
- @secondaries.clear
- @secondary_pools.clear
- @hosts.clear
+ @secondaries_mutable.clear
+ @secondary_pools_mutable.clear
+ @hosts_mutable.clear

  members.each do |member|
  member.last_state = nil
- @hosts << member.host_string
+ @hosts_mutable << member.host_string
  if member.primary?
  assign_primary(member)
  elsif member.secondary?
@@ -258,13 +223,13 @@ module Mongo
  end
  end

- @arbiters = members.first.arbiters
+ @arbiters_mutable = members.first.arbiters
  end

  def assign_primary(member)
  member.last_state = :primary
  @primary = member.host_port
- if existing = @pools.detect {|pool| pool.node == member }
+ if existing = @pools_mutable.detect {|pool| pool.node == member }
  @primary_pool = existing
  else
  @primary_pool = Pool.new(self.client, member.host, member.port,
@@ -272,23 +237,23 @@ module Mongo
  :timeout => self.client.pool_timeout,
  :node => member
  )
- @pools << @primary_pool
+ @pools_mutable << @primary_pool
  end
  end

  def assign_secondary(member)
  member.last_state = :secondary
- @secondaries << member.host_port
- if existing = @pools.detect {|pool| pool.node == member }
- @secondary_pools << existing
+ @secondaries_mutable << member.host_port
+ if existing = @pools_mutable.detect {|pool| pool.node == member }
+ @secondary_pools_mutable << existing
  else
  pool = Pool.new(self.client, member.host, member.port,
  :size => self.client.pool_size,
  :timeout => self.client.pool_timeout,
  :node => member
  )
- @secondary_pools << pool
- @pools << pool
+ @secondary_pools_mutable << pool
+ @pools_mutable << pool
  end
  end

@@ -321,5 +286,35 @@ module Mongo
  end
  members
  end
+
+ def initialize_immutable_state
+ @hosts = Set.new.freeze
+ @pools = Set.new.freeze
+ @secondaries = Set.new.freeze
+ @secondary_pools = [].freeze
+ @arbiters = [].freeze
+ end
+
+ def initialize_mutable_state
+ @hosts_mutable = Set.new
+ @pools_mutable = Set.new
+ @secondaries_mutable = Set.new
+ @secondary_pools_mutable = []
+ @arbiters_mutable = []
+ end
+
+ def clone_state
+ @hosts = @hosts_mutable.clone
+ @pools = @pools_mutable.clone
+ @secondaries = @secondaries_mutable.clone
+ @secondary_pools = @secondary_pools_mutable.clone
+ @arbiters = @arbiters_mutable.clone
+
+ @hosts.freeze
+ @pools.freeze
+ @secondaries.freeze
+ @secondary_pools.freeze
+ @arbiters.freeze
+ end
  end
  end
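The three private methods added above form a copy-on-write snapshot: the *_mutable collections are the working state, mutated only while the connect mutex is held, and clone_state publishes frozen clones through the plain attr_readers added at the top of the class, so readers never take the lock. A minimal, self-contained sketch of the same pattern (illustrative names, not the driver's API):

    require 'set'
    require 'thread'

    class SnapshotExample
      attr_reader :hosts # readers see the last published frozen clone, lock-free

      def initialize
        @lock          = Mutex.new
        @hosts         = Set.new.freeze # published snapshot
        @hosts_mutable = Set.new        # working copy, touched only under @lock
      end

      def connect(seed_hosts)
        @lock.synchronize do
          @hosts_mutable.replace(seed_hosts)
          @hosts = @hosts_mutable.clone.freeze # publish a fresh snapshot
        end
      end
    end

    s = SnapshotExample.new
    s.connect(Set['a:27017', 'b:27017'])
    s.hosts # frozen Set; reading it never re-enters @lock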
@@ -32,5 +32,5 @@ Gem::Specification.new do |s|
  s.require_paths = ['lib']
  s.has_rdoc = 'yard'

- s.add_dependency('bson', "~> #{s.version}")
+ s.add_dependency('bson', "#{s.version}")
  end
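The gemspec now pins bson to the exact driver version instead of a pessimistic (~>) constraint. A quick illustration of the difference using Gem::Dependency (a hypothetical check, not part of the gem):

    require 'rubygems'

    pessimistic = Gem::Dependency.new('bson', '~> 1.10.2') # allows any 1.10.x >= 1.10.2
    exact       = Gem::Dependency.new('bson', '1.10.2')    # allows only 1.10.2

    pessimistic.match?('bson', '1.10.3') # => true
    exact.match?('bson', '1.10.3')       # => false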
@@ -339,6 +339,16 @@ class BulkWriteCollectionViewTest < Test::Unit::TestCase
  end
  end

+ # ----- REPLACE -----
+
+ should "raise an error when we attempt to use replace" do
+ assert_raise NoMethodError do
+ bulk = @collection.initialize_ordered_bulk_op
+ bulk.find({:a => 2}).replace({:a => 1})
+ bulk.execute
+ end
+ end
+
  # ----- REPLACE_ONE -----

  should "check arg for replacement, set :update, :u, :multi, terminate and return view for #replace_one" do
@@ -501,6 +511,27 @@ class BulkWriteCollectionViewTest < Test::Unit::TestCase
  end
  end

+ should "count nUpserted correctly when _id is not an ObjectId (upsert-update)" do
+ with_write_commands_and_operations(@db.connection) do |wire_version|
+ @collection.remove
+
+ bulk = @collection.initialize_unordered_bulk_op
+ bulk.find({:_id => 3}).upsert.update({"$set" => {:b => 3}})
+ result = bulk.execute
+ assert_match_document(
+ {
+ "ok" => 1,
+ "n" => 1,
+ "nMatched" => 0,
+ "nUpserted" => 1,
+ "nModified" => batch_commands?(wire_version) ? 0 : nil,
+ "upserted" => [
+ { "_id" => 3, "index" => 0 }
+ ]
+ }, result, "wire_version:#{wire_version}")
+ end
+ end
+
  # ----- UPSERT-UPDATE_ONE -----

  should "#upsert a document without affecting non-upsert update_ones" do
@@ -544,6 +575,27 @@ class BulkWriteCollectionViewTest < Test::Unit::TestCase
  end
  end

+
+ should "count nUpserted correctly when _id is not an ObjectId (upsert-update_one)" do
+ with_write_commands_and_operations(@db.connection) do |wire_version|
+ @collection.remove
+ bulk = @collection.initialize_ordered_bulk_op
+ bulk.find({:_id => 2}).upsert.update_one({"$set" => {:x => 2}})
+ result = bulk.execute
+ assert_match_document(
+ {
+ "ok" => 1,
+ "n" => 1,
+ "nMatched" => 0,
+ "nUpserted" => 1,
+ "nModified" => batch_commands?(wire_version) ? 0 : nil,
+ "upserted" => [
+ {"_id" => 2, "index" => 0 }
+ ]
+ }, result, "wire_version:#{wire_version}")
+ end
+ end
+
  # ----- UPSERT-REPLACE_ONE -----

  should "not affect non-upsert replace_ones in same batch as #upsert-replace_one" do
@@ -598,6 +650,28 @@ class BulkWriteCollectionViewTest < Test::Unit::TestCase
  assert_equal({"nM" => nil}, nil_tally_responses([{"nM" => 1}, {"nM" => nil}, {"nM" => 3}], "nM"))
  end

+
+ should "count nUpserted correctly when _id is not an ObjectId (upsert-replace_one)" do
+ with_write_commands_and_operations(@db.connection) do |wire_version|
+ @collection.remove
+ bulk = @collection.initialize_unordered_bulk_op
+ bulk.find({:a => 1}).upsert.replace_one({:_id => 2})
+ result = bulk.execute
+ assert_match_document(
+ {
+ "ok" => 1,
+ "n" => 1,
+ "nMatched" => 0,
+ "nUpserted" => 1,
+ "nModified" => batch_commands?(wire_version) ? 0 : nil,
+ "upserted" => [
+ { "_id" => 2, "index" => 0 }
+ ]
+ }, result, "wire_version:#{wire_version}")
+ assert_equal 1, @collection.count
+ end
+ end
+
  # ----- MIXED OPS, ORDERED -----

  should "execute, return result and reset @ops for #execute" do
@@ -123,6 +123,24 @@ class ReplicaSetRefreshTest < Test::Unit::TestCase
  end
  end

+
+ def test_manager_recursive_locking
+ # See RUBY-775
+ # This tests that there isn't recursive locking when a pool manager reconnects
+ # to all replica set members. The bug in RUBY-775 occurred because the same lock
+ # acquired in order to connect the pool manager was used to read the pool manager's
+ # state.
+ client = MongoReplicaSetClient.new(@rs.repl_set_seeds)
+
+ cursor = client[TEST_DB]['rs-refresh-test'].find
+ client.stubs(:receive_message).raises(ConnectionFailure)
+ client.manager.stubs(:refresh_required?).returns(true)
+ client.manager.stubs(:check_connection_health).returns(true)
+ assert_raise ConnectionFailure do
+ cursor.next
+ end
+ end
+
  =begin
  def test_automated_refresh_with_removed_node
  client = MongoReplicaSetClient.new(@rs.repl_set_seeds,
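The comment in the new regression test describes the failure mode: the reader methods removed from the pool manager took @connect_mutex, so calling one of them while connect already held that lock re-entered the same mutex. Ruby's Mutex is not reentrant, which is what makes the frozen-snapshot readers necessary; a tiny, self-contained illustration (behavior as in MRI):

    require 'thread'

    lock = Mutex.new
    begin
      lock.synchronize do
        # e.g. connect() holding the lock, then calling a reader that also locks
        lock.synchronize { }
      end
    rescue ThreadError => e
      puts e.message # MRI reports "deadlock; recursive locking"
    end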
@@ -106,37 +106,6 @@ class PoolManagerUnitTest < Test::Unit::TestCase
  assert_equal [['localhost', 27020]], manager.arbiters
  end

- should "return clones of pool lists" do
-
- @db.stubs(:command).returns(
- # First call to get a socket.
- @ismaster.merge({'ismaster' => true}),
-
- # Subsequent calls to configure pools.
- @ismaster.merge({'ismaster' => true}),
- @ismaster.merge({'secondary' => true, 'maxBsonObjectSize' => 500}),
- @ismaster.merge({'secondary' => true, 'maxMessageSizeBytes' => 700}),
- @ismaster.merge({'arbiterOnly' => true})
- )
-
- seeds = [['localhost', 27017], ['localhost', 27018]]
- manager = Mongo::PoolManager.new(@client, seeds)
- @client.stubs(:local_manager).returns(manager)
- manager.connect
-
- assert_not_equal manager.instance_variable_get(:@arbiters).object_id, manager.arbiters.object_id
- assert_not_equal manager.instance_variable_get(:@secondaries).object_id, manager.secondaries.object_id
- assert_not_equal manager.instance_variable_get(:@secondary_pools).object_id, manager.secondary_pools.object_id
- assert_not_equal manager.instance_variable_get(:@hosts).object_id, manager.hosts.object_id
- assert_not_equal manager.instance_variable_get(:@pools).object_id, manager.pools.object_id
-
- assert_not_equal manager.instance_variable_get(:@arbiters).object_id, manager.state_snapshot[:arbiters].object_id
- assert_not_equal manager.instance_variable_get(:@secondaries).object_id, manager.state_snapshot[:secondaries].object_id
- assert_not_equal manager.instance_variable_get(:@secondary_pools).object_id, manager.state_snapshot[:secondary_pools].object_id
- assert_not_equal manager.instance_variable_get(:@hosts).object_id, manager.state_snapshot[:hosts].object_id
- assert_not_equal manager.instance_variable_get(:@pools).object_id, manager.state_snapshot[:pools].object_id
- end
-
  end

  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: mongo
  version: !ruby/object:Gem::Version
- version: 1.10.1
+ version: 1.10.2
  platform: ruby
  authors:
  - Emily Stolfo
@@ -34,22 +34,22 @@ cert_chain:
  JrZM8w8wGbIOeLtoQqa7HB/jOYbTahH7KMNh2LHAbOR93hNIJxVRa4iwxiMQ75tN
  9WUIAJ4AEtjwRg1Bz0OwDo3aucPCBpx77+/FWhv7JYY=
  -----END CERTIFICATE-----
- date: 2014-05-16 00:00:00.000000000 Z
+ date: 2014-06-16 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: bson
  requirement: !ruby/object:Gem::Requirement
  requirements:
- - - "~>"
+ - - '='
  - !ruby/object:Gem::Version
- version: 1.10.1
+ version: 1.10.2
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
- - - "~>"
+ - - '='
  - !ruby/object:Gem::Version
- version: 1.10.1
+ version: 1.10.2
  description: A Ruby driver for MongoDB. For more information about Mongo, see http://www.mongodb.org.
  email: mongodb-dev@googlegroups.com
  executables:
metadata.gz.sig CHANGED
Binary file