mongo 1.10.1-java → 1.10.2-java

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: ae88c8e129b138162459577c3d5aab3464be2aa5
- data.tar.gz: c362c3a5f77295bb68227c8263dc9418c26e464d
+ metadata.gz: f60da78d5d8323ce4794fe5f3daed3383ee6b8b2
+ data.tar.gz: aec0cd5d5273cd94fa6a27d2bcf5c53ea4fa3278
  SHA512:
- metadata.gz: 83d37ce49373264e0089500c7ae049301bbd29d440a995f86723ffd4a746579f79288d5454212e9305260d35e72b90d929c71b5fa4f6a0ebfdab46f5dde91ec5
- data.tar.gz: aabe35188da7ba722769546d9344ddb16607098a09d3ef8c585ae18546c858bcfb68e95294914c590531c25b6b818990f0189fa174c047dcabd2e3f0cdaed4a1
+ metadata.gz: f3e8d6a2738d5a5ab569bb36148fecb823526fedcccc71c4d8d8a216badadd3f11b0ba9fc4d53906bdc8b34438bd2e2f91777967a120da9f6ff04c4a1805dd6d
+ data.tar.gz: 0b900d45b9abad604db1e473dadaf125b2fac5000f50bc92a246918462199f741a1590f9123726de6105a231ba1673a2700a3e1658a1359deadadf65aedb2b40
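The new digests above are the SHA1 and SHA512 checksums of the rebuilt metadata.gz and data.tar.gz payloads inside the .gem archive. A minimal Ruby sketch for recomputing them on a locally unpacked gem; the file paths are illustrative and assume both payload files sit in the current directory.

require 'digest'

# Recompute the digests recorded in checksums.yaml for an unpacked gem,
# e.g. after `tar xf mongo-1.10.2-java.gem` (a .gem is a tar archive).
%w[metadata.gz data.tar.gz].each do |file|
  puts "SHA1   #{file}: #{Digest::SHA1.file(file).hexdigest}"
  puts "SHA512 #{file}: #{Digest::SHA512.file(file).hexdigest}"
end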
data.tar.gz.sig CHANGED
Binary file
data/VERSION CHANGED
@@ -1 +1 @@
- 1.10.1
+ 1.10.2
@@ -294,9 +294,16 @@ module Mongo
  elsif op_type == :update
  n_upserted = 0
  if (upserted = response.fetch("upserted", nil)) # assignment
- upserted = [{"_id" => upserted}] if upserted.class == BSON::ObjectId # OP_UPDATE non-array
+ upserted = [{"_id" => upserted}] if upserted.class != Array # OP_UPDATE non-array
  n_upserted = upserted.size
  concat(result, "upserted", merge_indexes(upserted, exchange))
+ elsif (response["updatedExisting"] == false && n == 1)
+ # workaround for DRIVERS-151 (non-ObjectID _id fields in pre-2.6 servers)
+ op = exchange[:batch][0]
+ missing_id = op[:u].fetch(:_id, op[:q][:_id]) # _id in update document takes precedence
+ upserted = [ { "_id" => missing_id, "index" => 0 } ]
+ n_upserted = n
+ concat(result, "upserted", merge_indexes(upserted, exchange))
  end
  tally(result, "nUpserted", n_upserted) if n_upserted > 0
  tally(result, "nMatched", n - n_upserted)
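The hunk above handles the DRIVERS-151 case: a pre-2.6 server acknowledging an upsert of a document whose _id is not an ObjectId omits the "upserted" field from the getLastError-style reply, so the driver infers the upsert from updatedExisting == false and recovers the _id from the operation it sent. A standalone sketch of that inference, using hypothetical local names (response, op) rather than the driver's internals:

# Sketch only: mirrors the aggregation logic above for a single-op batch.
# `response` stands in for a legacy write acknowledgement, `op` for the
# update operation the driver sent; both names are illustrative.
def infer_upsert(response, op)
  if (upserted = response["upserted"])
    upserted = [{ "_id" => upserted }] unless upserted.is_a?(Array)
    [upserted.size, upserted]
  elsif response["updatedExisting"] == false && response["n"] == 1
    # Pre-2.6 servers omit "upserted" for non-ObjectId _id values, so take
    # the _id from the update document, falling back to the query selector.
    missing_id = op[:u].fetch(:_id, op[:q][:_id])
    [1, [{ "_id" => missing_id, "index" => 0 }]]
  else
    [0, []]
  end
end

n_upserted, upserted = infer_upsert(
  { "ok" => 1, "n" => 1, "updatedExisting" => false },
  { :q => { :_id => 3 }, :u => { "$set" => { :b => 3 } } }
)
# => n_upserted == 1, upserted == [{ "_id" => 3, "index" => 0 }]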
@@ -17,6 +17,11 @@ module Mongo
  include ThreadLocalVariableManager

  attr_reader :client,
+ :hosts,
+ :pools,
+ :secondaries,
+ :secondary_pools,
+ :arbiters,
  :primary,
  :primary_pool,
  :seeds,
@@ -36,12 +41,12 @@ module Mongo
  @client = client
  @seeds = seeds

+ initialize_immutable_state
+ initialize_mutable_state
+
  @pools = Set.new
  @primary = nil
  @primary_pool = nil
- @secondaries = Set.new
- @secondary_pools = []
- @hosts = Set.new
  @members = Set.new
  @refresh_required = false
  @max_bson_size = DEFAULT_MAX_BSON_SIZE
@@ -70,6 +75,7 @@ module Mongo
  thread_local[:locks][:connecting_manager] = false
  end
  end
+ clone_state
  end

  def refresh!(additional_seeds)
@@ -140,47 +146,6 @@ module Mongo
  read_pool.host_port
  end

- def hosts
- @connect_mutex.synchronize do
- @hosts.nil? ? nil : @hosts.clone
- end
- end
-
- def pools
- @connect_mutex.synchronize do
- @pools.nil? ? nil : @pools.clone
- end
- end
-
- def secondaries
- @connect_mutex.synchronize do
- @secondaries.nil? ? nil : @secondaries.clone
- end
- end
-
- def secondary_pools
- @connect_mutex.synchronize do
- @secondary_pools.nil? ? nil : @secondary_pools.clone
- end
- end
-
- def arbiters
- @connect_mutex.synchronize do
- @arbiters.nil? ? nil : @arbiters.clone
- end
- end
-
- def state_snapshot
- @connect_mutex.synchronize do
- { :pools => @pools.nil? ? nil : @pools.clone,
- :secondaries => @secondaries.nil? ? nil : @secondaries.clone,
- :secondary_pools => @secondary_pools.nil? ? nil : @secondary_pools.clone,
- :hosts => @hosts.nil? ? nil : @hosts.clone,
- :arbiters => @arbiters.nil? ? nil : @arbiters.clone
- }
- end
- end
-
  private

  def update_max_sizes
@@ -203,7 +168,7 @@ module Mongo

  # For any existing members, close and remove any that are unhealthy or already closed.
  def disconnect_old_members
- @pools.reject! {|pool| !pool.healthy? }
+ @pools_mutable.reject! {|pool| !pool.healthy? }
  @members.reject! {|node| !node.healthy? }
  end

@@ -243,13 +208,13 @@ module Mongo
  def initialize_pools(members)
  @primary_pool = nil
  @primary = nil
- @secondaries.clear
- @secondary_pools.clear
- @hosts.clear
+ @secondaries_mutable.clear
+ @secondary_pools_mutable.clear
+ @hosts_mutable.clear

  members.each do |member|
  member.last_state = nil
- @hosts << member.host_string
+ @hosts_mutable << member.host_string
  if member.primary?
  assign_primary(member)
  elsif member.secondary?
@@ -258,13 +223,13 @@ module Mongo
  end
  end

- @arbiters = members.first.arbiters
+ @arbiters_mutable = members.first.arbiters
  end

  def assign_primary(member)
  member.last_state = :primary
  @primary = member.host_port
- if existing = @pools.detect {|pool| pool.node == member }
+ if existing = @pools_mutable.detect {|pool| pool.node == member }
  @primary_pool = existing
  else
  @primary_pool = Pool.new(self.client, member.host, member.port,
@@ -272,23 +237,23 @@ module Mongo
  :timeout => self.client.pool_timeout,
  :node => member
  )
- @pools << @primary_pool
+ @pools_mutable << @primary_pool
  end
  end

  def assign_secondary(member)
  member.last_state = :secondary
- @secondaries << member.host_port
- if existing = @pools.detect {|pool| pool.node == member }
- @secondary_pools << existing
+ @secondaries_mutable << member.host_port
+ if existing = @pools_mutable.detect {|pool| pool.node == member }
+ @secondary_pools_mutable << existing
  else
  pool = Pool.new(self.client, member.host, member.port,
  :size => self.client.pool_size,
  :timeout => self.client.pool_timeout,
  :node => member
  )
- @secondary_pools << pool
- @pools << pool
+ @secondary_pools_mutable << pool
+ @pools_mutable << pool
  end
  end

@@ -321,5 +286,35 @@ module Mongo
  end
  members
  end
+
+ def initialize_immutable_state
+ @hosts = Set.new.freeze
+ @pools = Set.new.freeze
+ @secondaries = Set.new.freeze
+ @secondary_pools = [].freeze
+ @arbiters = [].freeze
+ end
+
+ def initialize_mutable_state
+ @hosts_mutable = Set.new
+ @pools_mutable = Set.new
+ @secondaries_mutable = Set.new
+ @secondary_pools_mutable = []
+ @arbiters_mutable = []
+ end
+
+ def clone_state
+ @hosts = @hosts_mutable.clone
+ @pools = @pools_mutable.clone
+ @secondaries = @secondaries_mutable.clone
+ @secondary_pools = @secondary_pools_mutable.clone
+ @arbiters = @arbiters_mutable.clone
+
+ @hosts.freeze
+ @pools.freeze
+ @secondaries.freeze
+ @secondary_pools.freeze
+ @arbiters.freeze
+ end
  end
  end
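Taken together, these pool-manager hunks replace mutex-guarded reader methods with frozen snapshots: the connect path mutates the *_mutable collections and then publishes frozen clones through plain attr_readers, so readers never need the lock the connect path already holds. A standalone sketch of that publish-a-frozen-clone pattern, using a hypothetical Registry class rather than the driver's PoolManager:

require 'set'

# Minimal sketch of the copy-on-write snapshot pattern used above.
# Registry is a hypothetical stand-in for the driver's PoolManager.
class Registry
  attr_reader :hosts # readers see the last published, frozen snapshot

  def initialize
    @hosts = Set.new.freeze   # immutable state readers may hold forever
    @hosts_mutable = Set.new  # working copy, touched only while rebuilding
    @lock = Mutex.new
  end

  # Rebuild state under the lock, then publish a frozen clone.
  # Readers calling #hosts never acquire @lock, so a rebuild that itself
  # reads #hosts cannot deadlock on a non-reentrant Mutex.
  def rebuild(new_hosts)
    @lock.synchronize do
      @hosts_mutable.replace(new_hosts)
      @hosts = @hosts_mutable.clone.freeze
    end
  end
end

registry = Registry.new
registry.rebuild([['localhost', 27017], ['localhost', 27018]])
snapshot = registry.hosts
registry.rebuild([['localhost', 27019]])
snapshot.include?(['localhost', 27017]) # => true; old snapshot is unaffected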
@@ -32,5 +32,5 @@ Gem::Specification.new do |s|
  s.require_paths = ['lib']
  s.has_rdoc = 'yard'

- s.add_dependency('bson', "~> #{s.version}")
+ s.add_dependency('bson', "#{s.version}")
  end
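The gemspec change swaps the pessimistic constraint for an exact pin: the driver now requires the bson gem at exactly its own version rather than any matching 1.10.x release. A quick illustration of how RubyGems interprets the two requirement strings (the version numbers are only examples):

require 'rubygems'

pessimistic = Gem::Requirement.new('~> 1.10.1') # any 1.10.x >= 1.10.1
exact       = Gem::Requirement.new('1.10.2')    # 1.10.2 only (implicit '=')

pessimistic.satisfied_by?(Gem::Version.new('1.10.3')) # => true
exact.satisfied_by?(Gem::Version.new('1.10.3'))       # => false
exact.satisfied_by?(Gem::Version.new('1.10.2'))       # => true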
@@ -339,6 +339,16 @@ class BulkWriteCollectionViewTest < Test::Unit::TestCase
  end
  end

+ # ----- REPLACE -----
+
+ should "raise an error when we attempt to use replace" do
+ assert_raise NoMethodError do
+ bulk = @collection.initialize_ordered_bulk_op
+ bulk.find({:a => 2}).replace({:a => 1})
+ bulk.execute
+ end
+ end
+
  # ----- REPLACE_ONE -----

  should "check arg for replacement, set :update, :u, :multi, terminate and return view for #replace_one" do
@@ -501,6 +511,27 @@ class BulkWriteCollectionViewTest < Test::Unit::TestCase
  end
  end

+ should "count nUpserted correctly when _id is not an ObjectId (upsert-update)" do
+ with_write_commands_and_operations(@db.connection) do |wire_version|
+ @collection.remove
+
+ bulk = @collection.initialize_unordered_bulk_op
+ bulk.find({:_id => 3}).upsert.update({"$set" => {:b => 3}})
+ result = bulk.execute
+ assert_match_document(
+ {
+ "ok" => 1,
+ "n" => 1,
+ "nMatched" => 0,
+ "nUpserted" => 1,
+ "nModified" => batch_commands?(wire_version) ? 0 : nil,
+ "upserted" => [
+ { "_id" => 3, "index" => 0 }
+ ]
+ }, result, "wire_version:#{wire_version}")
+ end
+ end
+
  # ----- UPSERT-UPDATE_ONE -----

  should "#upsert a document without affecting non-upsert update_ones" do
@@ -544,6 +575,27 @@ class BulkWriteCollectionViewTest < Test::Unit::TestCase
  end
  end

+
+ should "count nUpserted correctly when _id is not an ObjectId (upsert-update_one)" do
+ with_write_commands_and_operations(@db.connection) do |wire_version|
+ @collection.remove
+ bulk = @collection.initialize_ordered_bulk_op
+ bulk.find({:_id => 2}).upsert.update_one({"$set" => {:x => 2}})
+ result = bulk.execute
+ assert_match_document(
+ {
+ "ok" => 1,
+ "n" => 1,
+ "nMatched" => 0,
+ "nUpserted" => 1,
+ "nModified" => batch_commands?(wire_version) ? 0 : nil,
+ "upserted" => [
+ {"_id" => 2, "index" => 0 }
+ ]
+ }, result, "wire_version:#{wire_version}")
+ end
+ end
+
  # ----- UPSERT-REPLACE_ONE -----

  should "not affect non-upsert replace_ones in same batch as #upsert-replace_one" do
@@ -598,6 +650,28 @@ class BulkWriteCollectionViewTest < Test::Unit::TestCase
  assert_equal({"nM" => nil}, nil_tally_responses([{"nM" => 1}, {"nM" => nil}, {"nM" => 3}], "nM"))
  end

+
+ should "count nUpserted correctly when _id is not an ObjectId (upsert-replace_one)" do
+ with_write_commands_and_operations(@db.connection) do |wire_version|
+ @collection.remove
+ bulk = @collection.initialize_unordered_bulk_op
+ bulk.find({:a => 1}).upsert.replace_one({:_id => 2})
+ result = bulk.execute
+ assert_match_document(
+ {
+ "ok" => 1,
+ "n" => 1,
+ "nMatched" => 0,
+ "nUpserted" => 1,
+ "nModified" => batch_commands?(wire_version) ? 0 : nil,
+ "upserted" => [
+ { "_id" => 2, "index" => 0 }
+ ]
+ }, result, "wire_version:#{wire_version}")
+ assert_equal 1, @collection.count
+ end
+ end
+
  # ----- MIXED OPS, ORDERED -----

  should "execute, return result and reset @ops for #execute" do
@@ -123,6 +123,24 @@ class ReplicaSetRefreshTest < Test::Unit::TestCase
  end
  end

+
+ def test_manager_recursive_locking
+ # See RUBY-775
+ # This tests that there isn't recursive locking when a pool manager reconnects
+ # to all replica set members. The bug in RUBY-775 occurred because the same lock
+ # acquired in order to connect the pool manager was used to read the pool manager's
+ # state.
+ client = MongoReplicaSetClient.new(@rs.repl_set_seeds)
+
+ cursor = client[TEST_DB]['rs-refresh-test'].find
+ client.stubs(:receive_message).raises(ConnectionFailure)
+ client.manager.stubs(:refresh_required?).returns(true)
+ client.manager.stubs(:check_connection_health).returns(true)
+ assert_raise ConnectionFailure do
+ cursor.next
+ end
+ end
+
 =begin
  def test_automated_refresh_with_removed_node
  client = MongoReplicaSetClient.new(@rs.repl_set_seeds,
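The regression test above guards against the RUBY-775 failure mode: Ruby's Mutex is not reentrant, so reading state through a lock the reconnect path already holds fails on the same thread. A toy reproduction of just that mechanism; the class and method names are illustrative, not the driver's:

# Toy reproduction of recursive locking on a non-reentrant Mutex.
class LockedState
  def initialize
    @lock = Mutex.new
    @hosts = []
  end

  def hosts
    @lock.synchronize { @hosts.dup } # reader takes the lock
  end

  def reconnect
    @lock.synchronize do
      @hosts = [['localhost', 27017]]
      hosts # re-enters the same Mutex on the same thread
    end
  end
end

begin
  LockedState.new.reconnect
rescue ThreadError => e
  puts e.message # on MRI: "deadlock; recursive locking"
end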
@@ -106,37 +106,6 @@ class PoolManagerUnitTest < Test::Unit::TestCase
  assert_equal [['localhost', 27020]], manager.arbiters
  end

- should "return clones of pool lists" do
-
- @db.stubs(:command).returns(
- # First call to get a socket.
- @ismaster.merge({'ismaster' => true}),
-
- # Subsequent calls to configure pools.
- @ismaster.merge({'ismaster' => true}),
- @ismaster.merge({'secondary' => true, 'maxBsonObjectSize' => 500}),
- @ismaster.merge({'secondary' => true, 'maxMessageSizeBytes' => 700}),
- @ismaster.merge({'arbiterOnly' => true})
- )
-
- seeds = [['localhost', 27017], ['localhost', 27018]]
- manager = Mongo::PoolManager.new(@client, seeds)
- @client.stubs(:local_manager).returns(manager)
- manager.connect
-
- assert_not_equal manager.instance_variable_get(:@arbiters).object_id, manager.arbiters.object_id
- assert_not_equal manager.instance_variable_get(:@secondaries).object_id, manager.secondaries.object_id
- assert_not_equal manager.instance_variable_get(:@secondary_pools).object_id, manager.secondary_pools.object_id
- assert_not_equal manager.instance_variable_get(:@hosts).object_id, manager.hosts.object_id
- assert_not_equal manager.instance_variable_get(:@pools).object_id, manager.pools.object_id
-
- assert_not_equal manager.instance_variable_get(:@arbiters).object_id, manager.state_snapshot[:arbiters].object_id
- assert_not_equal manager.instance_variable_get(:@secondaries).object_id, manager.state_snapshot[:secondaries].object_id
- assert_not_equal manager.instance_variable_get(:@secondary_pools).object_id, manager.state_snapshot[:secondary_pools].object_id
- assert_not_equal manager.instance_variable_get(:@hosts).object_id, manager.state_snapshot[:hosts].object_id
- assert_not_equal manager.instance_variable_get(:@pools).object_id, manager.state_snapshot[:pools].object_id
- end
-
  end

  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: mongo
  version: !ruby/object:Gem::Version
- version: 1.10.1
+ version: 1.10.2
  platform: java
  authors:
  - Emily Stolfo
@@ -34,20 +34,20 @@ cert_chain:
  JrZM8w8wGbIOeLtoQqa7HB/jOYbTahH7KMNh2LHAbOR93hNIJxVRa4iwxiMQ75tN
  9WUIAJ4AEtjwRg1Bz0OwDo3aucPCBpx77+/FWhv7JYY=
  -----END CERTIFICATE-----
- date: 2014-05-16 00:00:00.000000000 Z
+ date: 2014-06-16 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: bson
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
- - - ~>
+ - - '='
  - !ruby/object:Gem::Version
- version: 1.10.1
+ version: 1.10.2
  requirement: !ruby/object:Gem::Requirement
  requirements:
- - - ~>
+ - - '='
  - !ruby/object:Gem::Version
- version: 1.10.1
+ version: 1.10.2
  prerelease: false
  type: :runtime
  description: A Ruby driver for MongoDB. For more information about Mongo, see http://www.mongodb.org.
metadata.gz.sig CHANGED
Binary file