mongo 1.10.0.rc1 → 1.10.0
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data.tar.gz.sig +0 -0
- data/README.md +5 -0
- data/VERSION +1 -1
- data/lib/mongo.rb +1 -0
- data/lib/mongo/bulk_write_collection_view.rb +12 -12
- data/lib/mongo/collection.rb +3 -2
- data/lib/mongo/collection_writer.rb +20 -5
- data/lib/mongo/connection/pool_manager.rb +41 -5
- data/lib/mongo/connection/socket/ssl_socket.rb +5 -1
- data/lib/mongo/cursor.rb +3 -3
- data/lib/mongo/db.rb +1 -1
- data/lib/mongo/exception.rb +3 -0
- data/lib/mongo/functional/authentication.rb +1 -1
- data/lib/mongo/functional/sasl_java.rb +1 -1
- data/lib/mongo/functional/uri_parser.rb +4 -6
- data/lib/mongo/functional/write_concern.rb +4 -7
- data/lib/mongo/gridfs/grid.rb +3 -3
- data/lib/mongo/gridfs/grid_file_system.rb +1 -1
- data/lib/mongo/gridfs/grid_io.rb +4 -6
- data/lib/mongo/mongo_client.rb +19 -11
- data/lib/mongo/mongo_replica_set_client.rb +1 -1
- data/lib/mongo/mongo_sharded_client.rb +2 -2
- data/lib/mongo/networking.rb +13 -9
- data/test/functional/client_test.rb +14 -0
- data/test/functional/collection_test.rb +70 -100
- data/test/functional/grid_io_test.rb +2 -2
- data/test/functional/grid_test.rb +1 -1
- data/test/replica_set/insert_test.rb +15 -1
- data/test/replica_set/replication_ack_test.rb +11 -5
- data/test/shared/ssl_shared.rb +44 -4
- data/test/unit/pool_manager_test.rb +31 -0
- metadata +6 -6
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a4f4eaa561f3080eb698ae91915d4dfed3688a66
+  data.tar.gz: 4d7a8a0c5bb407d5433e641206b579bf44baee13
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 63f4d172c84188ac600cf15b2a6c2336eb960d08b695725ab9a99234d2bb1023e32c10b56a38a9e907c790b6ec213f317551dfadca0cd2d2e9f2830d40b8e51e
+  data.tar.gz: 8f7b36deea7ccdd8a72f43864cc15e6bfc0f6f74a01322b506940029a93dad5bb78ff3c7090c72fd473f5dc4430a95e6808a5d3f753b42aa3e87e363f0a5134c
checksums.yaml.gz.sig
CHANGED
Binary file

data.tar.gz.sig
CHANGED
Binary file
data/README.md
CHANGED

@@ -7,13 +7,17 @@ Installation
 
 **Gem Installation**<br>
 The Ruby driver is released and distributed through RubyGems and it can be installed with the following command:
+
 ```bash
 gem install mongo
 ```
+
 For a significant performance boost, you'll want to install the C-extension:
+
 ```bash
 gem install bson_ext
 ```
+
 **Github Installation**<br>
 For development and test environments (not recommended for production) you can also install the Ruby driver directly from source:
 

@@ -36,6 +40,7 @@ rake install
 Usage
 -----
 Here is a quick example of basic usage for the Ruby driver:
+
 ```ruby
 require 'mongo'
 include Mongo
data/VERSION
CHANGED

@@ -1 +1 @@
-1.10.0.rc1
+1.10.0
data/lib/mongo.rb
CHANGED

data/lib/mongo/bulk_write_collection_view.rb
CHANGED

@@ -220,14 +220,14 @@ module Mongo
 write_concern = get_write_concern(opts, @collection)
 @ops.each_with_index{|op, index| op.last.merge!(:ord => index)} # infuse ordinal here to avoid issues with upsert
 if @collection.db.connection.use_write_command?(write_concern)
-errors, exchanges = @collection.command_writer.bulk_execute(@ops, @options, opts)
+errors, write_concern_errors, exchanges = @collection.command_writer.bulk_execute(@ops, @options, opts)
 else
-errors, exchanges = @collection.operation_writer.bulk_execute(@ops, @options, opts)
+errors, write_concern_errors, exchanges = @collection.operation_writer.bulk_execute(@ops, @options, opts)
 end
 @ops = []
 return true if errors.empty? && (exchanges.empty? || exchanges.first[:response] == true) # w 0 without GLE
-result = merge_result(errors, exchanges)
-raise BulkWriteError.new(MULTIPLE_ERRORS_MSG, Mongo::ErrorCode::MULTIPLE_ERRORS_OCCURRED, result) if !errors.empty? ||
+result = merge_result(errors + write_concern_errors, exchanges)
+raise BulkWriteError.new(MULTIPLE_ERRORS_MSG, Mongo::ErrorCode::MULTIPLE_ERRORS_OCCURRED, result) if !errors.empty? || !write_concern_errors.empty?
 result
 end
 

@@ -274,7 +274,7 @@ module Mongo
 ok = 0
 result = {"ok" => 0, "n" => 0}
 unless errors.empty?
-unless (writeErrors = errors.select { |error| error.class != Mongo::OperationFailure }).empty? # assignment
+unless (writeErrors = errors.select { |error| error.class != Mongo::OperationFailure && error.class != WriteConcernError }).empty? # assignment
 concat(result, "writeErrors",
 writeErrors.collect { |error|
 {"index" => error.result[:ord], "code" => error.error_code, "errmsg" => error.result[:error].message}

@@ -305,26 +305,26 @@ module Mongo
 tally(result, "nRemoved", n)
 end
 result["n"] += n
-
+write_concern_error = nil
 errmsg = response["errmsg"] || response["err"] # top level
 if (writeErrors = response["writeErrors"] || response["errDetails"]) # assignment
 concat(result, "writeErrors", merge_indexes(writeErrors, exchange))
 elsif response["err"] == "timeout" # errmsg == "timed out waiting for slaves" # OP_*
-
+write_concern_error = {"errmsg" => errmsg, "code" => Mongo::ErrorCode::WRITE_CONCERN_FAILED,
 "errInfo" => {"wtimeout" => response["wtimeout"]}} # OP_* does not have "code"
 elsif errmsg == "norepl" # OP_*
-
+write_concern_error = {"errmsg" => errmsg, "code" => Mongo::ErrorCode::WRITE_CONCERN_FAILED} # OP_* does not have "code"
 elsif errmsg # OP_INSERT, OP_UPDATE have "err"
 append(result, "writeErrors", merge_index({"errmsg" => errmsg, "code" => response["code"]}, exchange))
 end
 if response["writeConcernError"]
-
+write_concern_error = response["writeConcernError"]
 elsif (wnote = response["wnote"]) # assignment - OP_*
-
+write_concern_error = {"errmsg" => wnote, "code" => Mongo::ErrorCode::WRITE_CONCERN_FAILED} # OP_* does not have "code"
 elsif (jnote = response["jnote"]) # assignment - OP_*
-
+write_concern_error = {"errmsg" => jnote, "code" => Mongo::ErrorCode::BAD_VALUE} # OP_* does not have "code"
 end
-append(result, "writeConcernError", merge_index(
+append(result, "writeConcernError", merge_index(write_concern_error, exchange)) if write_concern_error
 end
 result.delete("nModified") if result.has_key?("nModified") && !result["nModified"]
 result.merge!("ok" => [ok + result["n"], 1].min)
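The hunks above thread a separate `write_concern_errors` list through `execute`, so write concern failures now show up in the `BulkWriteError` raised by the bulk API instead of being dropped. A minimal, hypothetical sketch of triggering that path, assuming a local mongod and the 1.10 bulk API (`initialize_ordered_bulk_op`):

```ruby
require 'mongo'
include Mongo

collection = MongoClient.new['test']['people']   # assumes mongod on localhost:27017

bulk = collection.initialize_ordered_bulk_op
bulk.insert({'_id' => 1, 'name' => 'ada'})
bulk.insert({'_id' => 1, 'name' => 'dup'})        # deliberate duplicate key

begin
  bulk.execute(:w => 1)
rescue BulkWriteError => ex
  # ex.result now merges ordinary write errors and any write concern errors
  p ex.result['writeErrors']
  p ex.result['writeConcernError'] if ex.result['writeConcernError']
end
```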
data/lib/mongo/collection.rb
CHANGED

@@ -159,7 +159,7 @@ module Mongo
 end
 
 # Set a hint field using a named index.
-# @param [String]
+# @param [String] hint index name
 def named_hint=(hint=nil)
 @hint = hint
 self

@@ -408,7 +408,8 @@
 def insert(doc_or_docs, opts={})
 if doc_or_docs.respond_to?(:collect!)
 doc_or_docs.collect! { |doc| @pk_factory.create_pk(doc) }
-error_docs, errors, rest_ignored = batch_write(:insert, doc_or_docs, true, opts)
+error_docs, errors, write_concern_errors, rest_ignored = batch_write(:insert, doc_or_docs, true, opts)
+errors = write_concern_errors + errors
 raise errors.last if !opts[:collect_on_error] && !errors.empty?
 inserted_docs = doc_or_docs - error_docs
 inserted_ids = inserted_docs.collect {|o| o[:_id] || o['_id']}
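With the `insert` change above, write concern errors are merged ahead of ordinary errors, so they are what `insert` raises first when `:collect_on_error` is not set. A minimal, hypothetical sketch of the raising path:

```ruby
require 'mongo'
collection = Mongo::MongoClient.new['test']['people']   # assumes a local mongod

begin
  collection.insert([{'_id' => 1}, {'_id' => 1}])        # deliberate duplicate key
rescue Mongo::OperationFailure => ex                      # WriteConcernError is a subclass
  p ex.error_code
end
```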
data/lib/mongo/collection_writer.rb
CHANGED

@@ -48,6 +48,7 @@ module Mongo
 collect_on_error = !!opts[:collect_on_error] || ordered == false
 error_docs = [] # docs with serialization errors
 errors = []
+write_concern_errors = []
 exchanges = []
 serialized_doc = nil
 message = BSON::ByteBuffer.new("", max_message_size)

@@ -80,6 +81,9 @@ module Mongo
 begin
 response = batch_message_send(message, op_type, batch_docs, write_concern, continue_on_error) if batch_docs.size > 0
 exchanges << {:op_type => op_type, :batch => batch_docs, :opts => opts, :response => response}
+rescue Mongo::WriteConcernError => ex
+write_concern_errors << ex
+exchanges << {:op_type => op_type, :batch => batch_docs, :opts => opts, :response => ex.result}
 rescue Mongo::OperationFailure => ex
 errors << ex
 exchanges << {:op_type => op_type, :batch => batch_docs, :opts => opts, :response => ex.result}

@@ -87,7 +91,7 @@ module Mongo
 end
 end
 end
-[error_docs, errors, exchanges]
+[error_docs, errors, write_concern_errors, exchanges]
 end
 
 def batch_write_partition(op_type, documents, check_keys, opts)

@@ -98,6 +102,7 @@ module Mongo
 collect_on_error = !!opts[:collect_on_error] # collect_on_error default false
 error_docs = [] # docs with serialization errors
 errors = []
+write_concern_errors = []
 exchanges = []
 @max_write_batch_size = @collection.db.connection.max_write_batch_size
 @write_batch_size = [documents.size, @max_write_batch_size].min

@@ -128,6 +133,10 @@ module Mongo
 next if collect_on_error
 errors << ex
 break unless continue_on_error
+rescue Mongo::WriteConcernError => ex
+write_concern_errors << ex
+exchanges << {:op_type => op_type, :batch => batch_docs, :opts => opts, :response => ex.result}
+docs = docs.drop(batch.size)
 rescue Mongo::OperationFailure => ex
 errors << ex
 exchanges << {:op_type => op_type, :batch => batch, :opts => opts, :response => ex.result}

@@ -135,7 +144,7 @@ module Mongo
 break if !continue_on_error && !collect_on_error
 end
 end
-[error_docs, errors, exchanges]
+[error_docs, errors, write_concern_errors, exchanges]
 end
 
 alias :batch_write :batch_write_incremental

@@ -225,6 +234,7 @@ module Mongo
 def bulk_execute(ops, options, opts = {})
 write_concern = get_write_concern(opts, @collection)
 errors = []
+write_concern_errors = []
 exchanges = []
 ops.each do |op_type, doc|
 doc = {:d => @collection.pk_factory.create_pk(doc[:d]), :ord => doc[:ord]} if op_type == :insert

@@ -241,13 +251,16 @@ module Mongo
 {:op_type => op_type, :serialize => doc, :ord => doc[:ord], :error => ex})
 errors << ex
 break if options[:ordered]
+rescue Mongo::WriteConcernError => ex
+write_concern_errors << ex
+exchanges << {:op_type => op_type, :batch => [doc], :opts => opts, :response => ex.result}
 rescue Mongo::OperationFailure => ex
 errors << ex
 exchanges << {:op_type => op_type, :batch => [doc], :opts => opts, :response => ex.result}
 break if options[:ordered] && ex.result["err"] != "norepl"
 end
 end
-[errors, exchanges]
+[errors, write_concern_errors, exchanges]
 end
 
 private

@@ -304,17 +317,19 @@ module Mongo
 
 def bulk_execute(ops, options, opts = {})
 errors = []
+write_concern_errors = []
 exchanges = []
 ops = (options[:ordered] == false) ? sort_by_first_sym(ops) : ops # sort by write-type
 ordered_group_by_first(ops).each do |op_type, documents|
 documents.collect! {|doc| {:d => @collection.pk_factory.create_pk(doc[:d]), :ord => doc[:ord]} } if op_type == :insert
-error_docs, batch_errors, batch_exchanges =
+error_docs, batch_errors, batch_write_concern_errors, batch_exchanges =
 batch_write(op_type, documents, check_keys = false, opts.merge(:ordered => options[:ordered]))
 errors += batch_errors
+write_concern_errors += batch_write_concern_errors
 exchanges += batch_exchanges
 break if options[:ordered] && !batch_errors.empty?
 end
-[errors, exchanges]
+[errors, write_concern_errors, exchanges]
 end
 
 private
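Note the new `docs = docs.drop(batch.size)` line: after a write concern failure the incremental writer moves on to the next batch rather than resending or aborting. The long-standing `:continue_on_error` flag drives the analogous behavior for ordinary write errors; a hedged sketch:

```ruby
require 'mongo'
collection = Mongo::MongoClient.new['test']['people']   # assumes a local mongod

docs = (1..5).map { |i| {'_id' => i % 2} }                # duplicate _ids on purpose

begin
  collection.insert(docs, :continue_on_error => true, :w => 1)
rescue Mongo::OperationFailure => ex
  # The duplicate-key failure is reported only after the whole batch is attempted;
  # a write concern failure would surface as Mongo::WriteConcernError instead.
  puts ex.message
end
```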
data/lib/mongo/connection/pool_manager.rb
CHANGED

@@ -17,14 +17,9 @@ module Mongo
 include ThreadLocalVariableManager
 
 attr_reader :client,
-:arbiters,
 :primary,
-:secondaries,
 :primary_pool,
-:secondary_pools,
-:hosts,
 :seeds,
-:pools,
 :max_bson_size,
 :max_message_size,
 :max_wire_version,

@@ -145,6 +140,47 @@ module Mongo
 read_pool.host_port
 end
 
+def hosts
+  @connect_mutex.synchronize do
+    @hosts.nil? ? nil : @hosts.clone
+  end
+end
+
+def pools
+  @connect_mutex.synchronize do
+    @pools.nil? ? nil : @pools.clone
+  end
+end
+
+def secondaries
+  @connect_mutex.synchronize do
+    @secondaries.nil? ? nil : @secondaries.clone
+  end
+end
+
+def secondary_pools
+  @connect_mutex.synchronize do
+    @secondary_pools.nil? ? nil : @secondary_pools.clone
+  end
+end
+
+def arbiters
+  @connect_mutex.synchronize do
+    @arbiters.nil? ? nil : @arbiters.clone
+  end
+end
+
+def state_snapshot
+  @connect_mutex.synchronize do
+    { :pools => @pools.nil? ? nil : @pools.clone,
+      :secondaries => @secondaries.nil? ? nil : @secondaries.clone,
+      :secondary_pools => @secondary_pools.nil? ? nil : @secondary_pools.clone,
+      :hosts => @hosts.nil? ? nil : @hosts.clone,
+      :arbiters => @arbiters.nil? ? nil : @arbiters.clone
+    }
+  end
+end
+
 private
 
 def update_max_sizes
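The new accessors replace the raw `attr_reader`s with copies taken under `@connect_mutex`, so a caller can never observe the member lists halfway through a refresh, and `state_snapshot` returns all of them from a single critical section. The same clone-under-mutex pattern in isolation, as a hypothetical standalone sketch rather than driver internals:

```ruby
require 'thread'

# Readers receive a point-in-time copy; a concurrent refresh can rebuild the
# real list without ever exposing a half-updated array.
class MemberList
  def initialize
    @lock    = Mutex.new
    @members = []
  end

  def replace!(new_members)
    @lock.synchronize { @members = new_members }
  end

  def members
    @lock.synchronize { @members.clone }   # callers only ever mutate their copy
  end
end

list = MemberList.new
list.replace!(%w[db1:27017 db2:27017])
snapshot = list.members                    # safe to iterate even during replace!
```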
data/lib/mongo/connection/socket/ssl_socket.rb
CHANGED

@@ -38,7 +38,11 @@ module Mongo
 end
 
 if opts[:key]
-
+  if opts[:key_pass_phrase]
+    @context.key = OpenSSL::PKey::RSA.new(File.open(opts[:key]), opts[:key_pass_phrase])
+  else
+    @context.key = OpenSSL::PKey::RSA.new(File.open(opts[:key]))
+  end
 end
 
 if opts[:verify]
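The hunk above lets `SSLSocket` open a pass-phrase-protected private key. A hedged sketch of driving it from the client, assuming the option is surfaced as `:ssl_key_pass_phrase` next to the existing `:ssl_cert`/`:ssl_key` options (the expanded `test/shared/ssl_shared.rb` points that way); the host and paths are placeholders:

```ruby
require 'mongo'

client = Mongo::MongoClient.new('db.example.com', 27017,
  :ssl                 => true,
  :ssl_cert            => '/etc/ssl/client.pem',
  :ssl_key             => '/etc/ssl/client-encrypted.key',
  :ssl_key_pass_phrase => ENV['MONGO_SSL_KEY_PASS'],   # decrypts the key at connect time
  :ssl_verify          => true,
  :ssl_ca_cert         => '/etc/ssl/ca.pem')
```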
data/lib/mongo/cursor.rb
CHANGED

@@ -155,8 +155,8 @@ module Mongo
 end
 
 raise OperationFailure.new(err, code, doc)
-elsif doc && (
-raise
+elsif doc && (write_concern_error = doc['writeConcernError']) # assignment
+raise WriteConcernError.new(write_concern_error['errmsg'], write_concern_error['code'], doc)
 end
 
 if @transformer.nil?

@@ -189,7 +189,7 @@ module Mongo
 
 # Get the size of the result set for this query.
 #
-# @param [Boolean] whether
+# @param [Boolean] skip_and_limit whether or not to take skip or limit into account.
 #
 # @return [Integer] the number of objects in the result set for this query.
 #
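The second hunk just names the boolean that `Cursor#count` already takes; passing `true` makes the count honor `skip` and `limit`. A quick sketch, assuming a populated collection:

```ruby
require 'mongo'
collection = Mongo::MongoClient.new['test']['people']   # assumes a local mongod

cursor = collection.find({}).skip(20).limit(10)

cursor.count        # total number of matching documents, ignoring skip/limit
cursor.count(true)  # at most 10, because skip and limit are applied
```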
data/lib/mongo/db.rb
CHANGED

@@ -556,7 +556,7 @@ module Mongo
 result = Cursor.new(system_command_collection, command).next_document
 rescue OperationFailure => ex
 if check_response
-raise
+raise ex.class.new("Database command '#{selector.keys.first}' failed: #{ex.message}", ex.error_code, ex.result)
 else
 result = ex.result
 end
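With the change above, a failed command is re-raised with the command name prepended, which makes it much easier to tell which of several commands blew up. A minimal, hypothetical sketch:

```ruby
require 'mongo'
db = Mongo::MongoClient.new['test']   # assumes a local mongod

begin
  db.command(:collstats => 'no_such_collection')
rescue Mongo::OperationFailure => ex
  # The 1.10.0 message reads roughly: "Database command 'collstats' failed: ..."
  puts ex.message
end
```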
data/lib/mongo/exception.rb
CHANGED

@@ -68,6 +68,9 @@ module Mongo
 # Raised when a database operation exceeds maximum specified time.
 class ExecutionTimeout < OperationFailure; end
 
+# Raised when a database operation has a write concern error.
+class WriteConcernError < OperationFailure; end
+
 # Raised when a socket read operation times out.
 class OperationTimeout < SocketError; end
 
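Because `WriteConcernError` subclasses `OperationFailure`, existing `rescue Mongo::OperationFailure` blocks keep catching it; code that wants to treat "the write happened but was not acknowledged to the requested level" separately can rescue the new class first. A hedged sketch, assuming a small replica set so `:w => 3` cannot be satisfied in time (hosts are placeholders):

```ruby
require 'mongo'

client     = Mongo::MongoReplicaSetClient.new(['db1.example.com:27017', 'db2.example.com:27017'])
collection = client['test']['people']

begin
  collection.insert({'name' => 'ada'}, :w => 3, :wtimeout => 100)
rescue Mongo::WriteConcernError => ex
  # The write may have been applied on the primary; replication simply did not
  # acknowledge it within wtimeout.
  warn "not acknowledged at w=3: #{ex.message}"
rescue Mongo::OperationFailure => ex
  raise   # a genuine write failure
end
```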
data/lib/mongo/functional/authentication.rb
CHANGED

@@ -296,7 +296,7 @@ module Mongo
 
 # Helper to fetch a nonce value from a given database instance.
 #
-# @param
+# @param database [Mongo::DB] The DB instance to use for issue the nonce command.
 # @param opts [Hash] Hash of optional settings and configuration values.
 #
 # @option opts [Socket] socket (nil) Optional socket instance to use.
data/lib/mongo/functional/sasl_java.rb
CHANGED

@@ -16,7 +16,7 @@ require 'jruby'
 
 include Java
 
-jar_dir = File.join(File.dirname(__FILE__), '../../../ext/jsasl')
+jar_dir = File.expand_path(File.join(File.dirname(__FILE__), '../../../ext/jsasl'))
 require File.join(jar_dir, 'target/jsasl.jar')
 
 module Mongo
data/lib/mongo/functional/uri_parser.rb
CHANGED

@@ -153,8 +153,6 @@ module Mongo
 # @note Passwords can contain any character except for ','
 #
 # @param [String] uri The MongoDB URI string.
-# @param [Hash,nil] extra_opts Extra options. Will override anything
-# already specified in the URI.
 def initialize(uri)
 if uri.start_with?('mongodb://')
 uri = uri[10..-1]

@@ -234,12 +232,12 @@ module Mongo
 warn "Using wtimeout in a URI is deprecated, please use wtimeoutMS. It will be removed in v2.0."
 opts[:wtimeout] = @wtimeout
 end
-opts[:wtimeout] = @wtimeoutms
+opts[:wtimeout] = @wtimeoutms if @wtimeoutms
 
 opts[:w] = 1 if @safe
 opts[:w] = @w if @w
-opts[:j] = @journal
-opts[:fsync] = @fsync
+opts[:j] = @journal if @journal
+opts[:fsync] = @fsync if @fsync
 
 opts[:connect_timeout] = @connecttimeoutms if @connecttimeoutms
 opts[:op_timeout] = @sockettimeoutms if @sockettimeoutms

@@ -260,7 +258,7 @@ module Mongo
 
 opts[:db_name] = @db_name if @db_name
 opts[:auths] = @auths if @auths
-opts[:ssl] = @ssl
+opts[:ssl] = @ssl if @ssl
 opts[:connect] = connect?
 
 opts
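After the change above, write-concern flags that were not given in the URI are left out of the options hash entirely instead of being written in as `nil`/`false`, so they no longer clobber driver defaults. A hedged sketch of the URI options involved (hostname is a placeholder):

```ruby
require 'mongo'

uri    = 'mongodb://db1.example.com:27017/test?w=2&journal=true&wtimeoutMS=500'
client = Mongo::MongoClient.from_uri(uri)

# Dropping journal/fsync/ssl from the URI now leaves those keys unset rather
# than forcing them to false.
```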
data/lib/mongo/functional/write_concern.rb
CHANGED

@@ -14,6 +14,8 @@
 
 module Mongo
 module WriteConcern
+VALID_KEYS = [:w, :j, :fsync, :wtimeout]
+DEFAULT_WRITE_CONCERN = {:w => 1}
 
 attr_reader :legacy_write_concern
 

@@ -44,14 +46,9 @@ module Mongo
 # todo: throw exception for conflicting write concern options
 def get_write_concern(opts, parent=nil)
 write_concern_from_legacy(opts) if opts.key?(:safe) || legacy_write_concern
-write_concern =
-:w => 1,
-:j => false,
-:fsync => false,
-:wtimeout => nil
-}
+write_concern = DEFAULT_WRITE_CONCERN.dup
 write_concern.merge!(parent.write_concern) if parent
-write_concern.merge!(opts.reject {|k,v| !
+write_concern.merge!(opts.reject {|k,v| !VALID_KEYS.include?(k)})
 write_concern[:w] = write_concern[:w].to_s if write_concern[:w].is_a?(Symbol)
 write_concern
 end
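`VALID_KEYS` doubles as documentation of which per-operation options participate in the write concern; anything else in the options hash is filtered out by `get_write_concern`, and the defaults now come from `DEFAULT_WRITE_CONCERN` (`{:w => 1}`) plus whatever the client, db, or collection level sets. A short sketch:

```ruby
require 'mongo'
collection = Mongo::MongoClient.new['test']['people']   # assumes a local mongod

# Per-operation write concern; only :w, :j, :fsync and :wtimeout are honored.
collection.insert({'name' => 'ada'}, :w => 1, :j => true, :wtimeout => 1000)

# No options: falls back to DEFAULT_WRITE_CONCERN merged with any parent setting.
collection.insert({'name' => 'bob'})
```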
data/lib/mongo/gridfs/grid.rb
CHANGED

@@ -54,7 +54,7 @@ module Mongo
 # @option opts [String] :content_type ('binary/octet-stream') If no content type is specified,
 # the content type will may be inferred from the filename extension if the mime-types gem can be
 # loaded. Otherwise, the content type 'binary/octet-stream' will be used.
-# @option opts [Integer] (
+# @option opts [Integer] (261120) :chunk_size size of file chunks in bytes.
 # @option opts [String, Integer, Symbol] :w (1) Set write concern
 #
 # Notes on write concern:

@@ -81,7 +81,7 @@ module Mongo
 
 # Read a file from the file store.
 #
-# @param
+# @param id the file's unique id.
 #
 # @return [Mongo::GridIO]
 def get(id)

@@ -95,7 +95,7 @@ module Mongo
 # is attempting to read a file while it's being deleted. While the odds for this
 # kind of race condition are small, it's important to be aware of.
 #
-# @param
+# @param id
 #
 # @return [Boolean]
 def delete(id)
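The repaired doc comments above cover `Grid#put`'s `:chunk_size` option and the `id` argument to `get`/`delete`. A short, hedged usage sketch, with the file path as a placeholder:

```ruby
require 'mongo'
grid = Mongo::Grid.new(Mongo::MongoClient.new['test'])   # assumes a local mongod

id = grid.put(File.open('/tmp/report.pdf'),
              :filename   => 'report.pdf',
              :chunk_size => 261_120,   # the default noted in the docs above
              :w          => 1)

file = grid.get(id)    # Mongo::GridIO
data = file.read
grid.delete(id)        # true on success
```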