mongo 1.7.1 → 1.8.0

Sign up to get free protection for your applications and to get access to all the features.
Files changed (138) [hide] [show]
  1. data/{LICENSE.txt → LICENSE} +0 -0
  2. data/README.md +124 -111
  3. data/Rakefile +9 -325
  4. data/VERSION +1 -0
  5. data/bin/mongo_console +4 -4
  6. data/examples/admin.rb +43 -0
  7. data/examples/capped.rb +22 -0
  8. data/examples/cursor.rb +48 -0
  9. data/examples/gridfs.rb +44 -0
  10. data/examples/index_test.rb +126 -0
  11. data/examples/info.rb +31 -0
  12. data/examples/queries.rb +74 -0
  13. data/examples/replica_set.rb +26 -0
  14. data/examples/simple.rb +25 -0
  15. data/examples/strict.rb +35 -0
  16. data/examples/types.rb +36 -0
  17. data/{test/load → examples/web}/thin/load.rb +3 -1
  18. data/{test/load → examples/web}/unicorn/load.rb +5 -3
  19. data/lib/mongo.rb +8 -10
  20. data/lib/mongo/collection.rb +134 -114
  21. data/lib/mongo/cursor.rb +21 -14
  22. data/lib/mongo/db.rb +30 -28
  23. data/lib/mongo/exceptions.rb +1 -1
  24. data/lib/mongo/gridfs/grid.rb +8 -7
  25. data/lib/mongo/gridfs/grid_ext.rb +1 -1
  26. data/lib/mongo/gridfs/grid_file_system.rb +6 -5
  27. data/lib/mongo/gridfs/grid_io.rb +22 -19
  28. data/lib/mongo/legacy.rb +82 -0
  29. data/lib/mongo/{connection.rb → mongo_client.rb} +82 -61
  30. data/lib/mongo/{repl_set_connection.rb → mongo_replica_set_client.rb} +54 -39
  31. data/lib/mongo/{sharded_connection.rb → mongo_sharded_client.rb} +9 -9
  32. data/lib/mongo/networking.rb +25 -20
  33. data/lib/mongo/util/conversions.rb +1 -1
  34. data/lib/mongo/util/core_ext.rb +1 -1
  35. data/lib/mongo/util/logging.rb +20 -4
  36. data/lib/mongo/util/node.rb +16 -16
  37. data/lib/mongo/util/pool.rb +56 -27
  38. data/lib/mongo/util/pool_manager.rb +28 -27
  39. data/lib/mongo/util/server_version.rb +1 -1
  40. data/lib/mongo/util/sharding_pool_manager.rb +8 -8
  41. data/lib/mongo/util/ssl_socket.rb +1 -5
  42. data/lib/mongo/util/support.rb +24 -8
  43. data/lib/mongo/util/tcp_socket.rb +0 -4
  44. data/lib/mongo/util/uri_parser.rb +54 -38
  45. data/lib/mongo/util/write_concern.rb +67 -0
  46. data/mongo.gemspec +21 -32
  47. data/test/auxillary/{1.4_features.rb → 1.4_feature_test.rb} +4 -5
  48. data/test/auxillary/authentication_test.rb +18 -20
  49. data/test/auxillary/autoreconnect_test.rb +3 -5
  50. data/test/auxillary/fork_test.rb +5 -7
  51. data/test/auxillary/repl_set_auth_test.rb +13 -15
  52. data/test/auxillary/slave_connection_test.rb +8 -7
  53. data/test/auxillary/threaded_authentication_test.rb +15 -17
  54. data/test/bson/binary_test.rb +1 -1
  55. data/test/bson/bson_test.rb +60 -36
  56. data/test/bson/byte_buffer_test.rb +1 -1
  57. data/test/bson/hash_with_indifferent_access_test.rb +2 -2
  58. data/test/bson/json_test.rb +1 -2
  59. data/test/bson/object_id_test.rb +1 -2
  60. data/test/bson/ordered_hash_test.rb +1 -1
  61. data/test/bson/timestamp_test.rb +1 -1
  62. data/test/{collection_test.rb → functional/collection_test.rb} +57 -57
  63. data/test/{connection_test.rb → functional/connection_test.rb} +75 -89
  64. data/test/{conversions_test.rb → functional/conversions_test.rb} +1 -1
  65. data/test/{cursor_fail_test.rb → functional/cursor_fail_test.rb} +3 -29
  66. data/test/{cursor_message_test.rb → functional/cursor_message_test.rb} +1 -1
  67. data/test/{cursor_test.rb → functional/cursor_test.rb} +5 -1
  68. data/test/{db_api_test.rb → functional/db_api_test.rb} +8 -9
  69. data/test/{db_connection_test.rb → functional/db_connection_test.rb} +3 -5
  70. data/test/{db_test.rb → functional/db_test.rb} +13 -13
  71. data/test/{grid_file_system_test.rb → functional/grid_file_system_test.rb} +2 -2
  72. data/test/{grid_io_test.rb → functional/grid_io_test.rb} +6 -6
  73. data/test/{grid_test.rb → functional/grid_test.rb} +4 -10
  74. data/test/{pool_test.rb → functional/pool_test.rb} +1 -1
  75. data/test/functional/safe_test.rb +84 -0
  76. data/test/{support_test.rb → functional/support_test.rb} +1 -1
  77. data/test/{threading_test.rb → functional/threading_test.rb} +9 -9
  78. data/test/{timeout_test.rb → functional/timeout_test.rb} +1 -1
  79. data/test/{uri_test.rb → functional/uri_test.rb} +1 -1
  80. data/test/functional/write_concern_test.rb +104 -0
  81. data/test/replica_set/basic_test.rb +139 -0
  82. data/test/replica_set/client_test.rb +255 -0
  83. data/test/replica_set/complex_connect_test.rb +62 -0
  84. data/test/replica_set/connection_test.rb +255 -0
  85. data/test/{replica_sets → replica_set}/count_test.rb +17 -14
  86. data/test/replica_set/cursor_test.rb +75 -0
  87. data/test/{replica_sets → replica_set}/insert_test.rb +19 -16
  88. data/test/replica_set/query_test.rb +64 -0
  89. data/test/replica_set/refresh_test.rb +153 -0
  90. data/test/{replica_sets → replica_set}/replication_ack_test.rb +21 -17
  91. data/test/sharded_cluster/basic_test.rb +31 -50
  92. data/test/support/hash_with_indifferent_access.rb +1 -1
  93. data/test/test_helper.rb +56 -9
  94. data/test/threading/threading_with_large_pool_test.rb +8 -8
  95. data/test/tools/mongo_config.rb +270 -58
  96. data/test/tools/mongo_config_test.rb +146 -0
  97. data/test/unit/client_test.rb +230 -0
  98. data/test/unit/collection_test.rb +45 -32
  99. data/test/unit/connection_test.rb +82 -74
  100. data/test/unit/cursor_test.rb +14 -6
  101. data/test/unit/db_test.rb +8 -8
  102. data/test/unit/grid_test.rb +11 -11
  103. data/test/unit/node_test.rb +24 -24
  104. data/test/unit/pool_manager_test.rb +13 -13
  105. data/test/unit/pool_test.rb +1 -1
  106. data/test/unit/read_test.rb +21 -26
  107. data/test/unit/safe_test.rb +52 -33
  108. data/test/unit/util_test.rb +55 -0
  109. data/test/unit/write_concern_test.rb +161 -0
  110. metadata +158 -171
  111. data/docs/CREDITS.md +0 -123
  112. data/docs/FAQ.md +0 -116
  113. data/docs/GRID_FS.md +0 -158
  114. data/docs/HISTORY.md +0 -392
  115. data/docs/READ_PREFERENCE.md +0 -99
  116. data/docs/RELEASES.md +0 -54
  117. data/docs/REPLICA_SETS.md +0 -113
  118. data/docs/TAILABLE_CURSORS.md +0 -51
  119. data/docs/TUTORIAL.md +0 -356
  120. data/docs/WRITE_CONCERN.md +0 -31
  121. data/lib/mongo/gridfs/grid_io_fix.rb +0 -38
  122. data/lib/mongo/version.rb +0 -3
  123. data/test/bson/test_helper.rb +0 -30
  124. data/test/replica_sets/basic_test.rb +0 -119
  125. data/test/replica_sets/complex_connect_test.rb +0 -57
  126. data/test/replica_sets/complex_read_preference_test.rb +0 -237
  127. data/test/replica_sets/connect_test.rb +0 -156
  128. data/test/replica_sets/cursor_test.rb +0 -70
  129. data/test/replica_sets/pooled_insert_test.rb +0 -57
  130. data/test/replica_sets/query_test.rb +0 -50
  131. data/test/replica_sets/read_preference_test.rb +0 -234
  132. data/test/replica_sets/refresh_test.rb +0 -156
  133. data/test/replica_sets/refresh_with_threads_test.rb +0 -60
  134. data/test/replica_sets/rs_test_helper.rb +0 -39
  135. data/test/safe_test.rb +0 -68
  136. data/test/sharded_cluster/mongo_config_test.rb +0 -126
  137. data/test/sharded_cluster/sc_test_helper.rb +0 -39
  138. data/test/tools/repl_set_manager.rb +0 -418
@@ -0,0 +1,35 @@
+ $:.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
+
+ require 'mongo'
+
+ include Mongo
+
+ host = ENV['MONGO_RUBY_DRIVER_HOST'] || 'localhost'
+ port = ENV['MONGO_RUBY_DRIVER_PORT'] || MongoClient::DEFAULT_PORT
+
+ puts "Connecting to #{host}:#{port}"
+ db = MongoClient.new(host, port).db('ruby-mongo-examples')
+
+ db.drop_collection('does-not-exist')
+ db.create_collection('test')
+
+ db.strict = true
+
+ begin
+ # Can't reference collection that does not exist
+ db.collection('does-not-exist')
+ puts "error: expected exception"
+ rescue => ex
+ puts "expected exception: #{ex}"
+ end
+
+ begin
+ # Can't create collection that already exists
+ db.create_collection('test')
+ puts "error: expected exception"
+ rescue => ex
+ puts "expected exception: #{ex}"
+ end
+
+ db.strict = false
+ db.drop_collection('test')
@@ -0,0 +1,36 @@
+ $:.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
+
+ require 'mongo'
+ require 'pp'
+
+ include Mongo
+
+ host = ENV['MONGO_RUBY_DRIVER_HOST'] || 'localhost'
+ port = ENV['MONGO_RUBY_DRIVER_PORT'] || MongoClient::DEFAULT_PORT
+
+ puts "Connecting to #{host}:#{port}"
+ db = MongoClient.new(host, port).db('ruby-mongo-examples')
+ coll = db.collection('test')
+
+ # Remove all records, if any
+ coll.remove
+
+ # Insert record with all sorts of values
+ coll.insert('array' => [1, 2, 3],
+ 'string' => 'hello',
+ 'hash' => {'a' => 1, 'b' => 2},
+ 'date' => Time.now, # milliseconds only; microseconds are not stored
+ 'oid' => ObjectID.new,
+ 'binary' => Binary.new([1, 2, 3]),
+ 'int' => 42,
+ 'float' => 33.33333,
+ 'regex' => /foobar/i,
+ 'boolean' => true,
+ 'where' => Code.new('this.x == 3'),
+ 'dbref' => DBRef.new(coll.name, ObjectID.new),
+ 'null' => nil,
+ 'symbol' => :zildjian)
+
+ pp coll.find().next_document
+
+ coll.remove
@@ -1,7 +1,9 @@
  require File.join(File.dirname(__FILE__), '..', '..', '..', 'lib', 'mongo')
  require 'logger'
 
- $con = Mongo::ReplSetConnection.new(['localhost:30000', 'localhost:30001'], :read => :secondary, :refresh_mode => :sync, :refresh_interval => 30)
+ include Mongo
+
+ $con = MongoReplicaSetClient.new(['localhost:30000', 'localhost:30001'], :read => :secondary, :refresh_mode => :sync, :refresh_interval => 30)
  $db = $con['foo']
 
  class Load < Sinatra::Base
@@ -1,7 +1,9 @@
- require File.join(File.dirname(__FILE__), '..', '..', 'lib', 'mongo')
+ require File.join(File.dirname(__FILE__), '..', '..', 'lib', 'mongo')
 
- $con = Mongo::Connection.new
- $db = $con['foo']
+ include Mongo
+
+ $client = MongoClient.new('localhost', 27017)
+ $db = $client['foo']
 
  class Load < Sinatra::Base
 
@@ -1,7 +1,7 @@
  # encoding: UTF-8
  #
  # --
- # Copyright (C) 2008-2011 10gen Inc.
+ # Copyright (C) 2008-2012 10gen Inc.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -16,8 +16,6 @@
  # limitations under the License.
  # ++
 
- require 'mongo/version'
-
  module Mongo
  ASCENDING = 1
  DESCENDING = -1
54
52
 
55
53
  require 'mongo/util/conversions'
56
54
  require 'mongo/util/support'
55
+ require 'mongo/util/write_concern'
57
56
  require 'mongo/util/core_ext'
58
57
  require 'mongo/util/logging'
59
58
  require 'mongo/util/node'
@@ -65,18 +64,17 @@ require 'mongo/util/ssl_socket'
65
64
  require 'mongo/util/tcp_socket'
66
65
  require 'mongo/util/uri_parser'
67
66
 
68
- require 'mongo/collection'
67
+
69
68
  require 'mongo/networking'
70
- require 'mongo/connection'
71
- require 'mongo/repl_set_connection'
72
- require 'mongo/sharded_connection'
69
+ require 'mongo/mongo_client'
70
+ require 'mongo/mongo_replica_set_client'
71
+ require 'mongo/mongo_sharded_client'
72
+ require 'mongo/legacy'
73
+ require 'mongo/collection'
73
74
  require 'mongo/cursor'
74
75
  require 'mongo/db'
75
76
  require 'mongo/exceptions'
76
77
  require 'mongo/gridfs/grid_ext'
77
78
  require 'mongo/gridfs/grid'
78
79
  require 'mongo/gridfs/grid_io'
79
- if RUBY_PLATFORM =~ /java/
80
- require 'mongo/gridfs/grid_io_fix'
81
- end
82
80
  require 'mongo/gridfs/grid_file_system'
@@ -1,7 +1,7 @@
1
1
  # encoding: UTF-8
2
2
 
3
3
  # --
4
- # Copyright (C) 2008-2011 10gen Inc.
4
+ # Copyright (C) 2008-2012 10gen Inc.
5
5
  #
6
6
  # Licensed under the Apache License, Version 2.0 (the "License");
7
7
  # you may not use this file except in compliance with the License.
@@ -20,11 +20,18 @@ module Mongo
20
20
  # A named collection of documents in a database.
21
21
  class Collection
22
22
  include Mongo::Logging
23
+ include Mongo::WriteConcern
23
24
 
24
- attr_reader :db, :name, :pk_factory, :hint, :safe
25
+ attr_reader :db,
26
+ :name,
27
+ :pk_factory,
28
+ :hint,
29
+ :write_concern
25
30
 
26
31
  # Read Preference
27
- attr_accessor :read_preference, :tag_sets, :acceptable_latency
32
+ attr_accessor :read_preference,
33
+ :tag_sets,
34
+ :acceptable_latency
28
35
 
29
36
  # Initialize a collection object.
30
37
  #
@@ -34,10 +41,10 @@ module Mongo
34
41
  # @option opts [:create_pk] :pk (BSON::ObjectId) A primary key factory to use
35
42
  # other than the default BSON::ObjectId.
36
43
  #
37
- # @option opts [Boolean, Hash] :safe (false) Set the default safe-mode options
38
- # for insert, update, and remove method called on this Collection instance. If no
39
- # value is provided, the default value set on this instance's DB will be used. This
40
- # default can be overridden for any invocation of insert, update, or remove.
44
+ # @option opts [Hash] :w, :j, :wtimeout, :fsync Set the default write concern
45
+ # for +insert+, +update+, and +remove+ method called on this Collection instance. If no
46
+ # value is provided, the default values set on this instance's DB will be used. These option
47
+ # values can be overridden for any invocation of +insert+, +update+, or +remove+.
41
48
  # @option options [:primary, :secondary] :read The default read preference for queries
42
49
  # initiates from this connection object. If +:secondary+ is chosen, reads will be sent
43
50
  # to one of the closest available secondary nodes. If a secondary node cannot be located, the
@@ -91,16 +98,16 @@ module Mongo
91
98
  @connection = @db.connection
92
99
  @logger = @connection.logger
93
100
  @cache_time = @db.cache_time
94
- @cache = Hash.new(0)
101
+ @cache = Hash.new(0)
95
102
  unless pk_factory
96
- @safe = opts.fetch(:safe, @db.safe)
103
+ @write_concern = get_write_concern(opts, db)
97
104
  if value = opts[:read]
98
105
  Mongo::Support.validate_read_preference(value)
99
106
  else
100
107
  value = @db.read_preference
101
108
  end
102
- @read_preference = value.is_a?(Hash) ? value.dup : value
103
- @tag_sets = opts.fetch(:tag_sets, @db.tag_sets)
109
+ @read_preference = value.is_a?(Hash) ? value.dup : value
110
+ @tag_sets = opts.fetch(:tag_sets, @db.tag_sets)
104
111
  @acceptable_latency = opts.fetch(:acceptable_latency, @db.acceptable_latency)
105
112
  end
106
113
  @pk_factory = pk_factory || opts[:pk] || BSON::ObjectId
@@ -169,7 +176,7 @@ module Mongo
169
176
  # to Ruby 1.8).
170
177
  #
171
178
  # @option opts [Array, Hash] :fields field names that should be returned in the result
172
- # set ("_id" will be included unless explicity excluded). By limiting results to a certain subset of fields,
179
+ # set ("_id" will be included unless explicitly excluded). By limiting results to a certain subset of fields,
173
180
  # you can cut down on network traffic and decoding time. If using a Hash, keys should be field
174
181
  # names and values should be either 1 or 0, depending on whether you want to include or exclude
175
182
  # the given field.
@@ -194,13 +201,14 @@ module Mongo
194
201
  # @option opts [Boolean] :timeout (true) when +true+, the returned cursor will be subject to
195
202
  # the normal cursor timeout behavior of the mongod process. When +false+, the returned cursor will
196
203
  # never timeout. Note that disabling timeout will only work when #find is invoked with a block.
197
- # This is to prevent any inadvertant failure to close the cursor, as the cursor is explicitly
204
+ # This is to prevent any inadvertent failure to close the cursor, as the cursor is explicitly
198
205
  # closed when block code finishes.
199
206
  # @option opts [Integer] :max_scan (nil) Limit the number of items to scan on both collection scans and indexed queries..
200
207
  # @option opts [Boolean] :show_disk_loc (false) Return the disk location of each query result (for debugging).
201
208
  # @option opts [Boolean] :return_key (false) Return the index key used to obtain the result (for debugging).
202
- # @option opts [Block] :transformer (nil) a block for tranforming returned documents.
209
+ # @option opts [Block] :transformer (nil) a block for transforming returned documents.
203
210
  # This is normally used by object mappers to convert each returned document to an instance of a class.
211
+ # @option opts [String] :comment (nil) a comment to include in profiling logs
204
212
  #
205
213
  # @raise [ArgumentError]
206
214
  # if timeout is set to false and find is not invoked in a block
@@ -210,22 +218,23 @@ module Mongo
210
218
  #
211
219
  # @core find find-instance_method
212
220
  def find(selector={}, opts={})
213
- opts = opts.dup
214
- fields = opts.delete(:fields)
215
- fields = ["_id"] if fields && fields.empty?
216
- skip = opts.delete(:skip) || skip || 0
217
- limit = opts.delete(:limit) || 0
218
- sort = opts.delete(:sort)
219
- hint = opts.delete(:hint)
220
- snapshot = opts.delete(:snapshot)
221
- batch_size = opts.delete(:batch_size)
222
- timeout = (opts.delete(:timeout) == false) ? false : true
223
- max_scan = opts.delete(:max_scan)
224
- return_key = opts.delete(:return_key)
225
- transformer = opts.delete(:transformer)
226
- show_disk_loc = opts.delete(:show_disk_loc)
227
- read = opts.delete(:read) || @read_preference
228
- tag_sets = opts.delete(:tag_sets) || @tag_sets
221
+ opts = opts.dup
222
+ fields = opts.delete(:fields)
223
+ fields = ["_id"] if fields && fields.empty?
224
+ skip = opts.delete(:skip) || skip || 0
225
+ limit = opts.delete(:limit) || 0
226
+ sort = opts.delete(:sort)
227
+ hint = opts.delete(:hint)
228
+ snapshot = opts.delete(:snapshot)
229
+ batch_size = opts.delete(:batch_size)
230
+ timeout = (opts.delete(:timeout) == false) ? false : true
231
+ max_scan = opts.delete(:max_scan)
232
+ return_key = opts.delete(:return_key)
233
+ transformer = opts.delete(:transformer)
234
+ show_disk_loc = opts.delete(:show_disk_loc)
235
+ comment = opts.delete(:comment)
236
+ read = opts.delete(:read) || @read_preference
237
+ tag_sets = opts.delete(:tag_sets) || @tag_sets
229
238
  acceptable_latency = opts.delete(:acceptable_latency) || @acceptable_latency
230
239
 
231
240
  if timeout == false && !block_given?
@@ -241,27 +250,31 @@ module Mongo
241
250
  raise RuntimeError, "Unknown options [#{opts.inspect}]" unless opts.empty?
242
251
 
243
252
  cursor = Cursor.new(self, {
244
- :selector => selector,
245
- :fields => fields,
246
- :skip => skip,
247
- :limit => limit,
248
- :order => sort,
249
- :hint => hint,
250
- :snapshot => snapshot,
251
- :timeout => timeout,
252
- :batch_size => batch_size,
253
- :transformer => transformer,
254
- :max_scan => max_scan,
255
- :show_disk_loc => show_disk_loc,
256
- :return_key => return_key,
257
- :read => read,
258
- :tag_sets => tag_sets,
253
+ :selector => selector,
254
+ :fields => fields,
255
+ :skip => skip,
256
+ :limit => limit,
257
+ :order => sort,
258
+ :hint => hint,
259
+ :snapshot => snapshot,
260
+ :timeout => timeout,
261
+ :batch_size => batch_size,
262
+ :transformer => transformer,
263
+ :max_scan => max_scan,
264
+ :show_disk_loc => show_disk_loc,
265
+ :return_key => return_key,
266
+ :read => read,
267
+ :tag_sets => tag_sets,
268
+ :comment => comment,
259
269
  :acceptable_latency => acceptable_latency
260
270
  })
261
271
 
262
272
  if block_given?
263
- yield cursor
264
- cursor.close
273
+ begin
274
+ yield cursor
275
+ ensure
276
+ cursor.close
277
+ end
265
278
  nil
266
279
  else
267
280
  cursor
@@ -306,20 +319,23 @@ module Mongo
306
319
  #
307
320
  # @return [ObjectId] the _id of the saved document.
308
321
  #
309
- # @option opts [Boolean, Hash] :safe (+false+)
310
- # run the operation in safe mode, which run a getlasterror command on the
311
- # database to report any assertion. In addition, a hash can be provided to
312
- # run an fsync and/or wait for replication of the save (>= 1.5.1). See the options
313
- # for DB#error.
322
+ # @option opts [Hash] :w, :j, :wtimeout, :fsync Set the write concern for this operation.
323
+ # :w > 0 will run a +getlasterror+ command on the database to report any assertion.
324
+ # :j will confirm a write has been committed to the journal,
325
+ # :wtimeout specifies how long to wait for write confirmation,
326
+ # :fsync will confirm that a write has been fsynced.
327
+ # Options provided here will override any write concern options set on this collection,
328
+ # its database object, or the current connection. See the options
329
+ # for +DB#get_last_error+.
314
330
  #
315
- # @raise [OperationFailure] when :safe mode fails.
331
+ # @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
316
332
  def save(doc, opts={})
317
333
  if doc.has_key?(:_id) || doc.has_key?('_id')
318
334
  id = doc[:_id] || doc['_id']
319
- update({:_id => id}, doc, :upsert => true, :safe => opts.fetch(:safe, @safe))
335
+ update({:_id => id}, doc, opts.merge!({:upsert => true}))
320
336
  id
321
337
  else
322
- insert(doc, :safe => opts.fetch(:safe, @safe))
338
+ insert(doc, opts)
323
339
  end
324
340
  end
325
341
 
@@ -335,31 +351,34 @@ module Mongo
335
351
  # 2nd, a list of invalid documents.
336
352
  # Return this result format only when :collect_on_error is true.
337
353
  #
338
- # @option opts [Boolean, Hash] :safe (+false+)
339
- # run the operation in safe mode, which run a getlasterror command on the
340
- # database to report any assertion. In addition, a hash can be provided to
341
- # run an fsync and/or wait for replication of the insert (>= 1.5.1). Safe
342
- # options provided here will override any safe options set on this collection,
343
- # its database object, or the current connection. See the options on
344
- # for DB#get_last_error.
354
+ # @option opts [Hash] :w, :j, :wtimeout, :fsync Set the write concern for this operation.
355
+ # :w > 0 will run a +getlasterror+ command on the database to report any assertion.
356
+ # :j will confirm a write has been committed to the journal,
357
+ # :wtimeout specifies how long to wait for write confirmation,
358
+ # :fsync will confirm that a write has been fsynced.
359
+ # Options provided here will override any write concern options set on this collection,
360
+ # its database object, or the current connection. See the options
361
+ # for +DB#get_last_error+.
345
362
  #
346
363
  # @option opts [Boolean] :continue_on_error (+false+) If true, then
347
364
  # continue a bulk insert even if one of the documents inserted
348
365
  # triggers a database assertion (as in a duplicate insert, for instance).
349
- # If not using safe mode, the list of ids returned will
366
+ # If not acknowledging writes, the list of ids returned will
350
367
  # include the object ids of all documents attempted on insert, even
351
- # if some are rejected on error. When safe mode is
352
- # enabled, any error will raise an OperationFailure exception.
368
+ # if some are rejected on error. When acknowledging writes, any error will raise an
369
+ # OperationFailure exception.
353
370
  # MongoDB v2.0+.
354
371
  # @option opts [Boolean] :collect_on_error (+false+) if true, then
355
372
  # collects invalid documents as an array. Note that this option changes the result format.
356
373
  #
374
+ # @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
375
+ #
357
376
  # @core insert insert-instance_method
358
377
  def insert(doc_or_docs, opts={})
359
378
  doc_or_docs = [doc_or_docs] unless doc_or_docs.is_a?(Array)
360
379
  doc_or_docs.collect! { |doc| @pk_factory.create_pk(doc) }
361
- safe = opts.fetch(:safe, @safe)
362
- result = insert_documents(doc_or_docs, @name, true, safe, opts)
380
+ write_concern = get_write_concern(opts, self)
381
+ result = insert_documents(doc_or_docs, @name, true, write_concern, opts)
363
382
  result.size > 1 ? result : result.first
364
383
  end
365
384
  alias_method :<<, :insert
@@ -369,12 +388,14 @@ module Mongo
369
388
  # @param [Hash] selector
370
389
  # If specified, only matching documents will be removed.
371
390
  #
372
- # @option opts [Boolean, Hash] :safe (+false+)
373
- # run the operation in safe mode, which will run a getlasterror command on the
374
- # database to report any assertion. In addition, a hash can be provided to
375
- # run an fsync and/or wait for replication of the remove (>= 1.5.1). Safe
376
- # options provided here will override any safe options set on this collection,
377
- # its database, or the current connection. See the options for DB#get_last_error for more details.
391
+ # @option opts [Hash] :w, :j, :wtimeout, :fsync Set the write concern for this operation.
392
+ # :w > 0 will run a +getlasterror+ command on the database to report any assertion.
393
+ # :j will confirm a write has been committed to the journal,
394
+ # :wtimeout specifies how long to wait for write confirmation,
395
+ # :fsync will confirm that a write has been fsynced.
396
+ # Options provided here will override any write concern options set on this collection,
397
+ # its database object, or the current connection. See the options
398
+ # for +DB#get_last_error+.
378
399
  #
379
400
  # @example remove all documents from the 'users' collection:
380
401
  # users.remove
@@ -383,24 +404,22 @@ module Mongo
383
404
  # @example remove only documents that have expired:
384
405
  # users.remove({:expire => {"$lte" => Time.now}})
385
406
  #
386
- # @return [Hash, true] Returns a Hash containing the last error object if running in safe mode.
407
+ # @return [Hash, true] Returns a Hash containing the last error object if acknowledging writes
387
408
  # Otherwise, returns true.
388
409
  #
389
- # @raise [Mongo::OperationFailure] an exception will be raised iff safe mode is enabled
390
- # and the operation fails.
410
+ # @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
391
411
  #
392
412
  # @core remove remove-instance_method
393
413
  def remove(selector={}, opts={})
394
- # Initial byte is 0.
395
- safe = opts.fetch(:safe, @safe)
414
+ write_concern = get_write_concern(opts, self)
396
415
  message = BSON::ByteBuffer.new("\0\0\0\0")
397
416
  BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{@name}")
398
417
  message.put_int(0)
399
418
  message.put_binary(BSON::BSON_CODER.serialize(selector, false, true, @connection.max_bson_size).to_s)
400
419
 
401
420
  instrument(:remove, :database => @db.name, :collection => @name, :selector => selector) do
402
- if safe
403
- @connection.send_message_with_safe_check(Mongo::Constants::OP_DELETE, message, @db.name, nil, safe)
421
+ if Mongo::WriteConcern.gle?(write_concern)
422
+ @connection.send_message_with_gle(Mongo::Constants::OP_DELETE, message, @db.name, nil, write_concern)
404
423
  else
405
424
  @connection.send_message(Mongo::Constants::OP_DELETE, message)
406
425
  true
@@ -422,20 +441,24 @@ module Mongo
422
441
  # @option opts [Boolean] :upsert (+false+) if true, performs an upsert (update or insert)
423
442
  # @option opts [Boolean] :multi (+false+) update all documents matching the selector, as opposed to
424
443
  # just the first matching document. Note: only works in MongoDB 1.1.3 or later.
425
- # @option opts [Boolean] :safe (+false+)
426
- # If true, check that the save succeeded. OperationFailure
427
- # will be raised on an error. Note that a safe check requires an extra
428
- # round-trip to the database. Safe options provided here will override any safe
429
- # options set on this collection, its database object, or the current collection.
430
- # See the options for DB#get_last_error for details.
431
- #
432
- # @return [Hash, true] Returns a Hash containing the last error object if running in safe mode.
444
+ # @option opts [Hash] :w, :j, :wtimeout, :fsync Set the write concern for this operation.
445
+ # :w > 0 will run a +getlasterror+ command on the database to report any assertion.
446
+ # :j will confirm a write has been committed to the journal,
447
+ # :wtimeout specifies how long to wait for write confirmation,
448
+ # :fsync will confirm that a write has been fsynced.
449
+ # Options provided here will override any write concern options set on this collection,
450
+ # its database object, or the current connection. See the options
451
+ # for +DB#get_last_error+.
452
+ #
453
+ # @return [Hash, true] Returns a Hash containing the last error object if acknowledging writes.
433
454
  # Otherwise, returns true.
434
455
  #
456
+ # @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
457
+ #
435
458
  # @core update update-instance_method
436
459
  def update(selector, document, opts={})
437
460
  # Initial byte is 0.
438
- safe = opts.fetch(:safe, @safe)
461
+ write_concern = get_write_concern(opts, self)
439
462
  message = BSON::ByteBuffer.new("\0\0\0\0")
440
463
  BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{@name}")
441
464
  update_options = 0
@@ -450,8 +473,8 @@ module Mongo
450
473
  message.put_binary(BSON::BSON_CODER.serialize(document, check_keys, true, @connection.max_bson_size).to_s)
451
474
 
452
475
  instrument(:update, :database => @db.name, :collection => @name, :selector => selector, :document => document) do
453
- if safe
454
- @connection.send_message_with_safe_check(Mongo::Constants::OP_UPDATE, message, @db.name, nil, safe)
476
+ if Mongo::WriteConcern.gle?(write_concern)
477
+ @connection.send_message_with_gle(Mongo::Constants::OP_UPDATE, message, @db.name, nil, write_concern)
455
478
  else
456
479
  @connection.send_message(Mongo::Constants::OP_UPDATE, message)
457
480
  end
@@ -502,18 +525,17 @@ module Mongo
502
525
  # @core indexes create_index-instance_method
503
526
  def create_index(spec, opts={})
504
527
  opts[:dropDups] = opts[:drop_dups] if opts[:drop_dups]
505
- field_spec = parse_index_spec(spec)
506
- opts = opts.dup
507
- name = opts.delete(:name) || generate_index_name(field_spec)
508
- name = name.to_s if name
509
-
528
+ field_spec = parse_index_spec(spec)
529
+ opts = opts.dup
530
+ name = opts.delete(:name) || generate_index_name(field_spec)
531
+ name = name.to_s if name
510
532
  generate_indexes(field_spec, name, opts)
511
533
  name
512
534
  end
513
535
 
514
536
  # Calls create_index and sets a flag to not do so again for another X minutes.
515
537
  # this time can be specified as an option when initializing a Mongo::DB object as options[:cache_time]
516
- # Any changes to an index will be propogated through regardless of cache time (e.g., a change of index direction)
538
+ # Any changes to an index will be propagated through regardless of cache time (e.g., a change of index direction)
517
539
  #
518
540
  # The parameters and options for this methods are the same as those for Collection#create_index.
519
541
  #
@@ -528,12 +550,11 @@ module Mongo
528
550
  #
529
551
  # @return [String] the name of the index.
530
552
  def ensure_index(spec, opts={})
531
- now = Time.now.utc.to_i
553
+ now = Time.now.utc.to_i
532
554
  opts[:dropDups] = opts[:drop_dups] if opts[:drop_dups]
533
- field_spec = parse_index_spec(spec)
534
-
535
- name = opts[:name] || generate_index_name(field_spec)
536
- name = name.to_s if name
555
+ field_spec = parse_index_spec(spec)
556
+ name = opts[:name] || generate_index_name(field_spec)
557
+ name = name.to_s if name
537
558
 
538
559
  if !@cache[name] || @cache[name] <= now
539
560
  generate_indexes(field_spec, name, opts)
@@ -594,7 +615,7 @@ module Mongo
594
615
 
595
616
  @db.command(cmd)['value']
596
617
  end
597
-
618
+
598
619
  # Perform an aggregation using the aggregation framework on the current collection.
599
620
  # @note Aggregate requires server version >= 2.1.1
600
621
  # @note Field References: Within an expression, field names must be quoted and prefixed by a dollar sign ($).
@@ -603,13 +624,13 @@ module Mongo
603
624
  # coll.aggregate([ {"$project" => {"last_name" => 1, "first_name" => 1 }}, {"$match" => {"last_name" => "Jones"}} ])
604
625
  #
605
626
  # @param [Array] pipeline Should be a single array of pipeline operator hashes.
606
- #
627
+ #
607
628
  # '$project' Reshapes a document stream by including fields, excluding fields, inserting computed fields,
608
629
  # renaming fields,or creating/populating fields that hold sub-documents.
609
630
  #
610
631
  # '$match' Query-like interface for filtering documents out of the aggregation pipeline.
611
632
  #
612
- # '$limit' Restricts the number of documents that pass through the pipline.
633
+ # '$limit' Restricts the number of documents that pass through the pipeline.
613
634
  #
614
635
  # '$skip' Skips over the specified number of documents and passes the rest along the pipeline.
615
636
  #
@@ -636,10 +657,10 @@ module Mongo
636
657
  unless Mongo::Support.ok?(result)
637
658
  raise Mongo::OperationFailure, "aggregate failed: #{result['errmsg']}"
638
659
  end
639
-
660
+
640
661
  return result["result"]
641
662
  end
642
-
663
+
643
664
  # Perform a map-reduce operation on the current collection.
644
665
  #
645
666
  # @param [String, BSON::Code] map a map function, written in JavaScript.
@@ -655,7 +676,7 @@ module Mongo
655
676
  # @option opts [String] :out (nil) a valid output type. In versions of MongoDB prior to v1.7.6,
656
677
  # this option takes the name of a collection for the output results. In versions 1.7.6 and later,
657
678
  # this option specifies the output type. See the core docs for available output types.
658
- # @option opts [Boolean] :keeptemp (false) if true, the generated collection will be persisted. The defualt
679
+ # @option opts [Boolean] :keeptemp (false) if true, the generated collection will be persisted. The default
659
680
  # is false. Note that this option has no effect is versions of MongoDB > v1.7.6.
660
681
  # @option opts [Boolean ] :verbose (false) if true, provides statistics on job execution time.
661
682
  # @option opts [Boolean] :raw (false) if true, return the raw result object from the map_reduce command, and not
@@ -841,11 +862,10 @@ module Mongo
841
862
  # @return [Array] an array of distinct values.
842
863
  def distinct(key, query=nil)
843
864
  raise MongoArgumentError unless [String, Symbol].include?(key.class)
844
- command = BSON::OrderedHash.new
865
+ command = BSON::OrderedHash.new
845
866
  command[:distinct] = @name
846
867
  command[:key] = key.to_s
847
868
  command[:query] = query
848
-
849
869
  @db.command(command)["values"]
850
870
  end
851
871
 
@@ -977,7 +997,7 @@ module Mongo
977
997
  selector.merge!(opts)
978
998
 
979
999
  begin
980
- insert_documents([selector], Mongo::DB::SYSTEM_INDEX_COLLECTION, false, true)
1000
+ insert_documents([selector], Mongo::DB::SYSTEM_INDEX_COLLECTION, false, {:w => 1})
981
1001
 
982
1002
  rescue Mongo::OperationFailure => e
983
1003
  if selector[:dropDups] && e.message =~ /^11000/
@@ -994,7 +1014,7 @@ module Mongo
994
1014
  # Sends a Mongo::Constants::OP_INSERT message to the database.
995
1015
  # Takes an array of +documents+, an optional +collection_name+, and a
996
1016
  # +check_keys+ setting.
997
- def insert_documents(documents, collection_name=@name, check_keys=true, safe=false, flags={})
1017
+ def insert_documents(documents, collection_name=@name, check_keys=true, write_concern={}, flags={})
998
1018
  if flags[:continue_on_error]
999
1019
  message = BSON::ByteBuffer.new
1000
1020
  message.put_int(1)
@@ -1026,8 +1046,8 @@ module Mongo
1026
1046
  raise InvalidOperation, "Exceded maximum insert size of 16,777,216 bytes" if message.size > @connection.max_bson_size
1027
1047
 
1028
1048
  instrument(:insert, :database => @db.name, :collection => collection_name, :documents => documents) do
1029
- if safe
1030
- @connection.send_message_with_safe_check(Mongo::Constants::OP_INSERT, message, @db.name, nil, safe)
1049
+ if Mongo::WriteConcern.gle?(write_concern)
1050
+ @connection.send_message_with_gle(Mongo::Constants::OP_INSERT, message, @db.name, nil, write_concern)
1031
1051
  else
1032
1052
  @connection.send_message(Mongo::Constants::OP_INSERT, message)
1033
1053
  end