mongo 1.3.1 → 1.4.0

Files changed (75)
  1. data/README.md +9 -6
  2. data/Rakefile +3 -4
  3. data/docs/HISTORY.md +20 -2
  4. data/docs/READ_PREFERENCE.md +39 -0
  5. data/docs/RELEASES.md +1 -1
  6. data/docs/REPLICA_SETS.md +23 -2
  7. data/docs/TAILABLE_CURSORS.md +51 -0
  8. data/docs/TUTORIAL.md +4 -4
  9. data/docs/WRITE_CONCERN.md +5 -2
  10. data/lib/mongo.rb +7 -22
  11. data/lib/mongo/collection.rb +96 -29
  12. data/lib/mongo/connection.rb +107 -62
  13. data/lib/mongo/cursor.rb +136 -57
  14. data/lib/mongo/db.rb +26 -5
  15. data/lib/mongo/exceptions.rb +17 -1
  16. data/lib/mongo/gridfs/grid.rb +1 -1
  17. data/lib/mongo/repl_set_connection.rb +273 -156
  18. data/lib/mongo/util/logging.rb +42 -0
  19. data/lib/mongo/util/node.rb +183 -0
  20. data/lib/mongo/util/pool.rb +76 -13
  21. data/lib/mongo/util/pool_manager.rb +208 -0
  22. data/lib/mongo/util/ssl_socket.rb +38 -0
  23. data/lib/mongo/util/support.rb +9 -1
  24. data/lib/mongo/util/timeout.rb +42 -0
  25. data/lib/mongo/version.rb +3 -0
  26. data/mongo.gemspec +2 -2
  27. data/test/bson/binary_test.rb +1 -1
  28. data/test/bson/bson_string_test.rb +30 -0
  29. data/test/bson/bson_test.rb +6 -3
  30. data/test/bson/byte_buffer_test.rb +1 -1
  31. data/test/bson/hash_with_indifferent_access_test.rb +1 -1
  32. data/test/bson/json_test.rb +1 -1
  33. data/test/bson/object_id_test.rb +2 -18
  34. data/test/bson/ordered_hash_test.rb +38 -3
  35. data/test/bson/test_helper.rb +46 -0
  36. data/test/bson/timestamp_test.rb +32 -10
  37. data/test/collection_test.rb +89 -3
  38. data/test/connection_test.rb +35 -20
  39. data/test/cursor_test.rb +63 -2
  40. data/test/db_test.rb +12 -2
  41. data/test/pool_test.rb +21 -0
  42. data/test/replica_sets/connect_test.rb +26 -13
  43. data/test/replica_sets/connection_string_test.rb +1 -4
  44. data/test/replica_sets/count_test.rb +1 -0
  45. data/test/replica_sets/insert_test.rb +1 -0
  46. data/test/replica_sets/pooled_insert_test.rb +4 -1
  47. data/test/replica_sets/query_secondaries.rb +2 -1
  48. data/test/replica_sets/query_test.rb +2 -1
  49. data/test/replica_sets/read_preference_test.rb +43 -0
  50. data/test/replica_sets/refresh_test.rb +123 -0
  51. data/test/replica_sets/replication_ack_test.rb +9 -4
  52. data/test/replica_sets/rs_test_helper.rb +2 -2
  53. data/test/timeout_test.rb +14 -0
  54. data/test/tools/repl_set_manager.rb +134 -23
  55. data/test/unit/collection_test.rb +6 -8
  56. data/test/unit/connection_test.rb +4 -4
  57. data/test/unit/cursor_test.rb +23 -5
  58. data/test/unit/db_test.rb +2 -0
  59. data/test/unit/grid_test.rb +2 -0
  60. data/test/unit/node_test.rb +73 -0
  61. data/test/unit/pool_manager_test.rb +47 -0
  62. data/test/unit/read_test.rb +101 -0
  63. metadata +214 -138
  64. data/lib/mongo/test.rb +0 -20
  65. data/test/async/collection_test.rb +0 -224
  66. data/test/async/connection_test.rb +0 -24
  67. data/test/async/cursor_test.rb +0 -162
  68. data/test/async/worker_pool_test.rb +0 -99
  69. data/test/load/resque/load.rb +0 -21
  70. data/test/load/resque/processor.rb +0 -26
  71. data/test/load/unicorn/unicorn.rb +0 -29
  72. data/test/tools/load.rb +0 -58
  73. data/test/tools/sharding_manager.rb +0 -202
  74. data/test/tools/test.rb +0 -4
  75. data/test/unit/repl_set_connection_test.rb +0 -59
data/README.md CHANGED
@@ -7,11 +7,13 @@ This documentation includes other articles of interest, include:
  1. [A tutorial](http://api.mongodb.org/ruby/current/file.TUTORIAL.html).
  2. [Replica Sets in Ruby](http://api.mongodb.org/ruby/current/file.REPLICA_SETS.html).
  3. [Write Concern in Ruby](http://api.mongodb.org/ruby/current/file.WRITE_CONCERN.html).
- 4. [GridFS in Ruby](http://api.mongodb.org/ruby/current/file.GridFS.html).
- 5. [Frequently Asked Questions](http://api.mongodb.org/ruby/current/file.FAQ.html).
- 6. [History](http://api.mongodb.org/ruby/current/file.HISTORY.html).
- 6. [Release plan](http://api.mongodb.org/ruby/current/file.RELEASES.html).
- 7. [Credits](http://api.mongodb.org/ruby/current/file.CREDITS.html).
+ 4. [Tailable Cursors in Ruby](http://api.mongodb.org/ruby/current/file.TAILABLE_CURSORS.html).
+ 5. [Read Preference in Ruby](http://api.mongodb.org/ruby/current/file.READ_PREFERENCE.html).
+ 6. [GridFS in Ruby](http://api.mongodb.org/ruby/current/file.GridFS.html).
+ 7. [Frequently Asked Questions](http://api.mongodb.org/ruby/current/file.FAQ.html).
+ 8. [History](http://api.mongodb.org/ruby/current/file.HISTORY.html).
+ 9. [Release plan](http://api.mongodb.org/ruby/current/file.RELEASES.html).
+ 10. [Credits](http://api.mongodb.org/ruby/current/file.CREDITS.html).

  Here's a quick code sample. Again, see the [MongoDB Ruby Tutorial](http://api.mongodb.org/ruby/current/file.TUTORIAL.html)
  for much more:
@@ -109,9 +111,10 @@ for details, and see examples/gridfs.rb for code that uses many of the Grid
  features (metadata, content type, seek, tell, etc).

  Examples:
+
  # Write a file on disk to the Grid
  file = File.open('image.jpg')
- grid = Grid.new(db)
+ grid = Mongo::Grid.new(db)
  id = grid.put(file)

  # Retrieve the file
data/Rakefile CHANGED
@@ -22,12 +22,11 @@ namespace :build do
  jar_dir = File.join(java_dir, 'jar')

  jruby_jar = File.join(jar_dir, 'jruby.jar')
- mongo_jar = File.join(jar_dir, 'mongo-2.4.jar')
- bson_jar = File.join(jar_dir, 'bson-2.2.jar')
+ mongo_jar = File.join(jar_dir, 'mongo-2.6.5.jar')

  src_base = File.join(java_dir, 'src')

- system("javac -Xlint:unchecked -classpath #{jruby_jar}:#{mongo_jar}:#{bson_jar} #{File.join(src_base, 'org', 'jbson', '*.java')}")
+ system("javac -Xlint:deprecation -Xlint:unchecked -classpath #{jruby_jar}:#{mongo_jar} #{File.join(src_base, 'org', 'jbson', '*.java')}")
  system("cd #{src_base} && jar cf #{File.join(jar_dir, 'jbson.jar')} #{File.join('.', 'org', 'jbson', '*.class')}")
  end
  end
@@ -142,7 +141,7 @@ task :ydoc do
  require File.join(File.dirname(__FILE__), 'lib', 'mongo')
  out = File.join('ydoc', Mongo::VERSION)
  FileUtils.rm_rf('ydoc')
- system "yardoc lib/**/*.rb lib/mongo/**/*.rb lib/bson/**/*.rb -e yard/yard_ext.rb -p yard/templates -o #{out} --title MongoRuby-#{Mongo::VERSION} --files docs/TUTORIAL.md,docs/GridFS.md,docs/FAQ.md,docs/REPLICA_SETS.md,docs/WRITE_CONCERN.md,docs/HISTORY.md,docs/CREDITS.md,docs/RELEASES.md"
+ system "yardoc lib/**/*.rb lib/mongo/**/*.rb lib/bson/**/*.rb -e ./yard/yard_ext.rb -p yard/templates -o #{out} --title MongoRuby-#{Mongo::VERSION} --files docs/TUTORIAL.md,docs/GridFS.md,docs/FAQ.md,docs/REPLICA_SETS.md,docs/WRITE_CONCERN.md,docs/READ_PREFERENCE.md,docs/HISTORY.md,docs/CREDITS.md,docs/RELEASES.md,docs/CREDITS.md,docs/TAILABLE_CURSORS.md"
  end

  namespace :bamboo do
data/docs/HISTORY.md CHANGED
@@ -1,5 +1,23 @@
  # MongoDB Ruby Driver History

+ ### 1.4.0
+ 2011-9-19
+
+ * Attempt to automatically refresh internal replica set state using ReplSetConnection#refresh.
+ * Two automated refresh modes: :async and :sync. Automated refresh can also be disabled.
+ * Choose secondary for reads based on ping time.
+ * Read preference API: specify whether queries should go to primary or secondary on a per-query basis.
+ * Pass :require_primary => false to ReplSetConnection to connect without requiring a primary node.
+ * Enable exhaust-mode queries with OP_QUERY_EXHAUST.
+ * Collection#count takes a query selector.
+ * Support continue_on_error flag for bulk inserts (use :continue_on_error => true)
+ * Add Cursor#add_option. Deprecate Cursor#query_opts and replace with Cursor#options.
+ * Initial SSL support (connect with :ssl => true)
+ * Update to latest Java driver for JRuby.
+ * Check max BSON size on a per-connection basis.
+ * Fixed two platform-specific BSON serialization issues.
+ * Lots of bug fixes and code cleanup.
+
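Two of the additions listed above, sketched briefly against the API documented elsewhere in this release; the host, database, and collection names are placeholders.

    require 'mongo'

    con  = Mongo::Connection.new('localhost', 27017)
    coll = con['test']['people']

    # Collection#count now accepts a query selector (plus :skip and :limit).
    adults = coll.count(:query => {:age => {'$gt' => 21}})

    # A bulk insert that keeps going past individual failures, such as
    # duplicate-key errors (requires MongoDB v2.0+).
    coll.insert([{:_id => 1}, {:_id => 1}, {:_id => 2}], :continue_on_error => true)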
  ### 1.3.1
  2011-5-10

@@ -43,8 +61,8 @@

  * Fix the exception message shown when there's an IOError (Mauro Pompilio)
  * Another update to map-reduce docs for v1.8. Note that if you use the new
- output option {:out => {:inline => true}}, then you must also specify
- :raw => true.
+ output option `{:out => {:inline => true}}`, then you must also specify
+ `:raw => true`.

  ### 1.2.3
  2011-2-22
data/docs/READ_PREFERENCE.md ADDED
@@ -0,0 +1,39 @@
+ # Read Preference in Ruby
+
+ ## Setting the read preference
+
+ You can use the `:read` option to specify a query's read preference. For now, there are two possible options:
+
+ @collection.find({:doc => 'foo'}, :read => :primary)
+ @collection.find({:doc => 'foo'}, :read => :secondary)
+
+ In the first case, the query will be directed to the primary node in a replica set. In the second, the query will be sent
+ to a secondary node. The driver will attempt to choose a secondary node that's nearby, as determined by ping time. If more
+ than one secondary node is close by (e.g., responds to pings within 10ms), then a random node within this subset will be chosen.
+
+ ## Read preference inheritance
+
+ The Ruby driver allows you to set read preference on each of four levels: the connection, database, collection, and cursor (or read operation).
+ Objects will inherit the default read preference from their parents. Thus, if you set a read preference of `{:read => :secondary}` when creating
+ a new connection, then all databases and collections created from that connection will inherit the same setting. See this code example:
+
+ @con = Mongo::ReplSetConnection.new([['localhost', 27017], ['localhost', 27018]], :read => :secondary)
+ @db = @con['test']
+ @collection = @db['foo']
+ @collection.find({:name => 'foo'})
+
+ @collection.find({:name => 'bar'}, :read => :primary)
+
+ Here, the first call to Collection#find will use the inherited read preference, `{:read => :secondary}`. But the second call
+ to Collection#find overrides this setting by setting the preference to `:primary`.
+
+ You can examine the read preference on any object by calling its `read_preference` method:
+
+ @con.read_preference
+ @db.read_preference
+ @collection.read_preference
+
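To round out the inheritance rules above, here is a small sketch of overriding the inherited preference at the collection level. It assumes `DB#collection` forwards the `:read` option documented on Collection in this release; hosts and names are placeholders.

    @con = Mongo::ReplSetConnection.new([['localhost', 27017], ['localhost', 27018]], :read => :secondary)
    @db  = @con['test']

    # Override the connection-level preference for one collection only.
    @users = @db.collection('users', :read => :primary)

    @users.read_preference          # => :primary
    @db['events'].read_preference   # => :secondary, inherited from the connection

    # A single query can still override the collection-level setting.
    @users.find({:name => 'foo'}, :read => :secondary)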
+ ## Future work
+
+ In the v2.0 release of the driver, you'll also be able to specify a read preference consisting of a set of tags. This way,
+ you'll be able to direct reads to a replica set member. You can follow this issue's progress here: (https://jira.mongodb.org/browse/RUBY-326).
data/docs/RELEASES.md CHANGED
@@ -13,7 +13,7 @@ following release rules:

  2. Minor versions (Y in x.Y.z) will be released if new, backward-compatible functionality is introduced to the public API.

- 3. Major versions (X in X.y.z) will be incremented if any backward-incompatibl changes are introduced to the public API.
+ 3. Major versions (X in X.y.z) will be incremented if any backward-incompatible changes are introduced to the public API.

  This policy will clearly indicate to users when an upgrade may affect their code. As a side effect, version numbers will climb more quickly than before.

data/docs/REPLICA_SETS.md CHANGED
@@ -15,10 +15,10 @@ cache the replica set topology as reported by the given seed node and use that i

  ### Read slaves

- If you want to read from a seconday node, you can pass :read_secondary => true to ReplSetConnection#new.
+ If you want to read from a secondary node, you can pass :read => :secondary to ReplSetConnection#new.

  @connection = ReplSetConnection.new(['n1.mydb.net', 27017], ['n2.mydb.net', 27017], ['n3.mydb.net', 27017],
- :read_secondary => true)
+ :read => :secondary)

  A random secondary will be chosen to be read from. In a typical multi-process Ruby application, you'll have a good distribution of reads across secondary nodes.

@@ -32,6 +32,27 @@ If the client decides to retry, it's not guaranteed that another member of the r

  The driver will essentially cycle through all known seed addresses until a node identifies itself as master.

+ ### Refresh mode
+
+ You can now specify a refresh mode and refresh interval for a replica set connection. This will help to ensure that
+ changes to a replica set's configuration are quickly reflected on the driver side. Refresh mode is
+ enabled in synchronous mode by default. Here's how to specify this explicitly:
+
+ @connection = ReplSetConnection.new(['n1.mydb.net', 27017], :refresh_mode => :sync)
+
+ If you want the refresh to happen via a background thread, use the `:async` mode:
+
+ @connection = ReplSetConnection.new(['n1.mydb.net', 27017], :refresh_mode => :async)
+
+ If you want to change the default refresh interval of 90 seconds, you can do so like this:
+
+ @connection = ReplSetConnection.new(['n1.mydb.net', 27017], :refresh_mode => :async,
+ :refresh_interval => 60)
+
+ You can also disable refresh mode altogether:
+
+ @connection = ReplSetConnection.new(['n1.mydb.net', 27017], :refresh_mode => false)
+
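Putting these options together, a sketch of a connection that combines the refresh settings above with the `:read` preference and the `:require_primary => false` option mentioned in the 1.4.0 history; the hosts are placeholders.

    @connection = ReplSetConnection.new(['n1.mydb.net', 27017], ['n2.mydb.net', 27017],
                    :refresh_mode     => :async,     # refresh replica set state from a background thread
                    :refresh_interval => 60,         # every 60 seconds rather than the default 90
                    :read             => :secondary,
                    :require_primary  => false)      # connect even if no primary is currently reachable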
  ### Recovery

  Driver users may wish to wrap their database calls with failure recovery code. Here's one possibility, which will attempt to connection
data/docs/TAILABLE_CURSORS.md ADDED
@@ -0,0 +1,51 @@
+ # Tailable cursors in Ruby
+
+ Tailable cursors are cursors that remain open even after they've returned
+ a final result. This way, if more documents are added to a collection (i.e.,
+ to the cursor's result set), then you can continue to call `Cursor#next` to
+ retrieve those results. Here's a complete test case that demonstrates the use
+ of tailable cursors.
+
+ Note that tailable cursors are for capped collections only.
+
+     require 'mongo'
+     require 'test/unit'
+
+     class TestTailable < Test::Unit::TestCase
+       include Mongo
+
+       def test_tailable
+
+         # Create a connection and capped collection.
+         @con = Connection.new
+         @db = @con['test']
+         @db.drop_collection('log')
+         @capped = @db.create_collection('log', :capped => true, :size => 1024)
+
+         # Insert 10 documents.
+         10.times do |n|
+           @capped.insert({:n => n})
+         end
+
+         # Create a tailable cursor that iterates the collection in natural order
+         @tail = Cursor.new(@capped, :tailable => true, :order => [['$natural', 1]])
+
+         # Call Cursor#next 10 times. Each call returns a document.
+         10.times do
+           assert @tail.next
+         end
+
+         # But the 11th time, the cursor returns nothing.
+         assert_nil @tail.next
+
+         # Add a document to the capped collection.
+         @capped.insert({:n => 100})
+
+         # Now call Cursor#next again. This will return the just-inserted result.
+         assert @tail.next
+
+         # Close the cursor.
+         @tail.close
+       end
+
+     end
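Outside of a test case, a tailable cursor is typically consumed in a polling loop. A minimal sketch using only the calls shown above; the collection name and sleep interval are illustrative.

    require 'mongo'

    capped = Mongo::Connection.new['test']['log']
    tail   = Mongo::Cursor.new(capped, :tailable => true, :order => [['$natural', 1]])

    loop do
      if doc = tail.next
        puts doc.inspect   # process the newly arrived document
      else
        sleep 1            # nothing new yet; wait before polling again
      end
    end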
data/docs/TUTORIAL.md CHANGED
@@ -15,7 +15,7 @@ Next, install the mongo rubygem:

  The required `bson` gem will be installed automatically.

- For optimum performance, install the bson_ext gem:
+ For optimum performance, install the bson\_ext gem:

  gem install bson_ext

@@ -98,7 +98,7 @@ Once you have the collection object, you can insert documents into the collectio
  Notice that the above has an "inner" document embedded within it. To do this, we can use a Hash or the driver's OrderedHash (which preserves key order) to create the document (including the inner document), and then just simply insert it into the collection using the `insert()` method.

  doc = {"name" => "MongoDB", "type" => "database", "count" => 1,
- "info" => {"x" => 203, "y" => '102'`
+ "info" => {"x" => 203, "y" => '102'}}
  coll.insert(doc)

  #### Updating a Document
@@ -110,7 +110,7 @@ We can update the previous document using the `update` method. There are a coupl

  Or we can use an atomic operator to change a single value:

- coll.update({"_id" => doc["_id"]}, {"$set" => {"name" => "MongoDB Ruby"`)
+ coll.update({"_id" => doc["_id"]}, {"$set" => {"name" => "MongoDB Ruby"}})

  Read [more about updating documents|Updating].

@@ -126,7 +126,7 @@ and you should see:
  {"_id"=>#<BSON::ObjectID:0x118576c ...>, "name"=>"MongoDB",
  "info"=>{"x"=>203, "y"=>102}, "type"=>"database", "count"=>1}

- Note the `\_id` element has been added automatically by MongoDB to your document.
+ Note the `_id` element has been added automatically by MongoDB to your document.

  #### Adding Multiple Documents

data/docs/WRITE_CONCERN.md CHANGED
@@ -7,9 +7,12 @@ Write concern is set using the `:safe` option. There are several possible option
  @collection.save({:doc => 'foo'}, :safe => true)
  @collection.save({:doc => 'foo'}, :safe => {:w => 2})
  @collection.save({:doc => 'foo'}, :safe => {:w => 2, :wtimeout => 200})
- @collection.save({:doc => 'foo'}, :safe => {:w => 2, :wtimeout => 200, :fsync => true})
+ @collection.save({:doc => 'foo'}, :safe => {:w => 2, :wtimeout => 200, :j => true})

- The first, `true`, simply indicates that we should request a response from the server to ensure that to errors have occurred. The second, `{:w => 2}`forces the server to wait until at least two servers have recorded the write. The third does the same but will time out if the replication can't be completed in 200 milliseconds. The fourth forces an fsync on each server being written to (note: this option is rarely necessary and will have a dramaticly negative effect on performance).
+ The first, `true`, simply indicates that we should request a response from the server to ensure that no errors have occurred. The second, `{:w => 2}`, forces the server to wait until at least two servers have recorded the write. The third does the same but will time out if the replication can't be completed in 200 milliseconds.
+ Setting a value for `wtimeout` is encouraged.
+
+ Finally, the fourth example forces the journal to sync to disk if journaling is enabled.
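When a `:safe` write cannot satisfy the requested concern, the driver raises `Mongo::OperationFailure` (as noted in the Collection documentation changed in this release). A minimal sketch of handling that case; the two-server requirement is just an example.

    begin
      @collection.save({:doc => 'foo'}, :safe => {:w => 2, :wtimeout => 200})
    rescue Mongo::OperationFailure => ex
      # The write was not acknowledged by two members within 200 ms.
      warn "write concern not satisfied: #{ex.message}"
    end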

  ## Write concern inheritance

data/lib/mongo.rb CHANGED
@@ -18,9 +18,7 @@

  $:.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))

- module Mongo
- VERSION = "1.3.1"
- end
+ require 'mongo/version'

  module Mongo
  ASCENDING = 1
@@ -58,8 +56,12 @@ require 'bson'
  require 'mongo/util/conversions'
  require 'mongo/util/support'
  require 'mongo/util/core_ext'
+ require 'mongo/util/logging'
+ require 'mongo/util/node'
  require 'mongo/util/pool'
+ require 'mongo/util/pool_manager'
  require 'mongo/util/server_version'
+ require 'mongo/util/ssl_socket'
  require 'mongo/util/uri_parser'

  require 'mongo/collection'
@@ -76,22 +78,5 @@ if RUBY_PLATFORM =~ /java/
  end
  require 'mongo/gridfs/grid_file_system'

- # Use SystemTimer on Ruby 1.8
- if !defined?(RUBY_ENGINE) || (RUBY_ENGINE == 'ruby' && RUBY_VERSION < '1.9.0')
- begin
- require 'system_timer'
- if SystemTimer.method(:timeout).arity.abs != 2
- raise LoadError
- end
- Mongo::TimeoutHandler = SystemTimer
- rescue LoadError
- warn "Could not load SystemTimer >= v1.2.0. Falling back to timeout.rb. " +
- "SystemTimer is STRONGLY recommended for timeouts in Ruby 1.8.7. " +
- "See http://ph7spot.com/blog/system-timer-1-2-release for details."
- require 'timeout'
- Mongo::TimeoutHandler = Timeout
- end
- else
- require 'timeout'
- Mongo::TimeoutHandler = Timeout
- end
+ require 'timeout'
+ Mongo::TimeoutHandler = Timeout
data/lib/mongo/collection.rb CHANGED
@@ -19,6 +19,7 @@ module Mongo

  # A named collection of documents in a database.
  class Collection
+ include Mongo::Logging

  attr_reader :db, :name, :pk_factory, :hint, :safe

@@ -34,6 +35,11 @@
  # for insert, update, and remove method called on this Collection instance. If no
  # value is provided, the default value set on this instance's DB will be used. This
  # default can be overridden for any invocation of insert, update, or remove.
+ # @option options [:primary, :secondary] :read The default read preference for queries
+ # initiates from this connection object. If +:secondary+ is chosen, reads will be sent
+ # to one of the closest available secondary nodes. If a secondary node cannot be located, the
+ # read will be sent to the primary. If this option is left unspecified, the value of the read
+ # preference for this collection's associated Mongo::DB object will be used.
  #
  # @raise [InvalidNSName]
  # if collection name is empty, contains '$', or starts or ends with '.'
@@ -47,7 +53,8 @@
  def initialize(name, db, opts={})
  if db.is_a?(String) && name.is_a?(Mongo::DB)
  warn "Warning: the order of parameters to initialize a collection have changed. " +
- "Please specify the collection name first, followed by the db."
+ "Please specify the collection name first, followed by the db. This will be made permanent" +
+ "in v2.0."
  db, name = name, db
  end

@@ -83,11 +90,27 @@
  @cache = Hash.new(0)
  unless pk_factory
  @safe = opts.fetch(:safe, @db.safe)
+ if value = opts[:read]
+ Mongo::Support.validate_read_preference(value)
+ else
+ value = @db.read_preference
+ end
+ @read_preference = value.is_a?(Hash) ? value.dup : value
  end
  @pk_factory = pk_factory || opts[:pk] || BSON::ObjectId
  @hint = nil
  end

+ # Indicate whether this is a capped collection.
+ #
+ # @raise [Mongo::OperationFailure]
+ # if the collection doesn't exist.
+ #
+ # @return [Boolean]
+ def capped?
+ @db.command({:collstats => @name})['capped'] == 1
+ end
+
  # Return a sub-collection of this collection by name. If 'users' is a collection, then
  # 'users.comments' is a sub-collection of users.
  #
@@ -144,6 +167,11 @@
  # you can cut down on network traffic and decoding time. If using a Hash, keys should be field
  # names and values should be either 1 or 0, depending on whether you want to include or exclude
  # the given field.
+ # @option opts [:primary, :secondary] :read The default read preference for queries
+ # initiates from this connection object. If +:secondary+ is chosen, reads will be sent
+ # to one of the closest available secondary nodes. If a secondary node cannot be located, the
+ # read will be sent to the primary. If this option is left unspecified, the value of the read
+ # preference for this Collection object will be used.
  # @option opts [Integer] :skip number of documents to skip from the beginning of the result set
  # @option opts [Integer] :limit maximum number of documents to return
  # @option opts [Array] :sort an array of [key, direction] pairs to sort by. Direction should
@@ -186,7 +214,11 @@
  snapshot = opts.delete(:snapshot)
  batch_size = opts.delete(:batch_size)
  timeout = (opts.delete(:timeout) == false) ? false : true
+ max_scan = opts.delete(:max_scan)
+ return_key = opts.delete(:return_key)
  transformer = opts.delete(:transformer)
+ show_disk_loc = opts.delete(:max_scan)
+ read = opts.delete(:read) || @read_preference

  if timeout == false && !block_given?
  raise ArgumentError, "Collection#find must be invoked with a block when timeout is disabled."
@@ -201,16 +233,20 @@
  raise RuntimeError, "Unknown options [#{opts.inspect}]" unless opts.empty?

  cursor = Cursor.new(self, {
- :selector => selector,
- :fields => fields,
- :skip => skip,
+ :selector => selector,
+ :fields => fields,
+ :skip => skip,
  :limit => limit,
- :order => sort,
- :hint => hint,
- :snapshot => snapshot,
- :timeout => timeout,
+ :order => sort,
+ :hint => hint,
+ :snapshot => snapshot,
+ :timeout => timeout,
  :batch_size => batch_size,
  :transformer => transformer,
+ :max_scan => max_scan,
+ :show_disk_loc => show_disk_loc,
+ :return_key => return_key,
+ :read => read
  })

  if block_given?
@@ -267,8 +303,6 @@
  # for DB#error.
  #
  # @raise [OperationFailure] when :safe mode fails.
- #
- # @see DB#remove for options that can be passed to :safe.
  def save(doc, opts={})
  if doc.has_key?(:_id) || doc.has_key?('_id')
  id = doc[:_id] || doc['_id']
@@ -295,14 +329,17 @@
  # its database object, or the current connection. See the options on
  # for DB#get_last_error.
  #
- # @see DB#remove for options that can be passed to :safe.
+ # @option opts [Boolean] :continue_on_error (+false+) If true, then
+ # continue a bulk insert even if one of the documents inserted
+ # triggers a database assertion (as in a duplicate insert, for instance).
+ # MongoDB v2.0+.
  #
  # @core insert insert-instance_method
  def insert(doc_or_docs, opts={})
  doc_or_docs = [doc_or_docs] unless doc_or_docs.is_a?(Array)
  doc_or_docs.collect! { |doc| @pk_factory.create_pk(doc) }
  safe = opts.fetch(:safe, @safe)
- result = insert_documents(doc_or_docs, @name, true, safe)
+ result = insert_documents(doc_or_docs, @name, true, safe, opts)
  result.size > 1 ? result : result.first
  end
  alias_method :<<, :insert
@@ -332,8 +369,6 @@
  # @raise [Mongo::OperationFailure] an exception will be raised iff safe mode is enabled
  # and the operation fails.
  #
- # @see DB#remove for options that can be passed to :safe.
- #
  # @core remove remove-instance_method
  def remove(selector={}, opts={})
  # Initial byte is 0.
@@ -341,9 +376,9 @@
  message = BSON::ByteBuffer.new("\0\0\0\0")
  BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{@name}")
  message.put_int(0)
- message.put_binary(BSON::BSON_CODER.serialize(selector, false, true).to_s)
+ message.put_binary(BSON::BSON_CODER.serialize(selector, false, true, @connection.max_bson_size).to_s)

- @connection.instrument(:remove, :database => @db.name, :collection => @name, :selector => selector) do
+ instrument(:remove, :database => @db.name, :collection => @name, :selector => selector) do
  if safe
  @connection.send_message_with_safe_check(Mongo::Constants::OP_DELETE, message, @db.name, nil, safe)
  else
@@ -388,9 +423,9 @@
  update_options += 2 if opts[:multi]
  message.put_int(update_options)
  message.put_binary(BSON::BSON_CODER.serialize(selector, false, true).to_s)
- message.put_binary(BSON::BSON_CODER.serialize(document, false, true).to_s)
+ message.put_binary(BSON::BSON_CODER.serialize(document, false, true, @connection.max_bson_size).to_s)

- @connection.instrument(:update, :database => @db.name, :collection => @name, :selector => selector, :document => document) do
+ instrument(:update, :database => @db.name, :collection => @name, :selector => selector, :document => document) do
  if safe
  @connection.send_message_with_safe_check(Mongo::Constants::OP_UPDATE, message, @db.name, nil, safe)
  else
@@ -470,6 +505,7 @@
  # @return [String] the name of the index.
  def ensure_index(spec, opts={})
  now = Time.now.utc.to_i
+ opts[:dropDups] = opts[:drop_dups] if opts[:drop_dups]
  field_spec = parse_index_spec(spec)

  name = opts[:name] || generate_index_name(field_spec)
@@ -490,6 +526,9 @@
  #
  # @core indexes
  def drop_index(name)
+ if name.is_a?(Array)
+ return drop_index(index_name(name))
+ end
  @cache[name.to_s] = nil
  @db.drop_index(@name, name)
  end
@@ -552,12 +591,12 @@
  # @option opts [Boolean ] :verbose (false) if true, provides statistics on job execution time.
  # @option opts [Boolean] :raw (false) if true, return the raw result object from the map_reduce command, and not
  # the instantiated collection that's returned by default. Note if a collection name isn't returned in the
- # map-reduce output (as, for example, when using :out => {:inline => 1}), then you must specify this option
+ # map-reduce output (as, for example, when using :out => { :inline => 1 }), then you must specify this option
  # or an ArgumentError will be raised.
  #
  # @return [Collection, Hash] a Mongo::Collection object or a Hash with the map-reduce command's results.
  #
- # @raise ArgumentError if you specify {:out => {:inline => true}} but don't specify :raw => true.
+ # @raise ArgumentError if you specify { :out => { :inline => true }} but don't specify :raw => true.
  #
  # @see http://www.mongodb.org/display/DOCS/MapReduce Offical MongoDB map/reduce documentation.
  #
@@ -565,13 +604,16 @@
  def map_reduce(map, reduce, opts={})
  map = BSON::Code.new(map) unless map.is_a?(BSON::Code)
  reduce = BSON::Code.new(reduce) unless reduce.is_a?(BSON::Code)
- raw = opts[:raw]
+ raw = opts.delete(:raw)

  hash = BSON::OrderedHash.new
  hash['mapreduce'] = self.name
  hash['map'] = map
  hash['reduce'] = reduce
  hash.merge! opts
+ if hash[:sort]
+ hash[:sort] = Mongo::Support.format_order_clause(hash[:sort])
+ end

  result = @db.command(hash)
  unless Mongo::Support.ok?(result)
@@ -609,7 +651,7 @@
  if opts.is_a?(Hash)
  return new_group(opts)
  else
- warn "Collection#group no longer take a list of parameters. This usage is deprecated." +
+ warn "Collection#group no longer take a list of parameters. This usage is deprecated and will be remove in v2.0." +
  "Check out the new API at http://api.mongodb.org/ruby/current/Mongo/Collection.html#group-instance_method"
  end

@@ -656,6 +698,13 @@
  end
  end

+ # The value of the read preference. This will be
+ # either +:primary+, +:secondary+, or an object
+ # representing the tags to be read from.
+ def read_preference
+ @read_preference
+ end
+
  private

  def new_group(opts={})
@@ -792,9 +841,15 @@

  # Get the number of documents in this collection.
  #
+ # @option opts [Hash] :query ({}) A query selector for filtering the documents counted.
+ # @option opts [Integer] :skip (nil) The number of documents to skip.
+ # @option opts [Integer] :limit (nil) The number of documents to limit.
+ #
  # @return [Integer]
- def count
- find().count()
+ def count(opts={})
+ find(opts[:query],
+ :skip => opts[:skip],
+ :limit => opts[:limit]).count(true)
  end

  alias :size :count
@@ -818,6 +873,14 @@

  private

+ def index_name(spec)
+ field_spec = parse_index_spec(spec)
+ index_information.each do |index|
+ return index[0] if index[1]['key'] == field_spec
+ end
+ nil
+ end
+
  def parse_index_spec(spec)
  field_spec = BSON::OrderedHash.new
  if spec.is_a?(String) || spec.is_a?(Symbol)
@@ -864,16 +927,20 @@
  # Sends a Mongo::Constants::OP_INSERT message to the database.
  # Takes an array of +documents+, an optional +collection_name+, and a
  # +check_keys+ setting.
- def insert_documents(documents, collection_name=@name, check_keys=true, safe=false)
- # Initial byte is 0.
- message = BSON::ByteBuffer.new("\0\0\0\0")
+ def insert_documents(documents, collection_name=@name, check_keys=true, safe=false, flags={})
+ if flags[:continue_on_error]
+ message = BSON::ByteBuffer.new
+ message.put_int(1)
+ else
+ message = BSON::ByteBuffer.new("\0\0\0\0")
+ end
  BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{collection_name}")
  documents.each do |doc|
- message.put_binary(BSON::BSON_CODER.serialize(doc, check_keys, true).to_s)
+ message.put_binary(BSON::BSON_CODER.serialize(doc, check_keys, true, @connection.max_bson_size).to_s)
  end
  raise InvalidOperation, "Exceded maximum insert size of 16,000,000 bytes" if message.size > 16_000_000

- @connection.instrument(:insert, :database => @db.name, :collection => collection_name, :documents => documents) do
+ instrument(:insert, :database => @db.name, :collection => collection_name, :documents => documents) do
  if safe
  @connection.send_message_with_safe_check(Mongo::Constants::OP_INSERT, message, @db.name, nil, safe)
  else