mongo-lyon 1.2.4

Files changed (87)
  1. data/LICENSE.txt +190 -0
  2. data/README.md +344 -0
  3. data/Rakefile +202 -0
  4. data/bin/mongo_console +34 -0
  5. data/docs/1.0_UPGRADE.md +21 -0
  6. data/docs/CREDITS.md +123 -0
  7. data/docs/FAQ.md +116 -0
  8. data/docs/GridFS.md +158 -0
  9. data/docs/HISTORY.md +225 -0
  10. data/docs/REPLICA_SETS.md +72 -0
  11. data/docs/TUTORIAL.md +247 -0
  12. data/docs/WRITE_CONCERN.md +28 -0
  13. data/lib/mongo.rb +77 -0
  14. data/lib/mongo/collection.rb +872 -0
  15. data/lib/mongo/connection.rb +875 -0
  16. data/lib/mongo/cursor.rb +449 -0
  17. data/lib/mongo/db.rb +607 -0
  18. data/lib/mongo/exceptions.rb +68 -0
  19. data/lib/mongo/gridfs/grid.rb +106 -0
  20. data/lib/mongo/gridfs/grid_ext.rb +57 -0
  21. data/lib/mongo/gridfs/grid_file_system.rb +145 -0
  22. data/lib/mongo/gridfs/grid_io.rb +394 -0
  23. data/lib/mongo/gridfs/grid_io_fix.rb +38 -0
  24. data/lib/mongo/repl_set_connection.rb +342 -0
  25. data/lib/mongo/util/conversions.rb +89 -0
  26. data/lib/mongo/util/core_ext.rb +60 -0
  27. data/lib/mongo/util/pool.rb +185 -0
  28. data/lib/mongo/util/server_version.rb +71 -0
  29. data/lib/mongo/util/support.rb +82 -0
  30. data/lib/mongo/util/uri_parser.rb +181 -0
  31. data/lib/mongo/version.rb +3 -0
  32. data/mongo.gemspec +34 -0
  33. data/test/auxillary/1.4_features.rb +166 -0
  34. data/test/auxillary/authentication_test.rb +68 -0
  35. data/test/auxillary/autoreconnect_test.rb +41 -0
  36. data/test/auxillary/repl_set_auth_test.rb +58 -0
  37. data/test/auxillary/slave_connection_test.rb +36 -0
  38. data/test/auxillary/threaded_authentication_test.rb +101 -0
  39. data/test/bson/binary_test.rb +15 -0
  40. data/test/bson/bson_test.rb +614 -0
  41. data/test/bson/byte_buffer_test.rb +190 -0
  42. data/test/bson/hash_with_indifferent_access_test.rb +38 -0
  43. data/test/bson/json_test.rb +17 -0
  44. data/test/bson/object_id_test.rb +154 -0
  45. data/test/bson/ordered_hash_test.rb +197 -0
  46. data/test/collection_test.rb +893 -0
  47. data/test/connection_test.rb +303 -0
  48. data/test/conversions_test.rb +120 -0
  49. data/test/cursor_fail_test.rb +75 -0
  50. data/test/cursor_message_test.rb +43 -0
  51. data/test/cursor_test.rb +457 -0
  52. data/test/db_api_test.rb +715 -0
  53. data/test/db_connection_test.rb +15 -0
  54. data/test/db_test.rb +287 -0
  55. data/test/grid_file_system_test.rb +244 -0
  56. data/test/grid_io_test.rb +120 -0
  57. data/test/grid_test.rb +200 -0
  58. data/test/load/thin/load.rb +24 -0
  59. data/test/load/unicorn/load.rb +23 -0
  60. data/test/replica_sets/connect_test.rb +86 -0
  61. data/test/replica_sets/connection_string_test.rb +32 -0
  62. data/test/replica_sets/count_test.rb +35 -0
  63. data/test/replica_sets/insert_test.rb +53 -0
  64. data/test/replica_sets/pooled_insert_test.rb +55 -0
  65. data/test/replica_sets/query_secondaries.rb +96 -0
  66. data/test/replica_sets/query_test.rb +51 -0
  67. data/test/replica_sets/replication_ack_test.rb +66 -0
  68. data/test/replica_sets/rs_test_helper.rb +27 -0
  69. data/test/safe_test.rb +68 -0
  70. data/test/support/hash_with_indifferent_access.rb +199 -0
  71. data/test/support/keys.rb +45 -0
  72. data/test/support_test.rb +19 -0
  73. data/test/test_helper.rb +83 -0
  74. data/test/threading/threading_with_large_pool_test.rb +90 -0
  75. data/test/threading_test.rb +87 -0
  76. data/test/tools/auth_repl_set_manager.rb +14 -0
  77. data/test/tools/repl_set_manager.rb +266 -0
  78. data/test/unit/collection_test.rb +130 -0
  79. data/test/unit/connection_test.rb +98 -0
  80. data/test/unit/cursor_test.rb +99 -0
  81. data/test/unit/db_test.rb +96 -0
  82. data/test/unit/grid_test.rb +49 -0
  83. data/test/unit/pool_test.rb +9 -0
  84. data/test/unit/repl_set_connection_test.rb +72 -0
  85. data/test/unit/safe_test.rb +125 -0
  86. data/test/uri_test.rb +91 -0
  87. metadata +202 -0
data/docs/TUTORIAL.md ADDED
@@ -0,0 +1,247 @@
1
+ # MongoDB Ruby Driver Tutorial
2
+
3
+ This tutorial gives many common examples of using MongoDB with the Ruby driver. If you're looking for information on data modeling, see [MongoDB Data Modeling and Rails](http://www.mongodb.org/display/DOCS/MongoDB+Data+Modeling+and+Rails). Links to the various object mappers are listed on our [object mappers page](http://www.mongodb.org/display/DOCS/Object+Mappers+for+Ruby+and+MongoDB).
4
+
5
+ Interested in GridFS? See [GridFS in Ruby](file.GridFS.html).
6
+
7
+ As always, the [latest source for the Ruby driver](http://github.com/mongodb/mongo-ruby-driver) can be found on [github](http://github.com/mongodb/mongo-ruby-driver/).
8
+
9
+ ## Installation
10
+
11
+ The mongo-ruby-driver gem is served through Rubygems.org. To install, make sure you have the latest version of rubygems.
12
+ gem update --system
13
+ Next, install the mongo rubygem:
14
+ gem install mongo
15
+
16
+ The required `bson` gem will be installed automatically.
17
+
18
+ For optimum performance, install the bson_ext gem:
19
+
20
+ gem install bson_ext
21
+
22
+ After installing, you may want to look at the [examples](http://github.com/mongodb/mongo-ruby-driver/tree/master/examples) directory included in the source distribution. These examples walk through some of the basics of using the Ruby driver.
23
+
24
+ ## Getting started
25
+
26
+ #### Using the gem
27
+
28
+ All of the code here assumes that you have already executed the following Ruby code:
29
+
30
+ require 'rubygems' # not necessary for Ruby 1.9
31
+ require 'mongo'
32
+
33
+ #### Making a Connection
34
+
35
+ A `Mongo::Connection` instance represents a connection to MongoDB. You use a Connection instance to obtain a `Mongo::DB` instance, which represents a named database. The database doesn't have to exist - if it doesn't, MongoDB will create it for you.
36
+
37
+ You can optionally specify the MongoDB server address and port when connecting. The following example shows three ways to connect to the database "mydb" on the local machine:
38
+
39
+ db = Mongo::Connection.new.db("mydb")
40
+ db = Mongo::Connection.new("localhost").db("mydb")
41
+ db = Mongo::Connection.new("localhost", 27017).db("mydb")
42
+
43
+ At this point, the `db` object will be a connection to a MongoDB server for the specified database. Each DB instance uses a separate socket connection to the server.
44
+
45
+ If you're trying to connect to a replica set, see [Replica Sets in Ruby](http://www.mongodb.org/display/DOCS/Replica+Sets+in+Ruby).
46
+
47
+ #### Listing All Databases
48
+
49
+ connection = Mongo::Connection.new # (optional host/port args)
50
+ connection.database_names.each { |name| puts name }
51
+ connection.database_info.each { |info| puts info.inspect}
52
+
53
+ #### Dropping a Database
54
+ connection.drop_database('database_name')
55
+
56
+ #### Authentication (Optional)
+
+ MongoDB can be run in a secure mode where access to databases is controlled through name and password authentication. When run in this mode, any client application must provide a name and password before doing any operations. In the Ruby driver, you simply do the following with the connected mongo object:
57
+
58
+ auth = db.authenticate(my_user_name, my_password)
59
+
60
+ If the name and password are valid for the database, `auth` will be `true`. Otherwise, it will be `false`. You should look at the MongoDB log for further information if available.
61
+
62
+ #### Getting a List Of Collections
63
+
64
+ Each database has zero or more collections. You can retrieve a list of them from the db (and print out any that are there):
65
+
66
+ db.collection_names.each { |name| puts name }
67
+
68
+ and assuming that there are two collections, name and address, in the database, you would see
69
+
70
+ name
71
+ address
72
+
73
+ as the output.
74
+
75
+ #### Getting a Collection
76
+
77
+ You can get a collection using the `collection` method:
78
+ coll = db.collection("testCollection")
79
+ This is aliased to the `[]` method:
80
+ coll = db["testCollection"]
81
+
82
+ Once you have this collection object, you can now do things like insert data, query for data, etc.
83
+
84
+ #### Inserting a Document
85
+
86
+ Once you have the collection object, you can insert documents into the collection. For example, let's make a little document that in JSON would be represented as
87
+
88
+ {
89
+ "name" : "MongoDB",
90
+ "type" : "database",
91
+ "count" : 1,
92
+ "info" : {
93
+ x : 203,
94
+ y : 102
95
+ }
96
+ }
97
+
98
+ Notice that the above has an "inner" document embedded within it. To do this, we can use a Hash or the driver's OrderedHash (which preserves key order) to create the document (including the inner document), and then simply insert it into the collection using the `insert()` method.
99
+
100
+ doc = {"name" => "MongoDB", "type" => "database", "count" => 1,
101
+ "info" => {"x" => 203, "y" => '102'`
102
+ coll.insert(doc)
103
+
104
+ #### Updating a Document
105
+
106
+ We can update the previous document using the `update` method. There are a couple ways to update a document. We can rewrite it:
107
+
108
+ doc["name"] = "MongoDB Ruby"
109
+ coll.update({"_id" => doc["_id"]}, doc)
110
+
111
+ Or we can use an atomic operator to change a single value:
112
+
113
+ coll.update({"_id" => doc["_id"]}, {"$set" => {"name" => "MongoDB Ruby"`)
114
+
115
+ Read more about updating documents in the MongoDB manual.
116
+
117
+ #### Finding the First Document In a Collection using `find_one()`
118
+
119
+ To show that the document we inserted in the previous step is there, we can do a simple `find_one()` operation to get the first document in the collection. This method returns a single document (rather than the `Cursor` that the `find()` operation returns).
120
+
121
+ my_doc = coll.find_one()
122
+ puts my_doc.inspect
123
+
124
+ and you should see:
125
+
126
+ {"_id"=>#<BSON::ObjectID:0x118576c ...>, "name"=>"MongoDB",
127
+ "info"=>{"x"=>203, "y"=>102}, "type"=>"database", "count"=>1}
128
+
129
+ Note the `_id` element has been added automatically by MongoDB to your document.
130
+
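+ If you would rather supply the `_id` yourself, just include it in the document before inserting (a brief sketch; any unique value, such as a new `BSON::ObjectId`, will do):
+
+ coll.insert("_id" => BSON::ObjectId.new, "name" => "MongoDB", "type" => "database")
+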
131
+ #### Adding Multiple Documents
132
+
133
+ To demonstrate some more interesting queries, let's add multiple simple documents to the collection. These documents will have the following form:
134
+ {
135
+ "i" : value
136
+ }
137
+
138
+ Here's how to insert them:
139
+
140
+ 100.times { |i| coll.insert("i" => i) }
141
+
142
+ Notice that we can insert documents of different "shapes" into the same collection. These records are in the same collection as the complex record we inserted above. This aspect is what we mean when we say that MongoDB is "schema-free".
143
+
144
+ #### Counting Documents in a Collection
145
+
146
+ Now that we've inserted 101 documents (the 100 we did in the loop, plus the first one), we can check to see if we have them all using the `count()` method.
147
+
148
+ puts coll.count()
149
+
150
+ and it should print `101`.
151
+
152
+ #### Using a Cursor to get all of the Documents
153
+
154
+ To get all the documents from the collection, we use the `find()` method. `find()` returns a `Cursor` object, which allows us to iterate over the set of documents that matches our query. The Ruby driver's Cursor class includes Enumerable, which allows us to use `Enumerable#each`, `Enumerable#map`, etc. For instance:
155
+
156
+ coll.find().each { |row| puts row.inspect }
157
+
158
+ and that should print all 101 documents in the collection.
159
+
160
+ #### Getting a Single Document with a Query
161
+
162
+ We can create a _query_ hash to pass to the `find()` method to get a subset of the documents in our collection. For example, if we wanted to find the document for which the value of the "i" field is 71, we would do the following:
163
+
164
+ coll.find("i" => 71).each { |row| puts row.inspect }
165
+
166
+ and it should print just one document:
167
+
168
+ {"_id"=>#<BSON::ObjectID:0x117de90 ...>, "i"=>71}
169
+
170
+ #### Getting a Set of Documents With a Query
171
+
172
+ We can use the query to get a set of documents from our collection. For example, if we wanted to get all documents where "i" > 50, we could write:
173
+
174
+ coll.find("i" => {"$gt" => 50}).each { |row| puts row }
175
+
176
+ which should print the documents where i > 50. We could also get a range, say 20 < i <= 30:
177
+
178
+ coll.find("i" => {"$gt" => 20, "$lte" => 30}).each { |row| puts row }
179
+
180
+ #### Selecting a subset of fields for a query
181
+
182
+ Use the `:fields` option. If you just want fields "a" and "b":
183
+
184
+ coll.find("i" => {"$gt" => 50}, :fields => ["a", "b"]).each { |row| puts row }
185
+
186
+ #### Querying with Regular Expressions
187
+
188
+ Regular expressions can be used to query MongoDB. To find all names that begin with 'a':
189
+
190
+ coll.find({"name" => /^a/})
191
+
192
+ You can also construct a regular expression dynamically. To match a given search string:
193
+
194
+ search_string = params['search']
195
+
196
+ # Constructor syntax
197
+ coll.find({"name" => Regexp.new(search_string)})
198
+
199
+ # Literal syntax
200
+ coll.find({"name" => /#{search_string}/})
201
+
202
+ Although MongoDB isn't vulnerable to anything like SQL-injection, it may be worth checking the search string for anything malicious.
203
+
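+ For example, you might escape any regex metacharacters in the user's input before building the pattern (a small sketch using Ruby's built-in `Regexp.escape`):
+
+ # Treat the user's input as a literal string rather than a pattern
+ coll.find({"name" => Regexp.new(Regexp.escape(search_string))})
+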
204
+ ## Indexing
205
+
206
+ #### Creating An Index
207
+
208
+ MongoDB supports indexes, and they are very easy to add to a collection. To create an index, you specify either a single field name or an array of [field name, direction] pairs to be indexed. The following creates an ascending index on the "i" field:
209
+
210
+ # create_index assumes ascending order; see method docs
211
+ # for details
212
+ coll.create_index("i")
213
+ To specify complex indexes or a descending index you need to use a slightly more complex syntax - the index specifier must be an Array of [field name, direction] pairs. Directions should be specified as Mongo::ASCENDING or Mongo::DESCENDING:
214
+
215
+ # Explicit "ascending"
216
+ coll.create_index([["i", Mongo::ASCENDING]])
217
+
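+ A descending index on the same field follows the same syntax (a sketch):
+
+ # Explicit "descending"
+ coll.create_index([["i", Mongo::DESCENDING]])
+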
218
+ #### Creating and querying on a geospatial index
219
+
220
+ First, create the index on a field containing long-lat values:
221
+
222
+ people.create_index([["loc", Mongo::GEO2D]])
223
+
224
+ Then get a list of the twenty locations nearest to the point 50, 50:
225
+
226
+ people.find({"loc" => {"$near" => [50, 50]}}, {:limit => 20}).each do |p|
227
+ puts p.inspect
228
+ end
229
+
230
+ #### Getting a List of Indexes on a Collection
231
+
232
+ You can get a list of the indexes on a collection using `coll.index_information()`.
233
+
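+ For example, to print each index name along with its definition (a sketch; the method returns a hash keyed by index name):
+
+ coll.index_information.each do |name, info|
+ puts "#{name}: #{info.inspect}"
+ end
+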
234
+ ## Database Administration
235
+
236
+ A database can have one of three profiling levels: off (:off), slow queries only (:slow_only), or all (:all). To see the database level:
237
+
238
+ puts db.profiling_level # => off (the symbol :off printed as a string)
239
+ db.profiling_level = :slow_only
240
+
241
+ Validating a collection will return an interesting hash if all is well or raise an exception if there is a problem.
242
+ p db.validate_collection('coll_name')
243
+
244
+ ## See Also
245
+
246
+ * [MongoDB Koans](http://github.com/chicagoruby/MongoDB_Koans) A path to MongoDB enlightenment via the Ruby driver.
247
+ * [MongoDB Manual](http://www.mongodb.org/display/DOCS/Developer+Zone)
data/docs/WRITE_CONCERN.md ADDED
@@ -0,0 +1,28 @@
1
+ # Write Concern in Ruby
2
+
3
+ ## Setting the write concern
4
+
5
+ Write concern is set using the `:safe` option. There are several possible options:
6
+
7
+ @collection.save({:doc => 'foo'}, :safe => true)
8
+ @collection.save({:doc => 'foo'}, :safe => {:w => 2})
9
+ @collection.save({:doc => 'foo'}, :safe => {:w => 2, :wtimeout => 200})
10
+ @collection.save({:doc => 'foo'}, :safe => {:w => 2, :wtimeout => 200, :fsync => true})
11
+
12
+ The first, `true`, simply indicates that we should request a response from the server to ensure that no errors have occurred. The second, `{:w => 2}`, forces the server to wait until at least two servers have recorded the write. The third does the same but will time out if the replication can't be completed in 200 milliseconds. The fourth forces an fsync on each server being written to (note: this option is rarely necessary and will have a dramatically negative effect on performance).
13
+
14
+ ## Write concern inheritance
15
+
16
+ The Ruby driver allows you to set write concern on each of four levels: the connection, database, collection, and write operation.
17
+ Objects will inherit the default write concern from their parents. Thus, if you set a write concern of `{:w => 1}` when creating
18
+ a new connection, then all databases and collections created from that connection will inherit the same setting. See this code example:
19
+
20
+ @con = Mongo::Connection.new('localhost', 27017, :safe => {:w => 2})
21
+ @db = @con['test']
22
+ @collection = @db['foo']
23
+ @collection.save({:name => 'foo'})
24
+
25
+ @collection.save({:name => 'bar'}, :safe => false)
26
+
27
+ Here, the first call to Collection#save will use the inherited write concern, `{:w => 2}`. But notice that the second call
28
+ to Collection#save overrides this setting.
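+
+ A write concern can also be attached when a collection object is built, since `Collection.new` accepts the same `:safe` option (a sketch based on the driver source below):
+
+ @collection = Mongo::Collection.new('foo', @db, :safe => {:w => 2})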
data/lib/mongo.rb ADDED
@@ -0,0 +1,77 @@
1
+ # encoding: UTF-8
2
+ #
3
+ # --
4
+ # Copyright (C) 2008-2011 10gen Inc.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+ # ++
18
+
19
+ $:.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
20
+
21
+ require 'mongo/version'
22
+
23
+ module Mongo
24
+ ASCENDING = 1
25
+ DESCENDING = -1
26
+ GEO2D = '2d'
27
+
28
+ DEFAULT_MAX_BSON_SIZE = 4 * 1024 * 1024
29
+
30
+ module Constants
31
+ OP_REPLY = 1
32
+ OP_MSG = 1000
33
+ OP_UPDATE = 2001
34
+ OP_INSERT = 2002
35
+ OP_QUERY = 2004
36
+ OP_GET_MORE = 2005
37
+ OP_DELETE = 2006
38
+ OP_KILL_CURSORS = 2007
39
+
40
+ OP_QUERY_TAILABLE = 2 ** 1
41
+ OP_QUERY_SLAVE_OK = 2 ** 2
42
+ OP_QUERY_OPLOG_REPLAY = 2 ** 3
43
+ OP_QUERY_NO_CURSOR_TIMEOUT = 2 ** 4
44
+ OP_QUERY_AWAIT_DATA = 2 ** 5
45
+ OP_QUERY_EXHAUST = 2 ** 6
46
+
47
+ REPLY_CURSOR_NOT_FOUND = 2 ** 0
48
+ REPLY_QUERY_FAILURE = 2 ** 1
49
+ REPLY_SHARD_CONFIG_STALE = 2 ** 2
50
+ REPLY_AWAIT_CAPABLE = 2 ** 3
51
+ end
52
+ end
53
+
54
+ require 'bson'
55
+
56
+ require 'mongo/util/conversions'
57
+ require 'mongo/util/support'
58
+ require 'mongo/util/core_ext'
59
+ require 'mongo/util/pool'
60
+ require 'mongo/util/server_version'
61
+ require 'mongo/util/uri_parser'
62
+
63
+ require 'mongo/collection'
64
+ require 'mongo/connection'
65
+ require 'mongo/repl_set_connection'
66
+ require 'mongo/cursor'
67
+ require 'mongo/db'
68
+ require 'mongo/exceptions'
69
+ require 'mongo/gridfs/grid_ext'
70
+ require 'mongo/gridfs/grid'
71
+ require 'mongo/gridfs/grid_io'
72
+ if RUBY_PLATFORM =~ /java/
73
+ require 'mongo/gridfs/grid_io_fix'
74
+ end
75
+ require 'mongo/gridfs/grid_file_system'
76
+
77
+
data/lib/mongo/collection.rb ADDED
@@ -0,0 +1,872 @@
1
+ # encoding: UTF-8
2
+
3
+ # --
4
+ # Copyright (C) 2008-2011 10gen Inc.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ module Mongo
19
+
20
+ # A named collection of documents in a database.
21
+ class Collection
22
+
23
+ attr_reader :db, :name, :pk_factory, :hint, :safe
24
+
25
+ # Initialize a collection object.
26
+ #
27
+ # @param [String, Symbol] name the name of the collection.
28
+ # @param [DB] db a MongoDB database instance.
29
+ #
30
+ # @option opts [:create_pk] :pk (BSON::ObjectId) A primary key factory to use
31
+ # other than the default BSON::ObjectId.
32
+ #
33
+ # @option opts [Boolean, Hash] :safe (false) Set the default safe-mode options
34
+ # for insert, update, and remove methods called on this Collection instance. If no
35
+ # value is provided, the default value set on this instance's DB will be used. This
36
+ # default can be overridden for any invocation of insert, update, or remove.
37
+ #
38
+ # @raise [InvalidNSName]
39
+ # if collection name is empty, contains '$', or starts or ends with '.'
40
+ #
41
+ # @raise [TypeError]
42
+ # if collection name is not a string or symbol
43
+ #
44
+ # @return [Collection]
45
+ #
46
+ # @core collections constructor_details
47
+ def initialize(name, db, opts={})
48
+ if db.is_a?(String) && name.is_a?(Mongo::DB)
49
+ warn "Warning: the order of parameters to initialize a collection have changed. " +
50
+ "Please specify the collection name first, followed by the db."
51
+ db, name = name, db
52
+ end
53
+
54
+ case name
55
+ when Symbol, String
56
+ else
57
+ raise TypeError, "new_name must be a string or symbol"
58
+ end
59
+
60
+ name = name.to_s
61
+
62
+ if name.empty? or name.include? ".."
63
+ raise Mongo::InvalidNSName, "collection names cannot be empty"
64
+ end
65
+ if name.include? "$"
66
+ raise Mongo::InvalidNSName, "collection names must not contain '$'" unless name =~ /((^\$cmd)|(oplog\.\$main))/
67
+ end
68
+ if name.match(/^\./) or name.match(/\.$/)
69
+ raise Mongo::InvalidNSName, "collection names must not start or end with '.'"
70
+ end
71
+
72
+ if opts.respond_to?(:create_pk) || !opts.is_a?(Hash)
73
+ warn "The method for specifying a primary key factory on a Collection has changed.\n" +
74
+ "Please specify it as an option (e.g., :pk => PkFactory)."
75
+ pk_factory = opts
76
+ else
77
+ pk_factory = nil
78
+ end
79
+
80
+ @db, @name = db, name
81
+ @connection = @db.connection
82
+ @cache_time = @db.cache_time
83
+ @cache = Hash.new(0)
84
+ unless pk_factory
85
+ @safe = opts.fetch(:safe, @db.safe)
86
+ end
87
+ @pk_factory = pk_factory || opts[:pk] || BSON::ObjectId
88
+ @hint = nil
89
+ end
90
+
91
+ # Return a sub-collection of this collection by name. If 'users' is a collection, then
92
+ # 'users.comments' is a sub-collection of users.
93
+ #
94
+ # @param [String] name
95
+ # the collection to return
96
+ #
97
+ # @raise [Mongo::InvalidNSName]
98
+ # if passed an invalid collection name
99
+ #
100
+ # @return [Collection]
101
+ # the specified sub-collection
102
+ def [](name)
103
+ name = "#{self.name}.#{name}"
104
+ return Collection.new(name, db) if !db.strict? || db.collection_names.include?(name)
105
+ raise "Collection #{name} doesn't exist. Currently in strict mode."
106
+ end
107
+
108
+ # Set a hint field for query optimizer. Hint may be a single field
109
+ # name, array of field names, or a hash (preferably an [OrderedHash]).
110
+ # If using MongoDB > 1.1, you probably don't ever need to set a hint.
111
+ #
112
+ # @param [String, Array, OrderedHash] hint a single field, an array of
113
+ # fields, or a hash specifying fields
114
+ def hint=(hint=nil)
115
+ @hint = normalize_hint_fields(hint)
116
+ self
117
+ end
118
+
119
+ # Query the database.
120
+ #
121
+ # The +selector+ argument is a prototype document that all results must
122
+ # match. For example:
123
+ #
124
+ # collection.find({"hello" => "world"})
125
+ #
126
+ # only matches documents that have a key "hello" with value "world".
127
+ # Matches can have other keys *in addition* to "hello".
128
+ #
129
+ # If given an optional block +find+ will yield a Cursor to that block,
130
+ # close the cursor, and then return nil. This guarantees that partially
131
+ # evaluated cursors will be closed. If given no block +find+ returns a
132
+ # cursor.
133
+ #
134
+ # @param [Hash] selector
135
+ # a document specifying elements which must be present for a
136
+ # document to be included in the result set. Note that in rare cases,
137
+ # (e.g., with $near queries), the order of keys will matter. To preserve
138
+ # key order on a selector, use an instance of BSON::OrderedHash (only applies
139
+ # to Ruby 1.8).
140
+ #
141
+ # @option opts [Array, Hash] :fields field names that should be returned in the result
142
+ # set ("_id" will be included unless explicity excluded). By limiting results to a certain subset of fields,
143
+ # you can cut down on network traffic and decoding time. If using a Hash, keys should be field
144
+ # names and values should be either 1 or 0, depending on whether you want to include or exclude
145
+ # the given field.
146
+ # @option opts [Integer] :skip number of documents to skip from the beginning of the result set
147
+ # @option opts [Integer] :limit maximum number of documents to return
148
+ # @option opts [Array] :sort an array of [key, direction] pairs to sort by. Direction should
149
+ # be specified as Mongo::ASCENDING (or :ascending / :asc) or Mongo::DESCENDING (or :descending / :desc)
150
+ # @option opts [String, Array, OrderedHash] :hint hint for query optimizer, usually not necessary if using MongoDB > 1.1
151
+ # @option opts [Boolean] :snapshot (false) if true, snapshot mode will be used for this query.
152
+ # Snapshot mode assures no duplicates are returned, or objects missed, which were present at both the start and
153
+ # end of the query's execution. For details see http://www.mongodb.org/display/DOCS/How+to+do+Snapshotting+in+the+Mongo+Database
154
+ # @option opts [Integer] :batch_size (100) the number of documents to be returned by the database per GETMORE operation. A value of 0
155
+ # will let the database server decide how many results to return. This option can be ignored for most use cases.
156
+ # @option opts [Boolean] :timeout (true) when +true+, the returned cursor will be subject to
157
+ # the normal cursor timeout behavior of the mongod process. When +false+, the returned cursor will never timeout. Note
158
+ # that disabling timeout will only work when #find is invoked with a block. This is to prevent any inadvertent failure to
159
+ # close the cursor, as the cursor is explicitly closed when block code finishes.
160
+ #
161
+ # @raise [ArgumentError]
162
+ # if timeout is set to false and find is not invoked in a block
163
+ #
164
+ # @raise [RuntimeError]
165
+ # if given unknown options
166
+ #
167
+ # @core find find-instance_method
168
+ def find(selector={}, opts={})
169
+ fields = opts.delete(:fields)
170
+ fields = ["_id"] if fields && fields.empty?
171
+ skip = opts.delete(:skip) || skip || 0
172
+ limit = opts.delete(:limit) || 0
173
+ sort = opts.delete(:sort)
174
+ hint = opts.delete(:hint)
175
+ snapshot = opts.delete(:snapshot)
176
+ batch_size = opts.delete(:batch_size)
177
+ timeout = (opts.delete(:timeout) == false) ? false : true
178
+
179
+ if timeout == false && !block_given?
180
+ raise ArgumentError, "Collection#find must be invoked with a block when timeout is disabled."
181
+ end
182
+
183
+ if hint
184
+ hint = normalize_hint_fields(hint)
185
+ else
186
+ hint = @hint # assumed to be normalized already
187
+ end
188
+
189
+ raise RuntimeError, "Unknown options [#{opts.inspect}]" unless opts.empty?
190
+
191
+ cursor = Cursor.new(self, :selector => selector, :fields => fields, :skip => skip, :limit => limit,
192
+ :order => sort, :hint => hint, :snapshot => snapshot, :timeout => timeout, :batch_size => batch_size)
193
+
194
+ if block_given?
195
+ yield cursor
196
+ cursor.close
197
+ nil
198
+ else
199
+ cursor
200
+ end
201
+ end
202
+
203
+ # Return a single object from the database.
204
+ #
205
+ # @return [OrderedHash, Nil]
206
+ # a single document or nil if no result is found.
207
+ #
208
+ # @param [Hash, ObjectId, Nil] spec_or_object_id a hash specifying elements
209
+ # which must be present for a document to be included in the result set or an
210
+ # instance of ObjectId to be used as the value for an _id query.
211
+ # If nil, an empty selector, {}, will be used.
212
+ #
213
+ # @option opts [Hash]
214
+ # any valid options that can be sent to Collection#find
215
+ #
216
+ # @raise [TypeError]
217
+ # if the argument is of an improper type.
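+ #
+ # @example Illustrative usage (here '@users' is a hypothetical collection):
+ # @users.find_one("name" => "Bob")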
218
+ def find_one(spec_or_object_id=nil, opts={})
219
+ spec = case spec_or_object_id
220
+ when nil
221
+ {}
222
+ when BSON::ObjectId
223
+ {:_id => spec_or_object_id}
224
+ when Hash
225
+ spec_or_object_id
226
+ else
227
+ raise TypeError, "spec_or_object_id must be an instance of ObjectId or Hash, or nil"
228
+ end
229
+ find(spec, opts.merge(:limit => -1)).next_document
230
+ end
231
+
232
+ # Save a document to this collection.
233
+ #
234
+ # @param [Hash] doc
235
+ # the document to be saved. If the document already has an '_id' key,
236
+ # then an update (upsert) operation will be performed, and any existing
237
+ # document with that _id is overwritten. Otherwise an insert operation is performed.
238
+ #
239
+ # @return [ObjectId] the _id of the saved document.
240
+ #
241
+ # @option opts [Boolean, Hash] :safe (+false+)
242
+ # run the operation in safe mode, which runs a getlasterror command on the
243
+ # database to report any assertion. In addition, a hash can be provided to
244
+ # run an fsync and/or wait for replication of the save (>= 1.5.1). See the options
245
+ # for DB#error.
246
+ #
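+ # @example Illustrative usage ('@posts' is a hypothetical collection):
+ # @posts.save({"title" => "Hello"}, :safe => true)
+ #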
247
+ # @raise [OperationFailure] when :safe mode fails.
248
+ #
249
+ # @see DB#remove for options that can be passed to :safe.
250
+ def save(doc, opts={})
251
+ if doc.has_key?(:_id) || doc.has_key?('_id')
252
+ id = doc[:_id] || doc['_id']
253
+ update({:_id => id}, doc, :upsert => true, :safe => opts.fetch(:safe, @safe))
254
+ id
255
+ else
256
+ insert(doc, :safe => opts.fetch(:safe, @safe))
257
+ end
258
+ end
259
+
260
+ # Insert one or more documents into the collection.
261
+ #
262
+ # @param [Hash, Array] doc_or_docs
263
+ # a document (as a hash) or array of documents to be inserted.
264
+ #
265
+ # @return [ObjectId, Array]
266
+ # The _id of the inserted document or a list of _ids of all inserted documents.
267
+ #
268
+ # @option opts [Boolean, Hash] :safe (+false+)
269
+ # run the operation in safe mode, which runs a getlasterror command on the
270
+ # database to report any assertion. In addition, a hash can be provided to
271
+ # run an fsync and/or wait for replication of the insert (>= 1.5.1). Safe
272
+ # options provided here will override any safe options set on this collection,
273
+ # its database object, or the current connection. See the options on
274
+ # for DB#get_last_error.
275
+ #
276
+ # @see DB#remove for options that can be passed to :safe.
277
+ #
278
+ # @core insert insert-instance_method
279
+ def insert(doc_or_docs, opts={})
280
+ doc_or_docs = [doc_or_docs] unless doc_or_docs.is_a?(Array)
281
+ doc_or_docs.collect! { |doc| @pk_factory.create_pk(doc) }
282
+ safe = opts.fetch(:safe, @safe)
283
+ result = insert_documents(doc_or_docs, @name, true, safe)
284
+ result.size > 1 ? result : result.first
285
+ end
286
+ alias_method :<<, :insert
287
+
288
+ # Remove all documents from this collection.
289
+ #
290
+ # @param [Hash] selector
291
+ # If specified, only matching documents will be removed.
292
+ #
293
+ # @option opts [Boolean, Hash] :safe (+false+)
294
+ # run the operation in safe mode, which will run a getlasterror command on the
295
+ # database to report any assertion. In addition, a hash can be provided to
296
+ # run an fsync and/or wait for replication of the remove (>= 1.5.1). Safe
297
+ # options provided here will override any safe options set on this collection,
298
+ # its database, or the current connection. See the options for DB#get_last_error for more details.
299
+ #
300
+ # @example remove all documents from the 'users' collection:
301
+ # users.remove
302
+ # users.remove({})
303
+ #
304
+ # @example remove only documents that have expired:
305
+ # users.remove({:expire => {"$lte" => Time.now}})
306
+ #
307
+ # @return [Hash, true] Returns a Hash containing the last error object if running in safe mode.
308
+ # Otherwise, returns true.
309
+ #
310
+ # @raise [Mongo::OperationFailure] an exception will be raised iff safe mode is enabled
311
+ # and the operation fails.
312
+ #
313
+ # @see DB#remove for options that can be passed to :safe.
314
+ #
315
+ # @core remove remove-instance_method
316
+ def remove(selector={}, opts={})
317
+ # Initial byte is 0.
318
+ safe = opts.fetch(:safe, @safe)
319
+ message = BSON::ByteBuffer.new("\0\0\0\0")
320
+ BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{@name}")
321
+ message.put_int(0)
322
+ message.put_binary(BSON::BSON_CODER.serialize(selector, false, true).to_s)
323
+
324
+ @connection.instrument(:remove, :database => @db.name, :collection => @name, :selector => selector) do
325
+ if safe
326
+ @connection.send_message_with_safe_check(Mongo::Constants::OP_DELETE, message, @db.name, nil, safe)
327
+ else
328
+ @connection.send_message(Mongo::Constants::OP_DELETE, message)
329
+ true
330
+ end
331
+ end
332
+ end
333
+
334
+ # Update one or more documents in this collection.
335
+ #
336
+ # @param [Hash] selector
337
+ # a hash specifying elements which must be present for a document to be updated. Note:
338
+ # the update command currently updates only the first document matching the
339
+ # given selector. If you want all matching documents to be updated, be sure
340
+ # to specify :multi => true.
341
+ # @param [Hash] document
342
+ # a hash specifying the fields to be changed in the selected document,
343
+ # or (in the case of an upsert) the document to be inserted
344
+ #
345
+ # @option opts [Boolean] :upsert (+false+) if true, performs an upsert (update or insert)
346
+ # @option opts [Boolean] :multi (+false+) update all documents matching the selector, as opposed to
347
+ # just the first matching document. Note: only works in MongoDB 1.1.3 or later.
348
+ # @option opts [Boolean] :safe (+false+)
349
+ # If true, check that the save succeeded. OperationFailure
350
+ # will be raised on an error. Note that a safe check requires an extra
351
+ # round-trip to the database. Safe options provided here will override any safe
352
+ # options set on this collection, its database object, or the current connection.
353
+ # See the options for DB#get_last_error for details.
354
+ #
355
+ # @return [Hash, true] Returns a Hash containing the last error object if running in safe mode.
356
+ # Otherwise, returns true.
357
+ #
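+ # @example Illustrative usage ('@posts' is a hypothetical collection):
+ # @posts.update({"author" => "joe"}, {"$set" => {"editor" => "sam"}}, :multi => true, :safe => true)
+ #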
358
+ # @core update update-instance_method
359
+ def update(selector, document, opts={})
360
+ # Initial byte is 0.
361
+ safe = opts.fetch(:safe, @safe)
362
+ message = BSON::ByteBuffer.new("\0\0\0\0")
363
+ BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{@name}")
364
+ update_options = 0
365
+ update_options += 1 if opts[:upsert]
366
+ update_options += 2 if opts[:multi]
367
+ message.put_int(update_options)
368
+ message.put_binary(BSON::BSON_CODER.serialize(selector, false, true).to_s)
369
+ message.put_binary(BSON::BSON_CODER.serialize(document, false, true).to_s)
370
+
371
+ @connection.instrument(:update, :database => @db.name, :collection => @name, :selector => selector, :document => document) do
372
+ if safe
373
+ @connection.send_message_with_safe_check(Mongo::Constants::OP_UPDATE, message, @db.name, nil, safe)
374
+ else
375
+ @connection.send_message(Mongo::Constants::OP_UPDATE, message, nil)
376
+ end
377
+ end
378
+ end
379
+
380
+ # Create a new index.
381
+ #
382
+ # @param [String, Array] spec
383
+ # should be either a single field name or an array of
384
+ # [field name, direction] pairs. Directions should be specified
385
+ # as Mongo::ASCENDING, Mongo::DESCENDING, or Mongo::GEO2D.
386
+ #
387
+ # Note that geospatial indexing only works with versions of MongoDB >= 1.3.3+. Keep in mind, too,
388
+ # that in order to geo-index a given field, that field must reference either an array or a sub-object
389
+ # where the first two values represent x- and y-coordinates. Examples can be seen below.
390
+ #
391
+ # Also note that it is permissible to create compound indexes that include a geospatial index as
392
+ # long as the geospatial index comes first.
393
+ #
394
+ # If your code calls create_index frequently, you can use Collection#ensure_index to cache these calls
395
+ # and thereby prevent excessive round trips to the database.
396
+ #
397
+ # @option opts [Boolean] :unique (false) if true, this index will enforce a uniqueness constraint.
398
+ # @option opts [Boolean] :background (false) indicates that the index should be built in the background. This
399
+ # feature is only available in MongoDB >= 1.3.2.
400
+ # @option opts [Boolean] :drop_dups (nil) If creating a unique index on a collection with pre-existing records,
401
+ # this option will keep the first document the database indexes and drop all subsequent documents with duplicate values.
402
+ # @option opts [Integer] :min (nil) specify the minimum longitude and latitude for a geo index.
403
+ # @option opts [Integer] :max (nil) specify the maximum longitude and latitude for a geo index.
404
+ #
405
+ # @example Creating a compound index:
406
+ # @posts.create_index([['subject', Mongo::ASCENDING], ['created_at', Mongo::DESCENDING]])
407
+ #
408
+ # @example Creating a geospatial index:
409
+ # @restaurants.create_index([['location', Mongo::GEO2D]])
410
+ #
411
+ # # Note that this will work only if 'location' represents x,y coordinates:
412
+ # {'location': [0, 50]}
413
+ # {'location': {'x' => 0, 'y' => 50}}
414
+ # {'location': {'latitude' => 0, 'longitude' => 50}}
415
+ #
416
+ # @example A geospatial index with alternate longitude and latitude:
417
+ # @restaurants.create_index([['location', Mongo::GEO2D]], :min => 500, :max => 500)
418
+ #
419
+ # @return [String] the name of the index created.
420
+ #
421
+ # @core indexes create_index-instance_method
422
+ def create_index(spec, opts={})
423
+ opts[:dropDups] = opts.delete(:drop_dups) if opts[:drop_dups]
424
+ field_spec = parse_index_spec(spec)
425
+ name = opts.delete(:name) || generate_index_name(field_spec)
426
+ name = name.to_s if name
427
+
428
+ generate_indexes(field_spec, name, opts)
429
+ name
430
+ end
431
+
432
+ # Calls create_index and sets a flag to not do so again for another X minutes.
433
+ # This time can be specified as an option when initializing a Mongo::DB object as options[:cache_time].
434
+ # Any changes to an index will be propagated through regardless of cache time (e.g., a change of index direction)
435
+ #
436
+ # The parameters and options for this method are the same as those for Collection#create_index.
437
+ #
438
+ # @example Call sequence:
439
+ # Time t: @posts.ensure_index([['subject', Mongo::ASCENDING]]) -- calls create_index and
440
+ # sets the 5 minute cache
441
+ # Time t+2min : @posts.ensure_index([['subject', Mongo::ASCENDING]]) -- doesn't do anything
442
+ # Time t+3min : @posts.ensure_index([['something_else', Mongo::ASCENDING]]) -- calls create_index
443
+ # and sets 5 minute cache
444
+ # Time t+10min : @posts.ensure_index([['subject', Mongo::ASCENDING]]) -- calls create_index and
445
+ # resets the 5 minute counter
446
+ #
447
+ # @return [String] the name of the index.
448
+ def ensure_index(spec, opts={})
449
+ now = Time.now.utc.to_i
450
+ field_spec = parse_index_spec(spec)
451
+
452
+ name = opts.delete(:name) || generate_index_name(field_spec)
453
+ name = name.to_s if name
454
+
455
+ if !@cache[name] || @cache[name] <= now
456
+ generate_indexes(field_spec, name, opts)
457
+ end
458
+
459
+ # Reset the cache here in case there are any errors inserting. Best to be safe.
460
+ @cache[name] = now + @cache_time
461
+ name
462
+ end
463
+
464
+ # Drop a specified index.
465
+ #
466
+ # @param [String] name
467
+ #
468
+ # @core indexes
469
+ def drop_index(name)
470
+ @cache[name.to_s] = nil
471
+ @db.drop_index(@name, name)
472
+ end
473
+
474
+ # Drop all indexes.
475
+ #
476
+ # @core indexes
477
+ def drop_indexes
478
+ @cache = {}
479
+
480
+ # Note: calling drop_indexes with no args will drop them all.
481
+ @db.drop_index(@name, '*')
482
+ end
483
+
484
+ # Drop the entire collection. USE WITH CAUTION.
485
+ def drop
486
+ @db.drop_collection(@name)
487
+ end
488
+
489
+ # Atomically update and return a document using MongoDB's findAndModify command. (MongoDB > 1.3.0)
490
+ #
491
+ # @option opts [Hash] :query ({}) a query selector document for matching the desired document.
492
+ # @option opts [Hash] :update (nil) the update operation to perform on the matched document.
493
+ # @option opts [Array, String, OrderedHash] :sort ({}) specify a sort option for the query using any
494
+ # of the sort options available for Cursor#sort. Sort order is important if the query will be matching
495
+ # multiple documents since only the first matching document will be updated and returned.
496
+ # @option opts [Boolean] :remove (false) If true, removes the returned document from the collection.
497
+ # @option opts [Boolean] :new (false) If true, returns the updated document; otherwise, returns the document
498
+ # prior to update.
499
+ #
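+ # @example Illustrative usage ('@queue' is a hypothetical collection):
+ # @queue.find_and_modify(:query => {:processed => false}, :update => {"$set" => {:processed => true}}, :new => true)
+ #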
500
+ # @return [Hash] the matched document.
501
+ #
502
+ # @core findandmodify find_and_modify-instance_method
503
+ def find_and_modify(opts={})
504
+ cmd = BSON::OrderedHash.new
505
+ cmd[:findandmodify] = @name
506
+ cmd.merge!(opts)
507
+ cmd[:sort] = Mongo::Support.format_order_clause(opts[:sort]) if opts[:sort]
508
+
509
+ @db.command(cmd)['value']
510
+ end
511
+
512
+ # Perform a map-reduce operation on the current collection.
513
+ #
514
+ # @param [String, BSON::Code] map a map function, written in JavaScript.
515
+ # @param [String, BSON::Code] reduce a reduce function, written in JavaScript.
516
+ #
517
+ # @option opts [Hash] :query ({}) a query selector document, like what's passed to #find, to limit
518
+ # the operation to a subset of the collection.
519
+ # @option opts [Array] :sort ([]) an array of [key, direction] pairs to sort by. Direction should
520
+ # be specified as Mongo::ASCENDING (or :ascending / :asc) or Mongo::DESCENDING (or :descending / :desc)
521
+ # @option opts [Integer] :limit (nil) if passing a query, number of objects to return from the collection.
522
+ # @option opts [String, BSON::Code] :finalize (nil) a javascript function to apply to the result set after the
523
+ # map/reduce operation has finished.
524
+ # @option opts [String] :out (nil) a valid output type. In versions of MongoDB prior to v1.7.6,
525
+ # this option takes the name of a collection for the output results. In versions 1.7.6 and later,
526
+ # this option specifies the output type. See the core docs for available output types.
527
+ # @option opts [Boolean] :keeptemp (false) if true, the generated collection will be persisted. The default
528
+ # is false. Note that this option has no effect in versions of MongoDB > v1.7.6.
529
+ # @option opts [Boolean] :verbose (false) if true, provides statistics on job execution time.
530
+ # @option opts [Boolean] :raw (false) if true, return the raw result object from the map_reduce command, and not
531
+ # the instantiated collection that's returned by default. Note if a collection name isn't returned in the
532
+ # map-reduce output (as, for example, when using :out => {:inline => 1}), then you must specify this option
533
+ # or an ArgumentError will be raised.
534
+ #
535
+ # @return [Collection, Hash] a Mongo::Collection object or a Hash with the map-reduce command's results.
536
+ #
537
+ # @raise ArgumentError if you specify {:out => {:inline => true}} but don't specify :raw => true.
538
+ #
539
+ # @see http://www.mongodb.org/display/DOCS/MapReduce Official MongoDB map/reduce documentation.
540
+ #
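+ # @example Illustrative usage ('@events' is a hypothetical collection; the JavaScript is only a sketch):
+ # map = "function() { emit(this.user_id, 1); }"
+ # reduce = "function(key, values) { var sum = 0; values.forEach(function(v) { sum += v; }); return sum; }"
+ # @events.map_reduce(map, reduce, :out => "user_counts")
+ #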
541
+ # @core mapreduce map_reduce-instance_method
542
+ def map_reduce(map, reduce, opts={})
543
+ map = BSON::Code.new(map) unless map.is_a?(BSON::Code)
544
+ reduce = BSON::Code.new(reduce) unless reduce.is_a?(BSON::Code)
545
+ raw = opts.delete(:raw)
546
+
547
+ hash = BSON::OrderedHash.new
548
+ hash['mapreduce'] = self.name
549
+ hash['map'] = map
550
+ hash['reduce'] = reduce
551
+ hash.merge! opts
552
+
553
+ result = @db.command(hash)
554
+ unless Mongo::Support.ok?(result)
555
+ raise Mongo::OperationFailure, "map-reduce failed: #{result['errmsg']}"
556
+ end
557
+
558
+ if raw
559
+ result
560
+ elsif result["result"]
561
+ @db[result["result"]]
562
+ else
563
+ raise ArgumentError, "Could not instantiate collection from result. If you specified " +
564
+ "{:out => {:inline => true}}, then you must also specify :raw => true to get the results."
565
+ end
566
+ end
567
+ alias :mapreduce :map_reduce
568
+
569
+ # Perform a group aggregation.
570
+ #
571
+ # @param [Hash] opts the options for this group operation. The minimum required are :initial
572
+ # and :reduce.
573
+ #
574
+ # @option opts [Array, String, Symbol] :key (nil) Either the name of a field or a list of fields to group by (optional).
575
+ # @option opts [String, BSON::Code] :keyf (nil) A JavaScript function to be used to generate the grouping keys (optional).
576
+ # @option opts [String, BSON::Code] :cond ({}) A document specifying a query for filtering the documents over
577
+ # which the aggregation is run (optional).
578
+ # @option opts [Hash] :initial the initial value of the aggregation counter object (required).
579
+ # @option opts [String, BSON::Code] :reduce (nil) a JavaScript aggregation function (required).
580
+ # @option opts [String, BSON::Code] :finalize (nil) a JavaScript function that receives and modifies
581
+ # each of the resultant grouped objects. Available only when group is run with command
582
+ # set to true.
583
+ #
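+ # @example Illustrative usage of the hash form ('@stats' is a hypothetical collection):
+ # @stats.group(:key => :category, :initial => {:count => 0},
+ # :reduce => "function(doc, prev) { prev.count += 1; }")
+ #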
584
+ # @return [Array] the command response consisting of grouped items.
585
+ def group(opts, condition={}, initial={}, reduce=nil, finalize=nil)
586
+ if opts.is_a?(Hash)
587
+ return new_group(opts)
588
+ else
589
+ warn "Collection#group no longer take a list of parameters. This usage is deprecated." +
590
+ "Check out the new API at http://api.mongodb.org/ruby/current/Mongo/Collection.html#group-instance_method"
591
+ end
592
+
593
+ reduce = BSON::Code.new(reduce) unless reduce.is_a?(BSON::Code)
594
+
595
+ group_command = {
596
+ "group" => {
597
+ "ns" => @name,
598
+ "$reduce" => reduce,
599
+ "cond" => condition,
600
+ "initial" => initial
601
+ }
602
+ }
603
+
604
+ if opts.is_a?(Symbol)
605
+ raise MongoArgumentError, "Group takes either an array of fields to group by or a JavaScript function" +
606
+ "in the form of a String or BSON::Code."
607
+ end
608
+
609
+ unless opts.nil?
610
+ if opts.is_a? Array
611
+ key_type = "key"
612
+ key_value = {}
613
+ opts.each { |k| key_value[k] = 1 }
614
+ else
615
+ key_type = "$keyf"
616
+ key_value = opts.is_a?(BSON::Code) ? opts : BSON::Code.new(opts)
617
+ end
618
+
619
+ group_command["group"][key_type] = key_value
620
+ end
621
+
622
+ finalize = BSON::Code.new(finalize) if finalize.is_a?(String)
623
+ if finalize.is_a?(BSON::Code)
624
+ group_command['group']['finalize'] = finalize
625
+ end
626
+
627
+ result = @db.command(group_command)
628
+
629
+ if Mongo::Support.ok?(result)
630
+ result["retval"]
631
+ else
632
+ raise OperationFailure, "group command failed: #{result['errmsg']}"
633
+ end
634
+ end
635
+
636
+ private
637
+
638
+ def new_group(opts={})
639
+ reduce = opts[:reduce]
640
+ finalize = opts[:finalize]
641
+ cond = opts.fetch(:cond, {})
642
+ initial = opts[:initial]
643
+
644
+ if !(reduce && initial)
645
+ raise MongoArgumentError, "Group requires at minimum values for initial and reduce."
646
+ end
647
+
648
+ cmd = {
649
+ "group" => {
650
+ "ns" => @name,
651
+ "$reduce" => reduce.to_bson_code,
652
+ "cond" => cond,
653
+ "initial" => initial
654
+ }
655
+ }
656
+
657
+ if finalize
658
+ cmd['group']['finalize'] = finalize.to_bson_code
659
+ end
660
+
661
+ if key = opts[:key]
662
+ if key.is_a?(String) || key.is_a?(Symbol)
663
+ key = [key]
664
+ end
665
+ key_value = {}
666
+ key.each { |k| key_value[k] = 1 }
667
+ cmd["group"]["key"] = key_value
668
+ elsif keyf = opts[:keyf]
669
+ cmd["group"]["$keyf"] = keyf.to_bson_code
670
+ end
671
+
672
+ result = @db.command(cmd)
673
+ result["retval"]
674
+ end
675
+
676
+ public
677
+
678
+ # Return a list of distinct values for +key+ across all
679
+ # documents in the collection. The key may use dot notation
680
+ # to reach into an embedded object.
681
+ #
682
+ # @param [String, Symbol, OrderedHash] key or hash to group by.
683
+ # @param [Hash] query a selector for limiting the result set over which to group.
684
+ #
685
+ # @example Saving zip codes and ages and returning distinct results.
686
+ # @collection.save({:zip => 10010, :name => {:age => 27}})
687
+ # @collection.save({:zip => 94108, :name => {:age => 24}})
688
+ # @collection.save({:zip => 10010, :name => {:age => 27}})
689
+ # @collection.save({:zip => 99701, :name => {:age => 24}})
690
+ # @collection.save({:zip => 94108, :name => {:age => 27}})
691
+ #
692
+ # @collection.distinct(:zip)
693
+ # [10010, 94108, 99701]
694
+ # @collection.distinct("name.age")
695
+ # [27, 24]
696
+ #
697
+ # # You may also pass a document selector as the second parameter
698
+ # # to limit the documents over which distinct is run:
699
+ # @collection.distinct("name.age", {"name.age" => {"$gt" => 24}})
700
+ # [27]
701
+ #
702
+ # @return [Array] an array of distinct values.
703
+ def distinct(key, query=nil)
704
+ raise MongoArgumentError unless [String, Symbol].include?(key.class)
705
+ command = BSON::OrderedHash.new
706
+ command[:distinct] = @name
707
+ command[:key] = key.to_s
708
+ command[:query] = query
709
+
710
+ @db.command(command)["values"]
711
+ end
712
+
713
+ # Rename this collection.
714
+ #
715
+ # Note: If operating in auth mode, the client must be authorized as an admin to
716
+ # perform this operation.
717
+ #
718
+ # @param [String] new_name the new name for this collection
719
+ #
720
+ # @return [String] the name of the new collection.
721
+ #
722
+ # @raise [Mongo::InvalidNSName] if +new_name+ is an invalid collection name.
723
+ def rename(new_name)
724
+ case new_name
725
+ when Symbol, String
726
+ else
727
+ raise TypeError, "new_name must be a string or symbol"
728
+ end
729
+
730
+ new_name = new_name.to_s
731
+
732
+ if new_name.empty? or new_name.include? ".."
733
+ raise Mongo::InvalidNSName, "collection names cannot be empty"
734
+ end
735
+ if new_name.include? "$"
736
+ raise Mongo::InvalidNSName, "collection names must not contain '$'"
737
+ end
738
+ if new_name.match(/^\./) or new_name.match(/\.$/)
739
+ raise Mongo::InvalidNSName, "collection names must not start or end with '.'"
740
+ end
741
+
742
+ @db.rename_collection(@name, new_name)
743
+ @name = new_name
744
+ end
745
+
746
+ # Get information on the indexes for this collection.
747
+ #
748
+ # @return [Hash] a hash where the keys are index names.
749
+ #
750
+ # @core indexes
751
+ def index_information
752
+ @db.index_information(@name)
753
+ end
754
+
755
+ # Return a hash containing options that apply to this collection.
756
+ # For all possible keys and values, see DB#create_collection.
757
+ #
758
+ # @return [Hash] options that apply to this collection.
759
+ def options
760
+ @db.collections_info(@name).next_document['options']
761
+ end
762
+
763
+ # Return stats on the collection. Uses MongoDB's collstats command.
764
+ #
765
+ # @return [Hash]
766
+ def stats
767
+ @db.command({:collstats => @name})
768
+ end
769
+
770
+ # Get the number of documents in this collection.
771
+ #
772
+ # @return [Integer]
773
+ def count
774
+ find().count()
775
+ end
776
+
777
+ alias :size :count
778
+
779
+ protected
780
+
781
+ def normalize_hint_fields(hint)
782
+ case hint
783
+ when String
784
+ {hint => 1}
785
+ when Hash
786
+ hint
787
+ when nil
788
+ nil
789
+ else
790
+ h = BSON::OrderedHash.new
791
+ hint.to_a.each { |k| h[k] = 1 }
792
+ h
793
+ end
794
+ end
795
+
796
+ private
797
+
798
+ def parse_index_spec(spec)
799
+ field_spec = BSON::OrderedHash.new
800
+ if spec.is_a?(String) || spec.is_a?(Symbol)
801
+ field_spec[spec.to_s] = 1
802
+ elsif spec.is_a?(Array) && spec.all? {|field| field.is_a?(Array) }
803
+ spec.each do |f|
804
+ if [Mongo::ASCENDING, Mongo::DESCENDING, Mongo::GEO2D].include?(f[1])
805
+ field_spec[f[0].to_s] = f[1]
806
+ else
807
+ raise MongoArgumentError, "Invalid index field #{f[1].inspect}; " +
808
+ "should be one of Mongo::ASCENDING (1), Mongo::DESCENDING (-1) or Mongo::GEO2D ('2d')."
809
+ end
810
+ end
811
+ else
812
+ raise MongoArgumentError, "Invalid index specification #{spec.inspect}; " +
813
+ "should be either a string, symbol, or an array of arrays."
814
+ end
815
+ field_spec
816
+ end
817
+
818
+ def generate_indexes(field_spec, name, opts)
819
+ selector = {
820
+ :name => name,
821
+ :ns => "#{@db.name}.#{@name}",
822
+ :key => field_spec
823
+ }
824
+ selector.merge!(opts)
825
+
826
+ begin
827
+ insert_documents([selector], Mongo::DB::SYSTEM_INDEX_COLLECTION, false, true)
828
+
829
+ rescue Mongo::OperationFailure => e
830
+ if selector[:dropDups] && e.message =~ /^11000/
831
+ # NOP. If the user is intentionally dropping dups, we can ignore duplicate key errors.
832
+ else
833
+ raise Mongo::OperationFailure, "Failed to create index #{selector.inspect} with the following error: " +
834
+ "#{e.message}"
835
+ end
836
+ end
837
+
838
+ nil
839
+ end
840
+
841
+ # Sends a Mongo::Constants::OP_INSERT message to the database.
842
+ # Takes an array of +documents+, an optional +collection_name+, and a
843
+ # +check_keys+ setting.
844
+ def insert_documents(documents, collection_name=@name, check_keys=true, safe=false)
845
+ # Initial byte is 0.
846
+ message = BSON::ByteBuffer.new("\0\0\0\0")
847
+ BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{collection_name}")
848
+ documents.each do |doc|
849
+ message.put_binary(BSON::BSON_CODER.serialize(doc, check_keys, true).to_s)
850
+ end
851
+ raise InvalidOperation, "Exceeded maximum insert size of 16,000,000 bytes" if message.size > 16_000_000
852
+
853
+ @connection.instrument(:insert, :database => @db.name, :collection => collection_name, :documents => documents) do
854
+ if safe
855
+ @connection.send_message_with_safe_check(Mongo::Constants::OP_INSERT, message, @db.name, nil, safe)
856
+ else
857
+ @connection.send_message(Mongo::Constants::OP_INSERT, message, nil)
858
+ end
859
+ end
860
+ documents.collect { |o| o[:_id] || o['_id'] }
861
+ end
862
+
863
+ def generate_index_name(spec)
864
+ indexes = []
865
+ spec.each_pair do |field, direction|
866
+ indexes.push("#{field}_#{direction}")
867
+ end
868
+ indexes.join("_")
869
+ end
870
+ end
871
+
872
+ end