mongo 1.3.0 → 1.12.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- checksums.yaml.gz.sig +0 -0
- data/{LICENSE.txt → LICENSE} +1 -1
- data/README.md +122 -271
- data/Rakefile +25 -209
- data/VERSION +1 -0
- data/bin/mongo_console +31 -9
- data/lib/mongo/bulk_write_collection_view.rb +387 -0
- data/lib/mongo/collection.rb +576 -269
- data/lib/mongo/collection_writer.rb +364 -0
- data/lib/mongo/connection/node.rb +249 -0
- data/lib/mongo/connection/pool.rb +340 -0
- data/lib/mongo/connection/pool_manager.rb +320 -0
- data/lib/mongo/connection/sharding_pool_manager.rb +67 -0
- data/lib/mongo/connection/socket/socket_util.rb +37 -0
- data/lib/mongo/connection/socket/ssl_socket.rb +95 -0
- data/lib/mongo/connection/socket/tcp_socket.rb +87 -0
- data/lib/mongo/connection/socket/unix_socket.rb +39 -0
- data/lib/mongo/connection/socket.rb +18 -0
- data/lib/mongo/connection.rb +7 -875
- data/lib/mongo/cursor.rb +403 -117
- data/lib/mongo/db.rb +444 -243
- data/lib/mongo/exception.rb +145 -0
- data/lib/mongo/functional/authentication.rb +455 -0
- data/lib/mongo/functional/logging.rb +85 -0
- data/lib/mongo/functional/read_preference.rb +183 -0
- data/lib/mongo/functional/scram.rb +556 -0
- data/lib/mongo/functional/uri_parser.rb +409 -0
- data/lib/mongo/functional/write_concern.rb +66 -0
- data/lib/mongo/functional.rb +20 -0
- data/lib/mongo/gridfs/grid.rb +30 -24
- data/lib/mongo/gridfs/grid_ext.rb +6 -10
- data/lib/mongo/gridfs/grid_file_system.rb +38 -20
- data/lib/mongo/gridfs/grid_io.rb +84 -75
- data/lib/mongo/gridfs.rb +18 -0
- data/lib/mongo/legacy.rb +140 -0
- data/lib/mongo/mongo_client.rb +697 -0
- data/lib/mongo/mongo_replica_set_client.rb +535 -0
- data/lib/mongo/mongo_sharded_client.rb +159 -0
- data/lib/mongo/networking.rb +372 -0
- data/lib/mongo/{util → utils}/conversions.rb +29 -8
- data/lib/mongo/{util → utils}/core_ext.rb +28 -18
- data/lib/mongo/{util → utils}/server_version.rb +4 -6
- data/lib/mongo/{util → utils}/support.rb +29 -31
- data/lib/mongo/utils/thread_local_variable_manager.rb +25 -0
- data/lib/mongo/utils.rb +19 -0
- data/lib/mongo.rb +51 -50
- data/mongo.gemspec +29 -32
- data/test/functional/authentication_test.rb +39 -0
- data/test/functional/bulk_api_stress_test.rb +133 -0
- data/test/functional/bulk_write_collection_view_test.rb +1198 -0
- data/test/functional/client_test.rb +627 -0
- data/test/functional/collection_test.rb +2175 -0
- data/test/functional/collection_writer_test.rb +83 -0
- data/test/{conversions_test.rb → functional/conversions_test.rb} +47 -3
- data/test/functional/cursor_fail_test.rb +57 -0
- data/test/functional/cursor_message_test.rb +56 -0
- data/test/functional/cursor_test.rb +683 -0
- data/test/functional/db_api_test.rb +835 -0
- data/test/functional/db_connection_test.rb +25 -0
- data/test/functional/db_test.rb +348 -0
- data/test/functional/grid_file_system_test.rb +285 -0
- data/test/{grid_io_test.rb → functional/grid_io_test.rb} +72 -11
- data/test/{grid_test.rb → functional/grid_test.rb} +88 -15
- data/test/functional/pool_test.rb +136 -0
- data/test/functional/safe_test.rb +98 -0
- data/test/functional/ssl_test.rb +29 -0
- data/test/functional/support_test.rb +62 -0
- data/test/functional/timeout_test.rb +60 -0
- data/test/functional/uri_test.rb +446 -0
- data/test/functional/write_concern_test.rb +118 -0
- data/test/helpers/general.rb +50 -0
- data/test/helpers/test_unit.rb +476 -0
- data/test/replica_set/authentication_test.rb +37 -0
- data/test/replica_set/basic_test.rb +189 -0
- data/test/replica_set/client_test.rb +393 -0
- data/test/replica_set/connection_test.rb +138 -0
- data/test/replica_set/count_test.rb +66 -0
- data/test/replica_set/cursor_test.rb +220 -0
- data/test/replica_set/insert_test.rb +157 -0
- data/test/replica_set/max_values_test.rb +151 -0
- data/test/replica_set/pinning_test.rb +105 -0
- data/test/replica_set/query_test.rb +73 -0
- data/test/replica_set/read_preference_test.rb +219 -0
- data/test/replica_set/refresh_test.rb +211 -0
- data/test/replica_set/replication_ack_test.rb +95 -0
- data/test/replica_set/ssl_test.rb +32 -0
- data/test/sharded_cluster/basic_test.rb +203 -0
- data/test/shared/authentication/basic_auth_shared.rb +260 -0
- data/test/shared/authentication/bulk_api_auth_shared.rb +249 -0
- data/test/shared/authentication/gssapi_shared.rb +176 -0
- data/test/shared/authentication/sasl_plain_shared.rb +96 -0
- data/test/shared/authentication/scram_shared.rb +92 -0
- data/test/shared/ssl_shared.rb +235 -0
- data/test/test_helper.rb +53 -94
- data/test/threading/basic_test.rb +120 -0
- data/test/tools/mongo_config.rb +708 -0
- data/test/tools/mongo_config_test.rb +160 -0
- data/test/unit/client_test.rb +381 -0
- data/test/unit/collection_test.rb +89 -53
- data/test/unit/connection_test.rb +282 -32
- data/test/unit/cursor_test.rb +206 -8
- data/test/unit/db_test.rb +55 -13
- data/test/unit/grid_test.rb +43 -16
- data/test/unit/mongo_sharded_client_test.rb +48 -0
- data/test/unit/node_test.rb +93 -0
- data/test/unit/pool_manager_test.rb +111 -0
- data/test/unit/read_pref_test.rb +406 -0
- data/test/unit/read_test.rb +159 -0
- data/test/unit/safe_test.rb +69 -36
- data/test/unit/sharding_pool_manager_test.rb +84 -0
- data/test/unit/write_concern_test.rb +175 -0
- data.tar.gz.sig +3 -0
- metadata +227 -216
- metadata.gz.sig +0 -0
- data/docs/CREDITS.md +0 -123
- data/docs/FAQ.md +0 -116
- data/docs/GridFS.md +0 -158
- data/docs/HISTORY.md +0 -244
- data/docs/RELEASES.md +0 -33
- data/docs/REPLICA_SETS.md +0 -72
- data/docs/TUTORIAL.md +0 -247
- data/docs/WRITE_CONCERN.md +0 -28
- data/lib/mongo/exceptions.rb +0 -71
- data/lib/mongo/gridfs/grid_io_fix.rb +0 -38
- data/lib/mongo/repl_set_connection.rb +0 -342
- data/lib/mongo/test.rb +0 -20
- data/lib/mongo/util/pool.rb +0 -177
- data/lib/mongo/util/uri_parser.rb +0 -185
- data/test/async/collection_test.rb +0 -224
- data/test/async/connection_test.rb +0 -24
- data/test/async/cursor_test.rb +0 -162
- data/test/async/worker_pool_test.rb +0 -99
- data/test/auxillary/1.4_features.rb +0 -166
- data/test/auxillary/authentication_test.rb +0 -68
- data/test/auxillary/autoreconnect_test.rb +0 -41
- data/test/auxillary/fork_test.rb +0 -30
- data/test/auxillary/repl_set_auth_test.rb +0 -58
- data/test/auxillary/slave_connection_test.rb +0 -36
- data/test/auxillary/threaded_authentication_test.rb +0 -101
- data/test/bson/binary_test.rb +0 -15
- data/test/bson/bson_test.rb +0 -649
- data/test/bson/byte_buffer_test.rb +0 -208
- data/test/bson/hash_with_indifferent_access_test.rb +0 -38
- data/test/bson/json_test.rb +0 -17
- data/test/bson/object_id_test.rb +0 -154
- data/test/bson/ordered_hash_test.rb +0 -204
- data/test/bson/timestamp_test.rb +0 -24
- data/test/collection_test.rb +0 -910
- data/test/connection_test.rb +0 -309
- data/test/cursor_fail_test.rb +0 -75
- data/test/cursor_message_test.rb +0 -43
- data/test/cursor_test.rb +0 -483
- data/test/db_api_test.rb +0 -726
- data/test/db_connection_test.rb +0 -15
- data/test/db_test.rb +0 -287
- data/test/grid_file_system_test.rb +0 -243
- data/test/load/resque/load.rb +0 -21
- data/test/load/resque/processor.rb +0 -26
- data/test/load/thin/load.rb +0 -24
- data/test/load/unicorn/load.rb +0 -23
- data/test/load/unicorn/unicorn.rb +0 -29
- data/test/replica_sets/connect_test.rb +0 -94
- data/test/replica_sets/connection_string_test.rb +0 -32
- data/test/replica_sets/count_test.rb +0 -35
- data/test/replica_sets/insert_test.rb +0 -53
- data/test/replica_sets/pooled_insert_test.rb +0 -55
- data/test/replica_sets/query_secondaries.rb +0 -96
- data/test/replica_sets/query_test.rb +0 -51
- data/test/replica_sets/replication_ack_test.rb +0 -66
- data/test/replica_sets/rs_test_helper.rb +0 -27
- data/test/safe_test.rb +0 -68
- data/test/support/hash_with_indifferent_access.rb +0 -186
- data/test/support/keys.rb +0 -45
- data/test/support_test.rb +0 -18
- data/test/threading/threading_with_large_pool_test.rb +0 -90
- data/test/threading_test.rb +0 -87
- data/test/tools/auth_repl_set_manager.rb +0 -14
- data/test/tools/load.rb +0 -58
- data/test/tools/repl_set_manager.rb +0 -266
- data/test/tools/sharding_manager.rb +0 -202
- data/test/tools/test.rb +0 -4
- data/test/unit/pool_test.rb +0 -9
- data/test/unit/repl_set_connection_test.rb +0 -59
- data/test/uri_test.rb +0 -91
data/lib/mongo/collection.rb
CHANGED
@@ -1,13 +1,10 @@
|
|
1
|
-
#
|
2
|
-
|
3
|
-
# --
|
4
|
-
# Copyright (C) 2008-2011 10gen Inc.
|
1
|
+
# Copyright (C) 2009-2013 MongoDB, Inc.
|
5
2
|
#
|
6
3
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
7
4
|
# you may not use this file except in compliance with the License.
|
8
5
|
# You may obtain a copy of the License at
|
9
6
|
#
|
10
|
-
#
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
11
8
|
#
|
12
9
|
# Unless required by applicable law or agreed to in writing, software
|
13
10
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
@@ -19,21 +16,52 @@ module Mongo
|
|
19
16
|
|
20
17
|
# A named collection of documents in a database.
|
21
18
|
class Collection
|
22
|
-
|
23
|
-
|
19
|
+
include Mongo::Logging
|
20
|
+
include Mongo::WriteConcern
|
21
|
+
|
22
|
+
attr_reader :db,
|
23
|
+
:name,
|
24
|
+
:pk_factory,
|
25
|
+
:hint,
|
26
|
+
:write_concern,
|
27
|
+
:capped,
|
28
|
+
:operation_writer,
|
29
|
+
:command_writer
|
30
|
+
|
31
|
+
# Read Preference
|
32
|
+
attr_accessor :read,
|
33
|
+
:tag_sets,
|
34
|
+
:acceptable_latency
|
24
35
|
|
25
36
|
# Initialize a collection object.
|
26
37
|
#
|
27
38
|
# @param [String, Symbol] name the name of the collection.
|
28
39
|
# @param [DB] db a MongoDB database instance.
|
29
40
|
#
|
41
|
+
# @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
|
42
|
+
# should be acknowledged.
|
43
|
+
# @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
|
44
|
+
# @option opts [Boolean] :j (false) If true, block until write operations have been committed
|
45
|
+
# to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
|
46
|
+
# ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
|
47
|
+
# fail with an exception if this option is used when the server is running without journaling.
|
48
|
+
# @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
|
49
|
+
# the server has synced all data files to disk. If the server is running with journaling, this acts the same as
|
50
|
+
# the 'j' option, blocking until write operations have been committed to the journal.
|
51
|
+
# Cannot be used in combination with 'j'.
|
52
|
+
#
|
53
|
+
# Notes about write concern:
|
54
|
+
# These write concern options will be used for insert, update, and remove methods called on this
|
55
|
+
# Collection instance. If no value is provided, the default values set on this instance's DB will be used.
|
56
|
+
# These option values can be overridden for any invocation of insert, update, or remove.
|
57
|
+
#
|
30
58
|
# @option opts [:create_pk] :pk (BSON::ObjectId) A primary key factory to use
|
31
59
|
# other than the default BSON::ObjectId.
|
32
|
-
#
|
33
|
-
#
|
34
|
-
#
|
35
|
-
#
|
36
|
-
#
|
60
|
+
# @option opts [:primary, :secondary] :read The default read preference for queries
|
61
|
+
# initiates from this connection object. If +:secondary+ is chosen, reads will be sent
|
62
|
+
# to one of the closest available secondary nodes. If a secondary node cannot be located, the
|
63
|
+
# read will be sent to the primary. If this option is left unspecified, the value of the read
|
64
|
+
# preference for this collection's associated Mongo::DB object will be used.
|
37
65
|
#
|
38
66
|
# @raise [InvalidNSName]
|
39
67
|
# if collection name is empty, contains '$', or starts or ends with '.'
|
@@ -42,56 +70,69 @@ module Mongo
|
|
42
70
|
# if collection name is not a string or symbol
|
43
71
|
#
|
44
72
|
# @return [Collection]
|
45
|
-
#
|
46
|
-
# @core collections constructor_details
|
47
73
|
def initialize(name, db, opts={})
|
48
74
|
if db.is_a?(String) && name.is_a?(Mongo::DB)
|
49
75
|
warn "Warning: the order of parameters to initialize a collection have changed. " +
|
50
|
-
"Please specify the collection name first, followed by the db."
|
76
|
+
"Please specify the collection name first, followed by the db. This will be made permanent" +
|
77
|
+
"in v2.0."
|
51
78
|
db, name = name, db
|
52
79
|
end
|
53
80
|
|
54
|
-
|
55
|
-
|
56
|
-
else
|
57
|
-
raise TypeError, "new_name must be a string or symbol"
|
58
|
-
end
|
59
|
-
|
81
|
+
raise TypeError,
|
82
|
+
"Collection name must be a String or Symbol." unless [String, Symbol].include?(name.class)
|
60
83
|
name = name.to_s
|
61
84
|
|
62
|
-
|
63
|
-
|
64
|
-
|
65
|
-
if name.include?
|
66
|
-
raise Mongo::InvalidNSName,
|
67
|
-
|
68
|
-
if name.match(/^\./) or name.match(/\.$/)
|
69
|
-
raise Mongo::InvalidNSName, "collection names must not start or end with '.'"
|
85
|
+
raise Mongo::InvalidNSName,
|
86
|
+
"Collection names cannot be empty." if name.empty? || name.include?("..")
|
87
|
+
|
88
|
+
if name.include?("$")
|
89
|
+
raise Mongo::InvalidNSName,
|
90
|
+
"Collection names must not contain '$'" unless name =~ /((^\$cmd)|(oplog\.\$main))/
|
70
91
|
end
|
71
92
|
|
93
|
+
raise Mongo::InvalidNSName,
|
94
|
+
"Collection names must not start or end with '.'" if name.match(/^\./) || name.match(/\.$/)
|
95
|
+
|
96
|
+
pk_factory = nil
|
72
97
|
if opts.respond_to?(:create_pk) || !opts.is_a?(Hash)
|
73
98
|
warn "The method for specifying a primary key factory on a Collection has changed.\n" +
|
74
|
-
|
99
|
+
"Please specify it as an option (e.g., :pk => PkFactory)."
|
75
100
|
pk_factory = opts
|
76
|
-
else
|
77
|
-
pk_factory = nil
|
78
101
|
end
|
79
102
|
|
80
103
|
@db, @name = db, name
|
81
104
|
@connection = @db.connection
|
105
|
+
@logger = @connection.logger
|
82
106
|
@cache_time = @db.cache_time
|
83
|
-
@cache
|
107
|
+
@cache = Hash.new(0)
|
84
108
|
unless pk_factory
|
85
|
-
@
|
109
|
+
@write_concern = get_write_concern(opts, db)
|
110
|
+
@read = opts[:read] || @db.read
|
111
|
+
Mongo::ReadPreference::validate(@read)
|
112
|
+
@capped = opts[:capped]
|
113
|
+
@tag_sets = opts.fetch(:tag_sets, @db.tag_sets)
|
114
|
+
@acceptable_latency = opts.fetch(:acceptable_latency, @db.acceptable_latency)
|
86
115
|
end
|
87
116
|
@pk_factory = pk_factory || opts[:pk] || BSON::ObjectId
|
88
117
|
@hint = nil
|
118
|
+
@operation_writer = CollectionOperationWriter.new(self)
|
119
|
+
@command_writer = CollectionCommandWriter.new(self)
|
120
|
+
end
|
121
|
+
|
122
|
+
# Indicate whether this is a capped collection.
|
123
|
+
#
|
124
|
+
# @raise [Mongo::OperationFailure]
|
125
|
+
# if the collection doesn't exist.
|
126
|
+
#
|
127
|
+
# @return [Boolean]
|
128
|
+
def capped?
|
129
|
+
@capped ||= [1, true].include?(@db.command({:collstats => @name})['capped'])
|
89
130
|
end
|
90
131
|
|
91
132
|
# Return a sub-collection of this collection by name. If 'users' is a collection, then
|
92
133
|
# 'users.comments' is a sub-collection of users.
|
93
134
|
#
|
94
|
-
# @param [String] name
|
135
|
+
# @param [String, Symbol] name
|
95
136
|
# the collection to return
|
96
137
|
#
|
97
138
|
# @raise [Mongo::InvalidNSName]
|
@@ -101,7 +142,8 @@ module Mongo
|
|
101
142
|
# the specified sub-collection
|
102
143
|
def [](name)
|
103
144
|
name = "#{self.name}.#{name}"
|
104
|
-
return Collection.new(name, db) if !db.strict? ||
|
145
|
+
return Collection.new(name, db) if !db.strict? ||
|
146
|
+
db.collection_names.include?(name.to_s)
|
105
147
|
raise "Collection #{name} doesn't exist. Currently in strict mode."
|
106
148
|
end
|
107
149
|
|
@@ -116,6 +158,13 @@ module Mongo
|
|
116
158
|
self
|
117
159
|
end
|
118
160
|
|
161
|
+
# Set a hint field using a named index.
|
162
|
+
# @param [String] hint index name
|
163
|
+
def named_hint=(hint=nil)
|
164
|
+
@hint = hint
|
165
|
+
self
|
166
|
+
end
|
167
|
+
|
119
168
|
# Query the database.
|
120
169
|
#
|
121
170
|
# The +selector+ argument is a prototype document that all results must
|
@@ -139,45 +188,70 @@ module Mongo
|
|
139
188
|
# to Ruby 1.8).
|
140
189
|
#
|
141
190
|
# @option opts [Array, Hash] :fields field names that should be returned in the result
|
142
|
-
# set ("_id" will be included unless
|
191
|
+
# set ("_id" will be included unless explicitly excluded). By limiting results to a certain subset of fields,
|
143
192
|
# you can cut down on network traffic and decoding time. If using a Hash, keys should be field
|
144
193
|
# names and values should be either 1 or 0, depending on whether you want to include or exclude
|
145
194
|
# the given field.
|
195
|
+
# @option opts [:primary, :secondary] :read The default read preference for queries
|
196
|
+
# initiates from this connection object. If +:secondary+ is chosen, reads will be sent
|
197
|
+
# to one of the closest available secondary nodes. If a secondary node cannot be located, the
|
198
|
+
# read will be sent to the primary. If this option is left unspecified, the value of the read
|
199
|
+
# preference for this Collection object will be used.
|
146
200
|
# @option opts [Integer] :skip number of documents to skip from the beginning of the result set
|
147
201
|
# @option opts [Integer] :limit maximum number of documents to return
|
148
202
|
# @option opts [Array] :sort an array of [key, direction] pairs to sort by. Direction should
|
149
203
|
# be specified as Mongo::ASCENDING (or :ascending / :asc) or Mongo::DESCENDING (or :descending / :desc)
|
150
|
-
# @option opts [String, Array, OrderedHash] :hint hint for query optimizer, usually not necessary if
|
204
|
+
# @option opts [String, Array, OrderedHash] :hint hint for query optimizer, usually not necessary if
|
205
|
+
# using MongoDB > 1.1
|
206
|
+
# @option opts [String] :named_hint for specifying a named index as a hint, will be overriden by :hint
|
207
|
+
# if :hint is also provided.
|
151
208
|
# @option opts [Boolean] :snapshot (false) if true, snapshot mode will be used for this query.
|
152
209
|
# Snapshot mode assures no duplicates are returned, or objects missed, which were preset at both the start and
|
153
|
-
# end of the query's execution.
|
154
|
-
#
|
155
|
-
#
|
210
|
+
# end of the query's execution.
|
211
|
+
# For details see http://www.mongodb.org/display/DOCS/How+to+do+Snapshotting+in+the+Mongo+Database
|
212
|
+
# @option opts [Boolean] :batch_size (100) the number of documents to returned by the database per
|
213
|
+
# GETMORE operation. A value of 0 will let the database server decide how many results to return.
|
214
|
+
# This option can be ignored for most use cases.
|
156
215
|
# @option opts [Boolean] :timeout (true) when +true+, the returned cursor will be subject to
|
157
|
-
# the normal cursor timeout behavior of the mongod process. When +false+, the returned cursor will
|
158
|
-
# that disabling timeout will only work when #find is invoked with a block.
|
159
|
-
# close the cursor, as the cursor is explicitly
|
160
|
-
#
|
216
|
+
# the normal cursor timeout behavior of the mongod process. When +false+, the returned cursor will
|
217
|
+
# never timeout. Note that disabling timeout will only work when #find is invoked with a block.
|
218
|
+
# This is to prevent any inadvertent failure to close the cursor, as the cursor is explicitly
|
219
|
+
# closed when block code finishes.
|
220
|
+
# @option opts [Integer] :max_scan (nil) Limit the number of items to scan on both collection scans and indexed queries..
|
221
|
+
# @option opts [Boolean] :show_disk_loc (false) Return the disk location of each query result (for debugging).
|
222
|
+
# @option opts [Boolean] :return_key (false) Return the index key used to obtain the result (for debugging).
|
223
|
+
# @option opts [Block] :transformer (nil) a block for transforming returned documents.
|
161
224
|
# This is normally used by object mappers to convert each returned document to an instance of a class.
|
225
|
+
# @option opts [String] :comment (nil) a comment to include in profiling logs
|
226
|
+
# @option opts [Boolean] :compile_regex (true) whether BSON regex objects should be compiled into Ruby regexes.
|
227
|
+
# If false, a BSON::Regex object will be returned instead.
|
162
228
|
#
|
163
229
|
# @raise [ArgumentError]
|
164
230
|
# if timeout is set to false and find is not invoked in a block
|
165
231
|
#
|
166
232
|
# @raise [RuntimeError]
|
167
233
|
# if given unknown options
|
168
|
-
#
|
169
|
-
# @core find find-instance_method
|
170
234
|
def find(selector={}, opts={})
|
171
|
-
|
172
|
-
fields
|
173
|
-
|
174
|
-
|
175
|
-
|
176
|
-
|
177
|
-
|
178
|
-
|
179
|
-
|
180
|
-
|
235
|
+
opts = opts.dup
|
236
|
+
fields = opts.delete(:fields)
|
237
|
+
fields = ["_id"] if fields && fields.empty?
|
238
|
+
skip = opts.delete(:skip) || skip || 0
|
239
|
+
limit = opts.delete(:limit) || 0
|
240
|
+
sort = opts.delete(:sort)
|
241
|
+
hint = opts.delete(:hint)
|
242
|
+
named_hint = opts.delete(:named_hint)
|
243
|
+
snapshot = opts.delete(:snapshot)
|
244
|
+
batch_size = opts.delete(:batch_size)
|
245
|
+
timeout = (opts.delete(:timeout) == false) ? false : true
|
246
|
+
max_scan = opts.delete(:max_scan)
|
247
|
+
return_key = opts.delete(:return_key)
|
248
|
+
transformer = opts.delete(:transformer)
|
249
|
+
show_disk_loc = opts.delete(:show_disk_loc)
|
250
|
+
comment = opts.delete(:comment)
|
251
|
+
read = opts.delete(:read) || @read
|
252
|
+
tag_sets = opts.delete(:tag_sets) || @tag_sets
|
253
|
+
acceptable_latency = opts.delete(:acceptable_latency) || @acceptable_latency
|
254
|
+
compile_regex = opts.key?(:compile_regex) ? opts.delete(:compile_regex) : true
|
181
255
|
|
182
256
|
if timeout == false && !block_given?
|
183
257
|
raise ArgumentError, "Collection#find must be invoked with a block when timeout is disabled."
|
@@ -192,21 +266,32 @@ module Mongo
|
|
192
266
|
raise RuntimeError, "Unknown options [#{opts.inspect}]" unless opts.empty?
|
193
267
|
|
194
268
|
cursor = Cursor.new(self, {
|
195
|
-
:selector
|
196
|
-
:fields
|
197
|
-
:skip
|
198
|
-
:limit
|
199
|
-
:order
|
200
|
-
:hint
|
201
|
-
:snapshot
|
202
|
-
:timeout
|
203
|
-
:batch_size
|
204
|
-
:transformer
|
269
|
+
:selector => selector,
|
270
|
+
:fields => fields,
|
271
|
+
:skip => skip,
|
272
|
+
:limit => limit,
|
273
|
+
:order => sort,
|
274
|
+
:hint => hint || named_hint,
|
275
|
+
:snapshot => snapshot,
|
276
|
+
:timeout => timeout,
|
277
|
+
:batch_size => batch_size,
|
278
|
+
:transformer => transformer,
|
279
|
+
:max_scan => max_scan,
|
280
|
+
:show_disk_loc => show_disk_loc,
|
281
|
+
:return_key => return_key,
|
282
|
+
:read => read,
|
283
|
+
:tag_sets => tag_sets,
|
284
|
+
:comment => comment,
|
285
|
+
:acceptable_latency => acceptable_latency,
|
286
|
+
:compile_regex => compile_regex
|
205
287
|
})
|
206
288
|
|
207
289
|
if block_given?
|
208
|
-
|
209
|
-
|
290
|
+
begin
|
291
|
+
yield cursor
|
292
|
+
ensure
|
293
|
+
cursor.close
|
294
|
+
end
|
210
295
|
nil
|
211
296
|
else
|
212
297
|
cursor
|
@@ -218,8 +303,8 @@ module Mongo
|
|
218
303
|
# @return [OrderedHash, Nil]
|
219
304
|
# a single document or nil if no result is found.
|
220
305
|
#
|
221
|
-
# @param [Hash, ObjectId, Nil] spec_or_object_id a hash specifying elements
|
222
|
-
# which must be present for a document to be included in the result set or an
|
306
|
+
# @param [Hash, ObjectId, Nil] spec_or_object_id a hash specifying elements
|
307
|
+
# which must be present for a document to be included in the result set or an
|
223
308
|
# instance of ObjectId to be used as the value for an _id query.
|
224
309
|
# If nil, an empty selector, {}, will be used.
|
225
310
|
#
|
@@ -239,7 +324,9 @@ module Mongo
|
|
239
324
|
else
|
240
325
|
raise TypeError, "spec_or_object_id must be an instance of ObjectId or Hash, or nil"
|
241
326
|
end
|
242
|
-
|
327
|
+
timeout = opts.delete(:max_time_ms)
|
328
|
+
cursor = find(spec, opts.merge(:limit => -1))
|
329
|
+
timeout ? cursor.max_time_ms(timeout).next_document : cursor.next_document
|
243
330
|
end
|
244
331
|
|
245
332
|
# Save a document to this collection.
|
@@ -251,22 +338,30 @@ module Mongo
|
|
251
338
|
#
|
252
339
|
# @return [ObjectId] the _id of the saved document.
|
253
340
|
#
|
254
|
-
# @option opts [
|
255
|
-
#
|
256
|
-
#
|
257
|
-
#
|
258
|
-
#
|
259
|
-
#
|
260
|
-
#
|
341
|
+
# @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
|
342
|
+
# should be acknowledged.
|
343
|
+
# @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
|
344
|
+
# @option opts [Boolean] :j (false) If true, block until write operations have been committed
|
345
|
+
# to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
|
346
|
+
# ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
|
347
|
+
# fail with an exception if this option is used when the server is running without journaling.
|
348
|
+
# @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
|
349
|
+
# the server has synced all data files to disk. If the server is running with journaling, this acts the same as
|
350
|
+
# the 'j' option, blocking until write operations have been committed to the journal.
|
351
|
+
# Cannot be used in combination with 'j'.
|
352
|
+
#
|
353
|
+
# Options provided here will override any write concern options set on this collection,
|
354
|
+
# its database object, or the current connection. See the options
|
355
|
+
# for DB#get_last_error.
|
261
356
|
#
|
262
|
-
# @
|
357
|
+
# @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
|
263
358
|
def save(doc, opts={})
|
264
359
|
if doc.has_key?(:_id) || doc.has_key?('_id')
|
265
360
|
id = doc[:_id] || doc['_id']
|
266
|
-
update({:_id => id}, doc, :upsert => true
|
361
|
+
update({:_id => id}, doc, opts.merge!({:upsert => true}))
|
267
362
|
id
|
268
363
|
else
|
269
|
-
insert(doc,
|
364
|
+
insert(doc, opts)
|
270
365
|
end
|
271
366
|
end
|
272
367
|
|
@@ -277,24 +372,53 @@ module Mongo
|
|
277
372
|
#
|
278
373
|
# @return [ObjectId, Array]
|
279
374
|
# The _id of the inserted document or a list of _ids of all inserted documents.
|
280
|
-
#
|
281
|
-
#
|
282
|
-
#
|
283
|
-
#
|
284
|
-
#
|
285
|
-
#
|
286
|
-
#
|
287
|
-
#
|
288
|
-
#
|
289
|
-
#
|
290
|
-
#
|
291
|
-
#
|
375
|
+
# @return [[ObjectId, Array], [Hash, Array]]
|
376
|
+
# 1st, the _id of the inserted document or a list of _ids of all inserted documents.
|
377
|
+
# 2nd, a list of invalid documents.
|
378
|
+
# Return this result format only when :collect_on_error is true.
|
379
|
+
#
|
380
|
+
# @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
|
381
|
+
# should be acknowledged.
|
382
|
+
# @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
|
383
|
+
# @option opts [Boolean] :j (false) If true, block until write operations have been committed
|
384
|
+
# to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
|
385
|
+
# ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
|
386
|
+
# fail with an exception if this option is used when the server is running without journaling.
|
387
|
+
# @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
|
388
|
+
# the server has synced all data files to disk. If the server is running with journaling, this acts the same as
|
389
|
+
# the 'j' option, blocking until write operations have been committed to the journal.
|
390
|
+
# Cannot be used in combination with 'j'.
|
391
|
+
#
|
392
|
+
# Notes on write concern:
|
393
|
+
# Options provided here will override any write concern options set on this collection,
|
394
|
+
# its database object, or the current connection. See the options for +DB#get_last_error+.
|
395
|
+
#
|
396
|
+
# @option opts [Boolean] :continue_on_error (+false+) If true, then
|
397
|
+
# continue a bulk insert even if one of the documents inserted
|
398
|
+
# triggers a database assertion (as in a duplicate insert, for instance).
|
399
|
+
# If not acknowledging writes, the list of ids returned will
|
400
|
+
# include the object ids of all documents attempted on insert, even
|
401
|
+
# if some are rejected on error. When acknowledging writes, any error will raise an
|
402
|
+
# OperationFailure exception.
|
403
|
+
# MongoDB v2.0+.
|
404
|
+
# @option opts [Boolean] :collect_on_error (+false+) if true, then
|
405
|
+
# collects invalid documents as an array. Note that this option changes the result format.
|
406
|
+
#
|
407
|
+
# @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
|
292
408
|
def insert(doc_or_docs, opts={})
|
293
|
-
|
294
|
-
|
295
|
-
|
296
|
-
|
297
|
-
|
409
|
+
if doc_or_docs.respond_to?(:collect!)
|
410
|
+
doc_or_docs.collect! { |doc| @pk_factory.create_pk(doc) }
|
411
|
+
error_docs, errors, write_concern_errors, rest_ignored = batch_write(:insert, doc_or_docs, true, opts)
|
412
|
+
errors = write_concern_errors + errors
|
413
|
+
raise errors.last if !opts[:collect_on_error] && !errors.empty?
|
414
|
+
inserted_docs = doc_or_docs - error_docs
|
415
|
+
inserted_ids = inserted_docs.collect {|o| o[:_id] || o['_id']}
|
416
|
+
opts[:collect_on_error] ? [inserted_ids, error_docs] : inserted_ids
|
417
|
+
else
|
418
|
+
@pk_factory.create_pk(doc_or_docs)
|
419
|
+
send_write(:insert, nil, doc_or_docs, true, opts)
|
420
|
+
return doc_or_docs[:_id] || doc_or_docs['_id']
|
421
|
+
end
|
298
422
|
end
|
299
423
|
alias_method :<<, :insert
|
300
424
|
|
@@ -303,12 +427,22 @@ module Mongo
|
|
303
427
|
# @param [Hash] selector
|
304
428
|
# If specified, only matching documents will be removed.
|
305
429
|
#
|
306
|
-
# @option opts [
|
307
|
-
#
|
308
|
-
#
|
309
|
-
#
|
310
|
-
#
|
311
|
-
#
|
430
|
+
# @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
|
431
|
+
# should be acknowledged.
|
432
|
+
# @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
|
433
|
+
# @option opts [Boolean] :j (false) If true, block until write operations have been committed
|
434
|
+
# to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
|
435
|
+
# ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
|
436
|
+
# fail with an exception if this option is used when the server is running without journaling.
|
437
|
+
# @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
|
438
|
+
# the server has synced all data files to disk. If the server is running with journaling, this acts the same as
|
439
|
+
# the 'j' option, blocking until write operations have been committed to the journal.
|
440
|
+
# Cannot be used in combination with 'j'.
|
441
|
+
# @option opts [Integer] :limit (0) Set limit option, currently only 0 for all or 1 for just one.
|
442
|
+
#
|
443
|
+
# Notes on write concern:
|
444
|
+
# Options provided here will override any write concern options set on this collection,
|
445
|
+
# its database object, or the current connection. See the options for +DB#get_last_error+.
|
312
446
|
#
|
313
447
|
# @example remove all documents from the 'users' collection:
|
314
448
|
# users.remove
|
@@ -317,31 +451,12 @@ module Mongo
|
|
317
451
|
# @example remove only documents that have expired:
|
318
452
|
# users.remove({:expire => {"$lte" => Time.now}})
|
319
453
|
#
|
320
|
-
# @return [Hash, true] Returns a Hash containing the last error object if
|
454
|
+
# @return [Hash, true] Returns a Hash containing the last error object if acknowledging writes
|
321
455
|
# Otherwise, returns true.
|
322
456
|
#
|
323
|
-
# @raise [Mongo::OperationFailure]
|
324
|
-
# and the operation fails.
|
325
|
-
#
|
326
|
-
# @see DB#remove for options that can be passed to :safe.
|
327
|
-
#
|
328
|
-
# @core remove remove-instance_method
|
457
|
+
# @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
|
329
458
|
def remove(selector={}, opts={})
|
330
|
-
|
331
|
-
safe = opts.fetch(:safe, @safe)
|
332
|
-
message = BSON::ByteBuffer.new("\0\0\0\0")
|
333
|
-
BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{@name}")
|
334
|
-
message.put_int(0)
|
335
|
-
message.put_binary(BSON::BSON_CODER.serialize(selector, false, true).to_s)
|
336
|
-
|
337
|
-
@connection.instrument(:remove, :database => @db.name, :collection => @name, :selector => selector) do
|
338
|
-
if safe
|
339
|
-
@connection.send_message_with_safe_check(Mongo::Constants::OP_DELETE, message, @db.name, nil, safe)
|
340
|
-
else
|
341
|
-
@connection.send_message(Mongo::Constants::OP_DELETE, message)
|
342
|
-
true
|
343
|
-
end
|
344
|
-
end
|
459
|
+
send_write(:delete, selector, nil, nil, opts)
|
345
460
|
end
|
346
461
|
|
347
462
|
# Update one or more documents in this collection.
|
@@ -358,44 +473,37 @@ module Mongo
|
|
358
473
|
# @option opts [Boolean] :upsert (+false+) if true, performs an upsert (update or insert)
|
359
474
|
# @option opts [Boolean] :multi (+false+) update all documents matching the selector, as opposed to
|
360
475
|
# just the first matching document. Note: only works in MongoDB 1.1.3 or later.
|
361
|
-
# @option opts [
|
362
|
-
#
|
363
|
-
#
|
364
|
-
#
|
365
|
-
#
|
366
|
-
#
|
367
|
-
#
|
368
|
-
# @
|
476
|
+
# @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
|
477
|
+
# should be acknowledged.
|
478
|
+
# @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
|
479
|
+
# @option opts [Boolean] :j (false) If true, block until write operations have been committed
|
480
|
+
# to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
|
481
|
+
# ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
|
482
|
+
# fail with an exception if this option is used when the server is running without journaling.
|
483
|
+
# @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
|
484
|
+
# the server has synced all data files to disk. If the server is running with journaling, this acts the same as
|
485
|
+
# the 'j' option, blocking until write operations have been committed to the journal.
|
486
|
+
# Cannot be used in combination with 'j'.
|
487
|
+
#
|
488
|
+
# Notes on write concern:
|
489
|
+
# Options provided here will override any write concern options set on this collection,
|
490
|
+
# its database object, or the current connection. See the options for DB#get_last_error.
|
491
|
+
#
|
492
|
+
# @return [Hash, true] Returns a Hash containing the last error object if acknowledging writes.
|
369
493
|
# Otherwise, returns true.
|
370
494
|
#
|
371
|
-
# @
|
495
|
+
# @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
|
372
496
|
def update(selector, document, opts={})
|
373
|
-
|
374
|
-
safe = opts.fetch(:safe, @safe)
|
375
|
-
message = BSON::ByteBuffer.new("\0\0\0\0")
|
376
|
-
BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{@name}")
|
377
|
-
update_options = 0
|
378
|
-
update_options += 1 if opts[:upsert]
|
379
|
-
update_options += 2 if opts[:multi]
|
380
|
-
message.put_int(update_options)
|
381
|
-
message.put_binary(BSON::BSON_CODER.serialize(selector, false, true).to_s)
|
382
|
-
message.put_binary(BSON::BSON_CODER.serialize(document, false, true).to_s)
|
383
|
-
|
384
|
-
@connection.instrument(:update, :database => @db.name, :collection => @name, :selector => selector, :document => document) do
|
385
|
-
if safe
|
386
|
-
@connection.send_message_with_safe_check(Mongo::Constants::OP_UPDATE, message, @db.name, nil, safe)
|
387
|
-
else
|
388
|
-
@connection.send_message(Mongo::Constants::OP_UPDATE, message, nil)
|
389
|
-
end
|
390
|
-
end
|
497
|
+
send_write(:update, selector, document, !document.keys.first.to_s.start_with?("$"), opts)
|
391
498
|
end
|
392
499
|
|
393
500
|
# Create a new index.
|
394
501
|
#
|
395
502
|
# @param [String, Array] spec
|
396
503
|
# should be either a single field name or an array of
|
397
|
-
# [field name,
|
398
|
-
# as Mongo::ASCENDING, Mongo::DESCENDING,
|
504
|
+
# [field name, type] pairs. Index types should be specified
|
505
|
+
# as Mongo::ASCENDING, Mongo::DESCENDING, Mongo::GEO2D, Mongo::GEO2DSPHERE, Mongo::GEOHAYSTACK,
|
506
|
+
# Mongo::TEXT or Mongo::HASHED.
|
399
507
|
#
|
400
508
|
# Note that geospatial indexing only works with versions of MongoDB >= 1.3.3+. Keep in mind, too,
|
401
509
|
# that in order to geo-index a given field, that field must reference either an array or a sub-object
|
@@ -410,14 +518,23 @@ module Mongo
|
|
410
518
|
# @option opts [Boolean] :unique (false) if true, this index will enforce a uniqueness constraint.
|
411
519
|
# @option opts [Boolean] :background (false) indicate that the index should be built in the background. This
|
412
520
|
# feature is only available in MongoDB >= 1.3.2.
|
413
|
-
# @option opts [Boolean] :drop_dups (nil) If creating a unique index on a collection with
|
414
|
-
# this option will keep the first document the database indexes and drop all subsequent
|
521
|
+
# @option opts [Boolean] :drop_dups (nil) (DEPRECATED) If creating a unique index on a collection with
|
522
|
+
# pre-existing records, this option will keep the first document the database indexes and drop all subsequent
|
523
|
+
# with duplicate values.
|
524
|
+
# @option opts [Integer] :bucket_size (nil) For use with geoHaystack indexes. Number of documents to group
|
525
|
+
# together within a certain proximity to a given longitude and latitude.
|
415
526
|
# @option opts [Integer] :min (nil) specify the minimum longitude and latitude for a geo index.
|
416
527
|
# @option opts [Integer] :max (nil) specify the maximum longitude and latitude for a geo index.
|
417
528
|
#
|
529
|
+
# @example Creating a compound index using a hash: (Ruby 1.9+ Syntax)
|
530
|
+
# @posts.create_index({'subject' => Mongo::ASCENDING, 'created_at' => Mongo::DESCENDING})
|
531
|
+
#
|
418
532
|
# @example Creating a compound index:
|
419
533
|
# @posts.create_index([['subject', Mongo::ASCENDING], ['created_at', Mongo::DESCENDING]])
|
420
534
|
#
|
535
|
+
# @example Creating a geospatial index using a hash: (Ruby 1.9+ Syntax)
|
536
|
+
# @restaurants.create_index(:location => Mongo::GEO2D)
|
537
|
+
#
|
421
538
|
# @example Creating a geospatial index:
|
422
539
|
# @restaurants.create_index([['location', Mongo::GEO2D]])
|
423
540
|
#
|
@@ -429,44 +546,59 @@ module Mongo
|
|
429
546
|
# @example A geospatial index with alternate longitude and latitude:
|
430
547
|
# @restaurants.create_index([['location', Mongo::GEO2D]], :min => 500, :max => 500)
|
431
548
|
#
|
432
|
-
# @
|
549
|
+
# @note The :drop_dups option is no longer supported by MongoDB starting with server version 2.7.5.
|
550
|
+
# The option is silently ignored by the server and unique index builds using the option will
|
551
|
+
# fail if a duplicate value is detected.
|
433
552
|
#
|
434
|
-
# @
|
553
|
+
# @note Note that the options listed may be subset of those available.
|
554
|
+
# See the MongoDB documentation for a full list of supported options by server version.
|
555
|
+
#
|
556
|
+
# @return [String] the name of the index created.
|
435
557
|
def create_index(spec, opts={})
|
436
|
-
|
437
|
-
|
438
|
-
|
439
|
-
|
440
|
-
|
441
|
-
|
558
|
+
options = opts.dup
|
559
|
+
options[:dropDups] = options.delete(:drop_dups) if options[:drop_dups]
|
560
|
+
options[:bucketSize] = options.delete(:bucket_size) if options[:bucket_size]
|
561
|
+
field_spec = parse_index_spec(spec)
|
562
|
+
name = options.delete(:name) || generate_index_name(field_spec)
|
563
|
+
name = name.to_s if name
|
564
|
+
generate_indexes(field_spec, name, options)
|
442
565
|
name
|
443
566
|
end
|
444
567
|
|
445
568
|
# Calls create_index and sets a flag to not do so again for another X minutes.
|
446
569
|
# this time can be specified as an option when initializing a Mongo::DB object as options[:cache_time]
|
447
|
-
# Any changes to an index will be
|
570
|
+
# Any changes to an index will be propagated through regardless of cache time (e.g., a change of index direction)
|
448
571
|
#
|
449
572
|
# The parameters and options for this methods are the same as those for Collection#create_index.
|
450
573
|
#
|
451
|
-
# @example Call sequence:
|
452
|
-
# Time t: @posts.ensure_index(
|
574
|
+
# @example Call sequence (Ruby 1.9+ Syntax):
|
575
|
+
# Time t: @posts.ensure_index(:subject => Mongo::ASCENDING) -- calls create_index and
|
453
576
|
# sets the 5 minute cache
|
454
|
-
# Time t+2min : @posts.ensure_index(
|
455
|
-
# Time t+3min : @posts.ensure_index(
|
577
|
+
# Time t+2min : @posts.ensure_index(:subject => Mongo::ASCENDING) -- doesn't do anything
|
578
|
+
# Time t+3min : @posts.ensure_index(:something_else => Mongo::ASCENDING) -- calls create_index
|
456
579
|
# and sets 5 minute cache
|
457
|
-
# Time t+10min : @posts.ensure_index(
|
580
|
+
# Time t+10min : @posts.ensure_index(:subject => Mongo::ASCENDING) -- calls create_index and
|
458
581
|
# resets the 5 minute counter
|
459
582
|
#
|
583
|
+
# @note The :drop_dups option is no longer supported by MongoDB starting with server version 2.7.5.
|
584
|
+
# The option is silently ignored by the server and unique index builds using the option will
|
585
|
+
# fail if a duplicate value is detected.
|
586
|
+
#
|
587
|
+
# @note Note that the options listed may be subset of those available.
|
588
|
+
# See the MongoDB documentation for a full list of supported options by server version.
|
589
|
+
#
|
460
590
|
# @return [String] the name of the index.
|
461
591
|
def ensure_index(spec, opts={})
|
462
|
-
now
|
463
|
-
|
464
|
-
|
465
|
-
|
466
|
-
|
592
|
+
now = Time.now.utc.to_i
|
593
|
+
options = opts.dup
|
594
|
+
options[:dropDups] = options.delete(:drop_dups) if options[:drop_dups]
|
595
|
+
options[:bucketSize] = options.delete(:bucket_size) if options[:bucket_size]
|
596
|
+
field_spec = parse_index_spec(spec)
|
597
|
+
name = options.delete(:name) || generate_index_name(field_spec)
|
598
|
+
name = name.to_s if name
|
467
599
|
|
468
600
|
if !@cache[name] || @cache[name] <= now
|
469
|
-
generate_indexes(field_spec, name,
|
601
|
+
generate_indexes(field_spec, name, options)
|
470
602
|
end
|
471
603
|
|
472
604
|
# Reset the cache here in case there are any errors inserting. Best to be safe.
|
@@ -477,16 +609,15 @@ module Mongo
|
|
477
609
|
# Drop a specified index.
|
478
610
|
#
|
479
611
|
# @param [String] name
|
480
|
-
#
|
481
|
-
# @core indexes
|
482
612
|
def drop_index(name)
|
613
|
+
if name.is_a?(Array)
|
614
|
+
return drop_index(index_name(name))
|
615
|
+
end
|
483
616
|
@cache[name.to_s] = nil
|
484
617
|
@db.drop_index(@name, name)
|
485
618
|
end
|
486
619
|
|
487
620
|
# Drop all indexes.
|
488
|
-
#
|
489
|
-
# @core indexes
|
490
621
|
def drop_indexes
|
491
622
|
@cache = {}
|
492
623
|
|
@@ -501,25 +632,114 @@ module Mongo
|
|
501
632
|
|
502
633
|
# Atomically update and return a document using MongoDB's findAndModify command. (MongoDB > 1.3.0)
|
503
634
|
#
|
504
|
-
# @option opts [Hash] :query ({}) a query selector document for matching
|
505
|
-
#
|
506
|
-
# @option opts [
|
507
|
-
#
|
508
|
-
#
|
509
|
-
#
|
510
|
-
#
|
511
|
-
#
|
635
|
+
# @option opts [Hash] :query ({}) a query selector document for matching
|
636
|
+
# the desired document.
|
637
|
+
# @option opts [Hash] :update (nil) the update operation to perform on the
|
638
|
+
# matched document.
|
639
|
+
# @option opts [Array, String, OrderedHash] :sort ({}) specify a sort
|
640
|
+
# option for the query using any
|
641
|
+
# of the sort options available for Cursor#sort. Sort order is important
|
642
|
+
# if the query will be matching multiple documents since only the first
|
643
|
+
# matching document will be updated and returned.
|
644
|
+
# @option opts [Boolean] :remove (false) If true, removes the returned
|
645
|
+
# document from the collection.
|
646
|
+
# @option opts [Boolean] :new (false) If true, returns the updated
|
647
|
+
# document; otherwise, returns the document prior to update.
|
648
|
+
# @option opts [Boolean] :upsert (false) If true, creates a new document
|
649
|
+
# if the query returns no document.
|
650
|
+
# @option opts [Hash] :fields (nil) A subset of fields to return.
|
651
|
+
# Specify an inclusion of a field with 1. _id is included by default and must
|
652
|
+
# be explicitly excluded.
|
653
|
+
# @option opts [Boolean] :full_response (false) If true, returns the entire
|
654
|
+
# response object from the server including 'ok' and 'lastErrorObject'.
|
512
655
|
#
|
513
656
|
# @return [Hash] the matched document.
|
514
|
-
#
|
515
|
-
# @core findandmodify find_and_modify-instance_method
|
516
657
|
def find_and_modify(opts={})
|
658
|
+
full_response = opts.delete(:full_response)
|
659
|
+
|
517
660
|
cmd = BSON::OrderedHash.new
|
518
661
|
cmd[:findandmodify] = @name
|
519
662
|
cmd.merge!(opts)
|
520
|
-
cmd[:sort] = Mongo::Support.format_order_clause(opts[:sort]) if opts[:sort]
|
521
663
|
|
522
|
-
|
664
|
+
cmd[:sort] =
|
665
|
+
Mongo::Support.format_order_clause(opts[:sort]) if opts[:sort]
|
666
|
+
|
667
|
+
full_response ? @db.command(cmd) : @db.command(cmd)['value']
|
668
|
+
end
|
669
|
+
|
670
|
+
# Perform an aggregation using the aggregation framework on the current collection.
|
671
|
+
# @note Aggregate requires server version >= 2.1.1
|
672
|
+
# @note Field References: Within an expression, field names must be quoted and prefixed by a dollar sign ($).
|
673
|
+
#
|
674
|
+
# @example Define the pipeline as an array of operator hashes:
|
675
|
+
# coll.aggregate([ {"$project" => {"last_name" => 1, "first_name" => 1 }}, {"$match" => {"last_name" => "Jones"}} ])
|
676
|
+
#
|
677
|
+
# @example With server version 2.5.1 or newer, pass a cursor option to retrieve unlimited aggregation results:
|
678
|
+
# coll.aggregate([ {"$group" => { :_id => "$_id", :count => { "$sum" => "$members" }}} ], :cursor => {} )
|
679
|
+
#
|
680
|
+
# @param [Array] pipeline Should be a single array of pipeline operator hashes.
|
681
|
+
#
|
682
|
+
# '$project' Reshapes a document stream by including fields, excluding fields, inserting computed fields,
|
683
|
+
# renaming fields,or creating/populating fields that hold sub-documents.
|
684
|
+
#
|
685
|
+
# '$match' Query-like interface for filtering documents out of the aggregation pipeline.
|
686
|
+
#
|
687
|
+
# '$limit' Restricts the number of documents that pass through the pipeline.
|
688
|
+
#
|
689
|
+
# '$skip' Skips over the specified number of documents and passes the rest along the pipeline.
|
690
|
+
#
|
691
|
+
# '$unwind' Peels off elements of an array individually, returning one document for each member.
|
692
|
+
#
|
693
|
+
# '$group' Groups documents for calculating aggregate values.
|
694
|
+
#
|
695
|
+
# '$sort' Sorts all input documents and returns them to the pipeline in sorted order.
|
696
|
+
#
|
697
|
+
# '$out' The name of a collection to which the result set will be saved.
|
698
|
+
#
|
699
|
+
# @option opts [:primary, :secondary] :read Read preference indicating which server to perform this operation
|
700
|
+
# on. If $out is specified and :read is not :primary, the aggregation will be rerouted to the primary with
|
701
|
+
# a warning. See Collection#find for more details.
|
702
|
+
# @option opts [String] :comment (nil) a comment to include in profiling logs
|
703
|
+
# @option opts [Hash] :cursor return a cursor object instead of an Array. Takes an optional batchSize parameter
|
704
|
+
# to specify the maximum size, in documents, of the first batch returned.
|
705
|
+
#
|
706
|
+
# @return [Array] An Array with the aggregate command's results.
|
707
|
+
#
|
708
|
+
# @raise MongoArgumentError if operators either aren't supplied or aren't in the correct format.
|
709
|
+
# @raise MongoOperationFailure if the aggregate command fails.
|
710
|
+
#
|
711
|
+
def aggregate(pipeline=nil, opts={})
|
712
|
+
raise MongoArgumentError, "pipeline must be an array of operators" unless pipeline.class == Array
|
713
|
+
raise MongoArgumentError, "pipeline operators must be hashes" unless pipeline.all? { |op| op.class == Hash }
|
714
|
+
|
715
|
+
selector = BSON::OrderedHash.new
|
716
|
+
selector['aggregate'] = self.name
|
717
|
+
selector['pipeline'] = pipeline
|
718
|
+
|
719
|
+
result = @db.command(selector, command_options(opts))
|
720
|
+
unless Mongo::Support.ok?(result)
|
721
|
+
raise Mongo::OperationFailure, "aggregate failed: #{result['errmsg']}"
|
722
|
+
end
|
723
|
+
|
724
|
+
if result.key?('cursor')
|
725
|
+
cursor_info = result['cursor']
|
726
|
+
pinned_pool = @connection.pinned_pool
|
727
|
+
pinned_pool = pinned_pool[:pool] if pinned_pool.respond_to?(:keys)
|
728
|
+
|
729
|
+
seed = {
|
730
|
+
:cursor_id => cursor_info['id'],
|
731
|
+
:first_batch => cursor_info['firstBatch'],
|
732
|
+
:pool => pinned_pool,
|
733
|
+
:ns => cursor_info['ns']
|
734
|
+
}
|
735
|
+
|
736
|
+
return Cursor.new(self, seed.merge!(opts))
|
737
|
+
|
738
|
+
elsif selector['pipeline'].any? { |op| op.key?('$out') || op.key?(:$out) }
|
739
|
+
return result
|
740
|
+
end
|
741
|
+
|
742
|
+
result['result'] || result
|
523
743
|
end
|
524
744
|
|
525
745
|
# Perform a map-reduce operation on the current collection.
|
@@ -534,25 +754,28 @@ module Mongo
|
|
534
754
|
# @option opts [Integer] :limit (nil) if passing a query, number of objects to return from the collection.
|
535
755
|
# @option opts [String, BSON::Code] :finalize (nil) a javascript function to apply to the result set after the
|
536
756
|
# map/reduce operation has finished.
|
537
|
-
# @option opts [String] :out
|
538
|
-
#
|
539
|
-
#
|
540
|
-
#
|
757
|
+
# @option opts [String, Hash] :out Location of the result of the map-reduce operation. You can output to a
|
758
|
+
# collection, output to a collection with an action, or output inline. You may output to a collection
|
759
|
+
# when performing map reduce operations on the primary members of the set; on secondary members you
|
760
|
+
# may only use the inline output. See the server mapReduce documentation for available options.
|
761
|
+
# @option opts [Boolean] :keeptemp (false) if true, the generated collection will be persisted. The default
|
541
762
|
# is false. Note that this option has no effect is versions of MongoDB > v1.7.6.
|
542
763
|
# @option opts [Boolean ] :verbose (false) if true, provides statistics on job execution time.
|
543
764
|
# @option opts [Boolean] :raw (false) if true, return the raw result object from the map_reduce command, and not
|
544
765
|
# the instantiated collection that's returned by default. Note if a collection name isn't returned in the
|
545
|
-
# map-reduce output (as, for example, when using :out => {:inline => 1}), then you must specify this option
|
766
|
+
# map-reduce output (as, for example, when using :out => { :inline => 1 }), then you must specify this option
|
546
767
|
# or an ArgumentError will be raised.
|
768
|
+
# @option opts [:primary, :secondary] :read Read preference indicating which server to run this map-reduce
|
769
|
+
# on. See Collection#find for more details.
|
770
|
+
# @option opts [String] :comment (nil) a comment to include in profiling logs
|
547
771
|
#
|
548
772
|
# @return [Collection, Hash] a Mongo::Collection object or a Hash with the map-reduce command's results.
|
549
773
|
#
|
550
|
-
# @raise ArgumentError if you specify {:out => {:inline => true}} but don't specify :raw => true.
|
774
|
+
# @raise ArgumentError if you specify { :out => { :inline => true }} but don't specify :raw => true.
|
551
775
|
#
|
552
776
|
# @see http://www.mongodb.org/display/DOCS/MapReduce Offical MongoDB map/reduce documentation.
|
553
|
-
#
|
554
|
-
# @core mapreduce map_reduce-instance_method
|
555
777
|
def map_reduce(map, reduce, opts={})
|
778
|
+
opts = opts.dup
|
556
779
|
map = BSON::Code.new(map) unless map.is_a?(BSON::Code)
|
557
780
|
reduce = BSON::Code.new(reduce) unless reduce.is_a?(BSON::Code)
|
558
781
|
raw = opts.delete(:raw)
|
@@ -561,17 +784,25 @@ module Mongo
|
|
561
784
|
hash['mapreduce'] = self.name
|
562
785
|
hash['map'] = map
|
563
786
|
hash['reduce'] = reduce
|
564
|
-
hash
|
787
|
+
hash['out'] = opts.delete(:out)
|
788
|
+
hash['sort'] = Mongo::Support.format_order_clause(opts.delete(:sort)) if opts.key?(:sort)
|
565
789
|
|
566
|
-
result = @db.command(hash)
|
790
|
+
result = @db.command(hash, command_options(opts))
|
567
791
|
unless Mongo::Support.ok?(result)
|
568
792
|
raise Mongo::OperationFailure, "map-reduce failed: #{result['errmsg']}"
|
569
793
|
end
|
570
794
|
|
571
795
|
if raw
|
572
796
|
result
|
573
|
-
elsif result[
|
574
|
-
|
797
|
+
elsif result['result']
|
798
|
+
if result['result'].is_a?(BSON::OrderedHash) &&
|
799
|
+
result['result'].key?('db') &&
|
800
|
+
result['result'].key?('collection')
|
801
|
+
otherdb = @db.connection[result['result']['db']]
|
802
|
+
otherdb[result['result']['collection']]
|
803
|
+
else
|
804
|
+
@db[result["result"]]
|
805
|
+
end
|
575
806
|
else
|
576
807
|
raise ArgumentError, "Could not instantiate collection from result. If you specified " +
|
577
808
|
"{:out => {:inline => true}}, then you must also specify :raw => true to get the results."
|
@@ -593,16 +824,23 @@ module Mongo
|
|
593
824
|
# @option opts [String, BSON::Code] :finalize (nil) a JavaScript function that receives and modifies
|
594
825
|
# each of the resultant grouped objects. Available only when group is run with command
|
595
826
|
# set to true.
|
827
|
+
# @option opts [:primary, :secondary] :read Read preference indicating which server to perform this group
|
828
|
+
# on. See Collection#find for more details.
|
829
|
+
# @option opts [String] :comment (nil) a comment to include in profiling logs
|
596
830
|
#
|
597
831
|
# @return [Array] the command response consisting of grouped items.
|
598
832
|
def group(opts, condition={}, initial={}, reduce=nil, finalize=nil)
|
833
|
+
opts = opts.dup
|
599
834
|
if opts.is_a?(Hash)
|
600
835
|
return new_group(opts)
|
601
|
-
|
602
|
-
|
603
|
-
|
836
|
+
elsif opts.is_a?(Symbol)
|
837
|
+
raise MongoArgumentError, "Group takes either an array of fields to group by or a JavaScript function" +
|
838
|
+
"in the form of a String or BSON::Code."
|
604
839
|
end
|
605
840
|
|
841
|
+
warn "Collection#group no longer takes a list of parameters. This usage is deprecated and will be removed in v2.0." +
|
842
|
+
"Check out the new API at http://api.mongodb.org/ruby/current/Mongo/Collection.html#group-instance_method"
|
843
|
+
|
606
844
|
reduce = BSON::Code.new(reduce) unless reduce.is_a?(BSON::Code)
|
607
845
|
|
608
846
|
group_command = {
|
@@ -614,11 +852,6 @@ module Mongo
|
|
614
852
|
}
|
615
853
|
}
|
616
854
|
|
617
|
-
if opts.is_a?(Symbol)
|
618
|
-
raise MongoArgumentError, "Group takes either an array of fields to group by or a JavaScript function" +
|
619
|
-
"in the form of a String or BSON::Code."
|
620
|
-
end
|
621
|
-
|
622
855
|
unless opts.nil?
|
623
856
|
if opts.is_a? Array
|
624
857
|
key_type = "key"
|
@@ -646,13 +879,44 @@ module Mongo
|
|
646
879
|
end
|
647
880
|
end
|
648
881
|
|
882
|
+
# Scan this entire collection in parallel.
|
883
|
+
# Returns a list of up to num_cursors cursors that can be iterated concurrently. As long as the collection
|
884
|
+
# is not modified during scanning, each document appears once in one of the cursors' result sets.
|
885
|
+
#
|
886
|
+
# @note Requires server version >= 2.5.5
|
887
|
+
#
|
888
|
+
# @param [Integer] num_cursors the number of cursors to return.
|
889
|
+
# @param [Hash] opts
|
890
|
+
#
|
891
|
+
# @return [Array] An array of up to num_cursors cursors for iterating over the collection.
|
892
|
+
def parallel_scan(num_cursors, opts={})
|
893
|
+
cmd = BSON::OrderedHash.new
|
894
|
+
cmd[:parallelCollectionScan] = self.name
|
895
|
+
cmd[:numCursors] = num_cursors
|
896
|
+
result = @db.command(cmd, command_options(opts))
|
897
|
+
|
898
|
+
result['cursors'].collect do |cursor_info|
|
899
|
+
pinned_pool = @connection.pinned_pool
|
900
|
+
pinned_pool = pinned_pool[:pool] if pinned_pool.respond_to?(:keys)
|
901
|
+
|
902
|
+
seed = {
|
903
|
+
:cursor_id => cursor_info['cursor']['id'],
|
904
|
+
:first_batch => cursor_info['cursor']['firstBatch'],
|
905
|
+
:pool => pinned_pool,
|
906
|
+
:ns => cursor_info['ns']
|
907
|
+
}
|
908
|
+
Cursor.new(self, seed.merge!(opts))
|
909
|
+
end
|
910
|
+
|
911
|
+
end
|
912
|
+
|
649
913
|
private
|
650
914
|
|
651
915
|
def new_group(opts={})
|
652
|
-
reduce = opts
|
653
|
-
finalize = opts
|
654
|
-
cond = opts.
|
655
|
-
initial = opts
|
916
|
+
reduce = opts.delete(:reduce)
|
917
|
+
finalize = opts.delete(:finalize)
|
918
|
+
cond = opts.delete(:cond) || {}
|
919
|
+
initial = opts.delete(:initial)
|
656
920
|
|
657
921
|
if !(reduce && initial)
|
658
922
|
raise MongoArgumentError, "Group requires at minimum values for initial and reduce."
|
@@ -671,18 +935,18 @@ module Mongo
|
|
671
935
|
cmd['group']['finalize'] = finalize.to_bson_code
|
672
936
|
end
|
673
937
|
|
674
|
-
if key = opts
|
938
|
+
if key = opts.delete(:key)
|
675
939
|
if key.is_a?(String) || key.is_a?(Symbol)
|
676
940
|
key = [key]
|
677
941
|
end
|
678
942
|
key_value = {}
|
679
943
|
key.each { |k| key_value[k] = 1 }
|
680
944
|
cmd["group"]["key"] = key_value
|
681
|
-
elsif keyf = opts
|
945
|
+
elsif keyf = opts.delete(:keyf)
|
682
946
|
cmd["group"]["$keyf"] = keyf.to_bson_code
|
683
947
|
end
|
684
948
|
|
685
|
-
result = @db.command(cmd)
|
949
|
+
result = @db.command(cmd, command_options(opts))
|
686
950
|
result["retval"]
|
687
951
|
end
|
688
952
|
|
@@ -694,6 +958,11 @@ module Mongo
|
|
694
958
|
#
|
695
959
|
# @param [String, Symbol, OrderedHash] key or hash to group by.
|
696
960
|
# @param [Hash] query a selector for limiting the result set over which to group.
|
961
|
+
# @param [Hash] opts the options for this distinct operation.
|
962
|
+
#
|
963
|
+
# @option opts [:primary, :secondary] :read Read preference indicating which server to perform this query
|
964
|
+
# on. See Collection#find for more details.
|
965
|
+
# @option opts [String] :comment (nil) a comment to include in profiling logs
|
697
966
|
#
|
698
967
|
# @example Saving zip codes and ages and returning distinct results.
|
699
968
|
# @collection.save({:zip => 10010, :name => {:age => 27}})
|
@@ -713,14 +982,14 @@ module Mongo
|
|
713
982
|
# [27]
|
714
983
|
#
|
715
984
|
# @return [Array] an array of distinct values.
|
716
|
-
def distinct(key, query=nil)
|
985
|
+
def distinct(key, query=nil, opts={})
|
717
986
|
raise MongoArgumentError unless [String, Symbol].include?(key.class)
|
718
|
-
command
|
987
|
+
command = BSON::OrderedHash.new
|
719
988
|
command[:distinct] = @name
|
720
989
|
command[:key] = key.to_s
|
721
990
|
command[:query] = query
|
722
991
|
|
723
|
-
@db.command(command)["values"]
|
992
|
+
@db.command(command, command_options(opts))["values"]
|
724
993
|
end
|
725
994
|
|
726
995
|
# Rename this collection.
|
@@ -759,8 +1028,6 @@ module Mongo
|
|
759
1028
|
# Get information on the indexes for this collection.
|
760
1029
|
#
|
761
1030
|
# @return [Hash] a hash where the keys are index names.
|
762
|
-
#
|
763
|
-
# @core indexes
|
764
1031
|
def index_information
|
765
1032
|
@db.index_information(@name)
|
766
1033
|
end
|
@@ -770,7 +1037,7 @@ module Mongo
|
|
770
1037
|
#
|
771
1038
|
# @return [Hash] options that apply to this collection.
|
772
1039
|
def options
|
773
|
-
@db.collections_info(@name).
|
1040
|
+
@db.collections_info(@name).first['options']
|
774
1041
|
end
|
775
1042
|
|
776
1043
|
# Return stats on the collection. Uses MongoDB's collstats command.
|
@@ -782,15 +1049,38 @@ module Mongo
|
|
782
1049
|
|
783
1050
|
# Get the number of documents in this collection.
|
784
1051
|
#
|
1052
|
+
# @option opts [Hash] :query ({}) A query selector for filtering the documents counted.
|
1053
|
+
# @option opts [Integer] :skip (nil) The number of documents to skip.
|
1054
|
+
# @option opts [Integer] :limit (nil) The number of documents to limit.
|
1055
|
+
# @option opts [String, Array, OrderedHash] :hint hint for query optimizer, usually not necessary if
|
1056
|
+
# using MongoDB > 1.1. This option is only supported with #count in server version > 2.6.
|
1057
|
+
# @option opts [String] :named_hint for specifying a named index as a hint, will be overridden by :hint
|
1058
|
+
# if :hint is also provided. This option is only supported with #count in server version > 2.6.
|
1059
|
+
# @option opts [:primary, :secondary] :read Read preference for this command. See Collection#find for
|
1060
|
+
# more details.
|
1061
|
+
# @option opts [String] :comment (nil) a comment to include in profiling logs
|
1062
|
+
#
|
785
1063
|
# @return [Integer]
|
786
|
-
def count
|
787
|
-
find(
|
1064
|
+
def count(opts={})
|
1065
|
+
find(opts[:query],
|
1066
|
+
:skip => opts[:skip],
|
1067
|
+
:limit => opts[:limit],
|
1068
|
+
:named_hint => opts[:named_hint] || @hint,
|
1069
|
+
:hint => opts[:hint] || @hint,
|
1070
|
+
:read => opts[:read],
|
1071
|
+
:comment => opts[:comment]).count(true)
|
788
1072
|
end
|
789
|
-
|
790
1073
|
alias :size :count
|
791
1074
|
|
792
1075
|
protected
|
793
1076
|
|
1077
|
+
# Provide required command options if they are missing in the command options hash.
|
1078
|
+
#
|
1079
|
+
# @return [Hash] The command options hash
|
1080
|
+
def command_options(opts)
|
1081
|
+
opts[:read] ? opts : opts.merge(:read => @read)
|
1082
|
+
end
|
1083
|
+
|
794
1084
|
def normalize_hint_fields(hint)
|
795
1085
|
case hint
|
796
1086
|
when String
|
@@ -808,78 +1098,95 @@ module Mongo
|
|
808
1098
|
|
809
1099
|
private
|
810
1100
|
|
1101
|
+
def send_write(op_type, selector, doc_or_docs, check_keys, opts, collection_name=@name)
|
1102
|
+
write_concern = get_write_concern(opts, self)
|
1103
|
+
if @db.connection.use_write_command?(write_concern)
|
1104
|
+
@command_writer.send_write_command(op_type, selector, doc_or_docs, check_keys, opts, write_concern, collection_name)
|
1105
|
+
else
|
1106
|
+
@operation_writer.send_write_operation(op_type, selector, doc_or_docs, check_keys, opts, write_concern, collection_name)
|
1107
|
+
end
|
1108
|
+
end
|
1109
|
+
|
1110
|
+
def index_name(spec)
|
1111
|
+
field_spec = parse_index_spec(spec)
|
1112
|
+
index_information.each do |index|
|
1113
|
+
return index[0] if index[1]['key'] == field_spec
|
1114
|
+
end
|
1115
|
+
nil
|
1116
|
+
end
|
1117
|
+
|
811
1118
|
# Normalize an index specification into a BSON::OrderedHash key pattern.
#
# Accepts a String/Symbol (single ascending field), a Hash of
# field => index-type pairs, or an array of [field, type] pairs.
#
# @raise [MongoArgumentError] if the spec (or any index type) is invalid,
#   or if a plain Hash is used on Ruby < 1.9 (where Hash is unordered and
#   key order matters for compound indexes).
#
# @return [BSON::OrderedHash] the normalized key pattern.
def parse_index_spec(spec)
  field_spec = BSON::OrderedHash.new
  if spec.is_a?(String) || spec.is_a?(Symbol)
    # Single field name: default to an ascending (1) index.
    field_spec[spec.to_s] = 1
  elsif spec.is_a?(Hash)
    if RUBY_VERSION < '1.9' && !spec.is_a?(BSON::OrderedHash)
      raise MongoArgumentError, "Must use OrderedHash in Ruby < 1.9.0"
    end
    validate_index_types(spec.values)
    # On 1.9+ plain Hashes preserve insertion order, so conversion is safe.
    field_spec = spec.is_a?(BSON::OrderedHash) ? spec : BSON::OrderedHash.try_convert(spec)
  elsif spec.is_a?(Array) && spec.all? {|field| field.is_a?(Array) }
    # Array-of-pairs form: [['field', 1], ['other', -1], ...]
    spec.each do |f|
      validate_index_types(f[1])
      field_spec[f[0].to_s] = f[1]
    end
  else
    raise MongoArgumentError, "Invalid index specification #{spec.inspect}; " +
      "should be either a hash (OrderedHash), string, symbol, or an array of arrays."
  end
  field_spec
end
|
830
1139
|
|
1140
|
+
# Validate that every supplied index type is one of the driver's
# recognized index type values.
#
# @param types [Array] one or more index type values (nested arrays are
#   flattened before checking).
#
# @raise [MongoArgumentError] on the first unrecognized index type.
def validate_index_types(*types)
  types.flatten!
  types.each do |t|
    next if Mongo::INDEX_TYPES.values.include?(t)
    raise MongoArgumentError, "Invalid index field #{t.inspect}; " +
      "should be one of " + Mongo::INDEX_TYPES.map {|k,v| "Mongo::#{k} (#{v})"}.join(', ')
  end
end
|
1149
|
+
|
831
1150
|
# Create an index on the server for the given key pattern.
#
# Tries the createIndexes command first; if the server is too old to
# know that command, falls back to inserting directly into the
# system.indexes collection (the pre-2.6 mechanism).
#
# @param field_spec [BSON::OrderedHash] the index key pattern.
# @param name [String] the index name.
# @param opts [Hash] additional index options merged into the selector.
#
# @raise [Mongo::OperationFailure] if index creation fails for any reason
#   other than the command being unsupported.
#
# @return [nil]
def generate_indexes(field_spec, name, opts)
  selector = {
    :name => name,
    :key => field_spec
  }
  selector.merge!(opts)

  begin
    # OrderedHash: command name (:createIndexes) must be the first key.
    cmd = BSON::OrderedHash[:createIndexes, @name, :indexes, [selector]]
    @db.command(cmd)
  rescue Mongo::OperationFailure => ex
    if Mongo::ErrorCode::COMMAND_NOT_FOUND_CODES.include?(ex.error_code)
      # Legacy fallback: older servers create indexes via a direct insert
      # into system.indexes, which requires an explicit :ns field.
      selector[:ns] = "#{@db.name}.#{@name}"
      send_write(:insert, nil, selector, false, {:w => 1}, Mongo::DB::SYSTEM_INDEX_COLLECTION)
    else
      raise Mongo::OperationFailure, "Failed to create index #{selector.inspect} with the following error: " +
       "#{ex.message}"
    end
  end

  nil
end
|
853
1172
|
|
854
|
-
# Sends a Mongo::Constants::OP_INSERT message to the database.
|
855
|
-
# Takes an array of +documents+, an optional +collection_name+, and a
|
856
|
-
# +check_keys+ setting.
|
857
|
-
def insert_documents(documents, collection_name=@name, check_keys=true, safe=false)
|
858
|
-
# Initial byte is 0.
|
859
|
-
message = BSON::ByteBuffer.new("\0\0\0\0")
|
860
|
-
BSON::BSON_RUBY.serialize_cstr(message, "#{@db.name}.#{collection_name}")
|
861
|
-
documents.each do |doc|
|
862
|
-
message.put_binary(BSON::BSON_CODER.serialize(doc, check_keys, true).to_s)
|
863
|
-
end
|
864
|
-
raise InvalidOperation, "Exceded maximum insert size of 16,000,000 bytes" if message.size > 16_000_000
|
865
|
-
|
866
|
-
@connection.instrument(:insert, :database => @db.name, :collection => collection_name, :documents => documents) do
|
867
|
-
if safe
|
868
|
-
@connection.send_message_with_safe_check(Mongo::Constants::OP_INSERT, message, @db.name, nil, safe)
|
869
|
-
else
|
870
|
-
@connection.send_message(Mongo::Constants::OP_INSERT, message, nil)
|
871
|
-
end
|
872
|
-
end
|
873
|
-
documents.collect { |o| o[:_id] || o['_id'] }
|
874
|
-
end
|
875
|
-
|
876
1173
|
# Build the default index name for a key pattern by joining each
# "field_type" pair with underscores (e.g. {"a" => 1, "b" => -1}
# becomes "a_1_b_-1").
#
# @param spec [Hash, BSON::OrderedHash] the index key pattern.
#
# @return [String] the generated index name.
def generate_index_name(spec)
  spec.map { |field, type| "#{field}_#{type}" }.join("_")
end
|
1180
|
+
|
1181
|
+
# Dispatch a batch of writes to the appropriate writer: the write-command
# writer when the server/write concern support write commands, otherwise
# the legacy wire-protocol operation writer.
#
# @param op_type [Symbol] the write type (e.g. :insert).
# @param documents [Array<Hash>] the documents to write.
# @param check_keys [Boolean] whether to validate document keys.
# @param opts [Hash] write options, including write concern settings.
def batch_write(op_type, documents, check_keys=true, opts={})
  concern = get_write_concern(opts, self)
  writer =
    if @db.connection.use_write_command?(concern)
      @command_writer
    else
      @operation_writer
    end
  writer.batch_write(op_type, documents, check_keys, opts)
end
|
1189
|
+
|
883
1190
|
end
|
884
1191
|
|
885
1192
|
end
|