mongo 1.10.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. checksums.yaml +7 -0
  2. checksums.yaml.gz.sig +0 -0
  3. data.tar.gz.sig +0 -0
  4. data/LICENSE +190 -0
  5. data/README.md +149 -0
  6. data/Rakefile +31 -0
  7. data/VERSION +1 -0
  8. data/bin/mongo_console +43 -0
  9. data/ext/jsasl/target/jsasl.jar +0 -0
  10. data/lib/mongo.rb +90 -0
  11. data/lib/mongo/bulk_write_collection_view.rb +380 -0
  12. data/lib/mongo/collection.rb +1164 -0
  13. data/lib/mongo/collection_writer.rb +364 -0
  14. data/lib/mongo/connection.rb +19 -0
  15. data/lib/mongo/connection/node.rb +239 -0
  16. data/lib/mongo/connection/pool.rb +347 -0
  17. data/lib/mongo/connection/pool_manager.rb +325 -0
  18. data/lib/mongo/connection/sharding_pool_manager.rb +67 -0
  19. data/lib/mongo/connection/socket.rb +18 -0
  20. data/lib/mongo/connection/socket/socket_util.rb +37 -0
  21. data/lib/mongo/connection/socket/ssl_socket.rb +95 -0
  22. data/lib/mongo/connection/socket/tcp_socket.rb +86 -0
  23. data/lib/mongo/connection/socket/unix_socket.rb +39 -0
  24. data/lib/mongo/cursor.rb +719 -0
  25. data/lib/mongo/db.rb +735 -0
  26. data/lib/mongo/exception.rb +88 -0
  27. data/lib/mongo/functional.rb +21 -0
  28. data/lib/mongo/functional/authentication.rb +318 -0
  29. data/lib/mongo/functional/logging.rb +85 -0
  30. data/lib/mongo/functional/read_preference.rb +174 -0
  31. data/lib/mongo/functional/sasl_java.rb +48 -0
  32. data/lib/mongo/functional/uri_parser.rb +374 -0
  33. data/lib/mongo/functional/write_concern.rb +66 -0
  34. data/lib/mongo/gridfs.rb +18 -0
  35. data/lib/mongo/gridfs/grid.rb +112 -0
  36. data/lib/mongo/gridfs/grid_ext.rb +53 -0
  37. data/lib/mongo/gridfs/grid_file_system.rb +163 -0
  38. data/lib/mongo/gridfs/grid_io.rb +484 -0
  39. data/lib/mongo/legacy.rb +140 -0
  40. data/lib/mongo/mongo_client.rb +702 -0
  41. data/lib/mongo/mongo_replica_set_client.rb +523 -0
  42. data/lib/mongo/mongo_sharded_client.rb +159 -0
  43. data/lib/mongo/networking.rb +370 -0
  44. data/lib/mongo/utils.rb +19 -0
  45. data/lib/mongo/utils/conversions.rb +110 -0
  46. data/lib/mongo/utils/core_ext.rb +70 -0
  47. data/lib/mongo/utils/server_version.rb +69 -0
  48. data/lib/mongo/utils/support.rb +80 -0
  49. data/lib/mongo/utils/thread_local_variable_manager.rb +25 -0
  50. data/mongo.gemspec +36 -0
  51. data/test/functional/authentication_test.rb +35 -0
  52. data/test/functional/bulk_api_stress_test.rb +133 -0
  53. data/test/functional/bulk_write_collection_view_test.rb +1129 -0
  54. data/test/functional/client_test.rb +565 -0
  55. data/test/functional/collection_test.rb +2073 -0
  56. data/test/functional/collection_writer_test.rb +83 -0
  57. data/test/functional/conversions_test.rb +163 -0
  58. data/test/functional/cursor_fail_test.rb +63 -0
  59. data/test/functional/cursor_message_test.rb +57 -0
  60. data/test/functional/cursor_test.rb +625 -0
  61. data/test/functional/db_api_test.rb +819 -0
  62. data/test/functional/db_connection_test.rb +27 -0
  63. data/test/functional/db_test.rb +344 -0
  64. data/test/functional/grid_file_system_test.rb +285 -0
  65. data/test/functional/grid_io_test.rb +252 -0
  66. data/test/functional/grid_test.rb +273 -0
  67. data/test/functional/pool_test.rb +62 -0
  68. data/test/functional/safe_test.rb +98 -0
  69. data/test/functional/ssl_test.rb +29 -0
  70. data/test/functional/support_test.rb +62 -0
  71. data/test/functional/timeout_test.rb +58 -0
  72. data/test/functional/uri_test.rb +330 -0
  73. data/test/functional/write_concern_test.rb +118 -0
  74. data/test/helpers/general.rb +50 -0
  75. data/test/helpers/test_unit.rb +317 -0
  76. data/test/replica_set/authentication_test.rb +35 -0
  77. data/test/replica_set/basic_test.rb +174 -0
  78. data/test/replica_set/client_test.rb +341 -0
  79. data/test/replica_set/complex_connect_test.rb +77 -0
  80. data/test/replica_set/connection_test.rb +138 -0
  81. data/test/replica_set/count_test.rb +64 -0
  82. data/test/replica_set/cursor_test.rb +212 -0
  83. data/test/replica_set/insert_test.rb +140 -0
  84. data/test/replica_set/max_values_test.rb +145 -0
  85. data/test/replica_set/pinning_test.rb +55 -0
  86. data/test/replica_set/query_test.rb +73 -0
  87. data/test/replica_set/read_preference_test.rb +214 -0
  88. data/test/replica_set/refresh_test.rb +175 -0
  89. data/test/replica_set/replication_ack_test.rb +94 -0
  90. data/test/replica_set/ssl_test.rb +32 -0
  91. data/test/sharded_cluster/basic_test.rb +197 -0
  92. data/test/shared/authentication/basic_auth_shared.rb +286 -0
  93. data/test/shared/authentication/bulk_api_auth_shared.rb +259 -0
  94. data/test/shared/authentication/gssapi_shared.rb +164 -0
  95. data/test/shared/authentication/sasl_plain_shared.rb +96 -0
  96. data/test/shared/ssl_shared.rb +235 -0
  97. data/test/test_helper.rb +56 -0
  98. data/test/threading/basic_test.rb +120 -0
  99. data/test/tools/mongo_config.rb +608 -0
  100. data/test/tools/mongo_config_test.rb +160 -0
  101. data/test/unit/client_test.rb +347 -0
  102. data/test/unit/collection_test.rb +166 -0
  103. data/test/unit/connection_test.rb +325 -0
  104. data/test/unit/cursor_test.rb +299 -0
  105. data/test/unit/db_test.rb +136 -0
  106. data/test/unit/grid_test.rb +76 -0
  107. data/test/unit/mongo_sharded_client_test.rb +48 -0
  108. data/test/unit/node_test.rb +93 -0
  109. data/test/unit/pool_manager_test.rb +142 -0
  110. data/test/unit/read_pref_test.rb +115 -0
  111. data/test/unit/read_test.rb +159 -0
  112. data/test/unit/safe_test.rb +158 -0
  113. data/test/unit/sharding_pool_manager_test.rb +84 -0
  114. data/test/unit/write_concern_test.rb +175 -0
  115. metadata +260 -0
  116. metadata.gz.sig +0 -0
data/lib/mongo/bulk_write_collection_view.rb
@@ -0,0 +1,380 @@
+ # Copyright (C) 2009-2013 MongoDB, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ module Mongo
+
+   # A bulk write view to a collection of documents in a database.
+   class BulkWriteCollectionView
+     include Mongo::WriteConcern
+
+     DEFAULT_OP_ARGS = {:q => nil}
+     MULTIPLE_ERRORS_MSG = "batch item errors occurred"
+     EMPTY_BATCH_MSG = "batch is empty"
+
+     attr_reader :collection, :options, :ops, :op_args
+
+     # Initialize a bulk-write-view object to a collection with default query selector {}.
+     #
+     # A bulk write operation is initialized from a collection object.
+     # For example, for an ordered bulk write view:
+     #
+     #   bulk = collection.initialize_ordered_bulk_op
+     #
+     # or for an unordered bulk write view:
+     #
+     #   bulk = collection.initialize_unordered_bulk_op
+     #
+     # The bulk write view collects individual write operations together so that they can be
+     # executed as a batch for significant performance gains.
+     # The ordered bulk operation will execute each operation serially in order.
+     # Execution will stop at the first occurrence of an error for an ordered bulk operation.
+     # The unordered bulk operation will be executed and may take advantage of parallelism.
+     # There are no guarantees for the order of execution of the operations on the server.
+     # Execution will continue even if there are errors for an unordered bulk operation.
+     #
+     # A bulk operation is programmed as a sequence of individual operations.
+     # An individual operation is composed of a method chain of modifiers or setters terminated by a write method.
+     # A modify method sets a value on the current object.
+     # A set method returns a duplicate of the current object with a value set.
+     # A terminator write method appends a write operation to the bulk batch collected in the view.
+     #
+     # The API supports mixing of write operation types in a bulk operation.
+     # However, server support affects the implementation and performance of bulk operations.
+     #
+     # MongoDB version 2.6 servers currently support only bulk commands of the same type.
+     # With an ordered bulk operation,
+     # contiguous individual ops of the same type can be batched into the same db request,
+     # and the next op of a different type must be sent separately in the next request.
+     # Performance will improve if you can arrange your ops to reduce the number of db requests.
+     # With an unordered bulk operation,
+     # individual ops can be grouped by type and sent in at most three requests,
+     # one each per insert, update, or delete.
+     #
+     # MongoDB pre-version 2.6 servers do not support bulk write commands.
+     # The bulk operation must be sent one request per individual op.
+     # This also applies to inserts in order to have accurate counts and error reporting.
+     #
+     # Important note on pre-2.6 performance:
+     # Performance is very poor compared to version 2.6.
+     # We recommend bulk operations with pre-2.6 only for compatibility or
+     # for development in preparation for version 2.6.
+     # For better performance with pre-version 2.6, use bulk insertion with Collection#insert.
+     #
+     # @param [Collection] collection the parent collection object
+     #
+     # @option opts [Boolean] :ordered (true) Set bulk execution for ordered or unordered
+     #
+     # @return [BulkWriteCollectionView]
+     def initialize(collection, options = {})
+       @collection = collection
+       @options = options
+       @ops = []
+       @op_args = DEFAULT_OP_ARGS.dup
+     end
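In practice the view is driven exactly as the comment above describes: modifiers and setters chain into a terminator, and execute sends the whole batch. A minimal sketch (collection, field names, and values are illustrative):

    bulk = collection.initialize_ordered_bulk_op
    bulk.insert({"x" => 1})
    bulk.find({"x" => 1}).update({"$set" => {"y" => 2}})             # modifier chain + terminator
    bulk.find({"x" => 99}).upsert.update_one({"$inc" => {"n" => 1}})
    result = bulk.execute                                            # runs the batch; the op list is cleared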
+
+     def inspect
+       vars = [:@options, :@ops, :@op_args]
+       vars_inspect = vars.collect{|var| "#{var}=#{instance_variable_get(var).inspect}"}
+       "#<Mongo::BulkWriteCollectionView:0x#{self.object_id} " <<
+         "@collection=#<Mongo::Collection:0x#{@collection.object_id}>, #{vars_inspect.join(', ')}>"
+     end
+
+     # Modify the query selector for subsequent bulk write operations.
+     # The default query selector on creation of the bulk write view is {}.
+     # For operations that require a query selector, find() must be set
+     # per operation, or set once for all operations on the bulk object.
+     # For example, these operations:
+     #
+     #   bulk.find({"a" => 2}).update({"$inc" => {"x" => 2}})
+     #   bulk.find({"a" => 2}).update({"$set" => {"b" => 3}})
+     #
+     # may be rewritten as:
+     #
+     #   bulk.find({"a" => 2})
+     #   bulk.update({"$inc" => {"x" => 2}})
+     #   bulk.update({"$set" => {"b" => 3}})
+     #
+     # Note that modifying the query selector in this way will not affect
+     # operations that do not use a query selector, like insert().
+     #
+     # @param [Hash] q the query selector
+     #
+     # @return [BulkWriteCollectionView]
+     def find(q)
+       op_args_set(:q, q)
+     end
+
+     # Modify the upsert option argument for subsequent bulk write operations.
+     #
+     # @param [Boolean] value (true) the upsert option value
+     #
+     # @return [BulkWriteCollectionView]
+     def upsert!(value = true)
+       op_args_set(:upsert, value)
+     end
+
+     # Set the upsert option argument for subsequent bulk write operations.
+     #
+     # @param [Boolean] value (true) the upsert option value
+     #
+     # @return [BulkWriteCollectionView] a duplicated object
+     def upsert(value = true)
+       dup.upsert!(value)
+     end
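This pair follows the modifier/setter convention from the class comment: upsert! mutates the receiver, while upsert chains through a duplicate, so it reads naturally inline; for example (values illustrative):

    bulk.find({"_id" => 17}).upsert.replace_one({"_id" => 17, "n" => 0})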
+
+     # Update one document matching the selector.
+     #
+     #   bulk.find({"a" => 1}).update_one({"$inc" => {"x" => 1}})
+     #
+     # Use the upsert! or upsert method to specify an upsert. For example:
+     #
+     #   bulk.find({"a" => 1}).upsert.update_one({"$inc" => {"x" => 1}})
+     #
+     # @param [Hash] u the update document
+     #
+     # @return [BulkWriteCollectionView]
+     def update_one(u)
+       raise MongoArgumentError, "document must start with an operator" unless update_doc?(u)
+       op_push([:update, @op_args.merge(:u => u, :multi => false)])
+     end
+
+     # Update all documents matching the selector. For example:
+     #
+     #   bulk.find({"a" => 2}).update({"$inc" => {"x" => 2}})
+     #
+     # Use the upsert! or upsert method to specify an upsert. For example:
+     #
+     #   bulk.find({"a" => 2}).upsert.update({"$inc" => {"x" => 2}})
+     #
+     # @param [Hash] u the update document
+     #
+     # @return [BulkWriteCollectionView]
+     def update(u)
+       raise MongoArgumentError, "document must start with an operator" unless update_doc?(u)
+       op_push([:update, @op_args.merge(:u => u, :multi => true)])
+     end
+
+     # Replace entire document (update with whole doc replace). For example:
+     #
+     #   bulk.find({"a" => 3}).replace_one({"x" => 3})
+     #
+     # @param [Hash] u the replacement document
+     #
+     # @return [BulkWriteCollectionView]
+     def replace_one(u)
+       raise MongoArgumentError, "document must not contain any operators" unless replace_doc?(u)
+       op_push([:update, @op_args.merge(:u => u, :multi => false)])
+     end
+
+     # Remove a single document matching the selector. For example:
+     #
+     #   bulk.find({"a" => 4}).remove_one
+     #
+     # @return [BulkWriteCollectionView]
+     def remove_one
+       op_push([:delete, @op_args.merge(:limit => 1)])
+     end
+
+     # Remove all documents matching the selector. For example:
+     #
+     #   bulk.find({"a" => 5}).remove
+     #
+     # @return [BulkWriteCollectionView]
+     def remove
+       op_push([:delete, @op_args.merge(:limit => 0)])
+     end
+
+     # Insert a document. For example:
+     #
+     #   bulk.insert({"x" => 4})
+     #
+     # @return [BulkWriteCollectionView]
+     def insert(document)
+       # TODO - check keys
+       op_push([:insert, {:d => document}])
+     end
+
+     # Execute the bulk operation, with an optional write concern overwriting the default w:1.
+     # For example:
+     #
+     #   write_concern = {:w => 1, :j => 1}
+     #   bulk.execute(write_concern)
+     #
+     # On return from execute, the bulk operation is cleared,
+     # but the selector and upsert settings are preserved.
+     #
+     # @return [Hash, true] the merged result of the bulk execution, or true for unacknowledged writes
+     def execute(opts = {})
+       raise MongoArgumentError, EMPTY_BATCH_MSG if @ops.empty?
+       write_concern = get_write_concern(opts, @collection)
+       @ops.each_with_index{|op, index| op.last.merge!(:ord => index)} # infuse ordinal here to avoid issues with upsert
+       if @collection.db.connection.use_write_command?(write_concern)
+         errors, write_concern_errors, exchanges = @collection.command_writer.bulk_execute(@ops, @options, opts)
+       else
+         errors, write_concern_errors, exchanges = @collection.operation_writer.bulk_execute(@ops, @options, opts)
+       end
+       @ops = []
+       return true if errors.empty? && (exchanges.empty? || exchanges.first[:response] == true) # w 0 without GLE
+       result = merge_result(errors + write_concern_errors, exchanges)
+       raise BulkWriteError.new(MULTIPLE_ERRORS_MSG, Mongo::ErrorCode::MULTIPLE_ERRORS_OCCURRED, result) if !errors.empty? || !write_concern_errors.empty?
+       result
+     end
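When any batch item fails, execute raises BulkWriteError carrying the same merged result document that merge_result builds below, so callers usually wrap it; a sketch, assuming a populated view as above:

    begin
      result = bulk.execute({:w => 1})
      puts result["nInserted"]
    rescue Mongo::BulkWriteError => e
      p e.result["writeErrors"]   # per-op errors, re-indexed by :ord
    end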
+
+     private
+
+     def hash_except(h, *keys)
+       keys.each { |key| h.delete(key) }
+       h
+     end
+
+     def hash_select(h, *keys)
+       Hash[*keys.zip(h.values_at(*keys)).flatten]
+     end
+
+     def tally(h, key, n)
+       h[key] = h.fetch(key, 0) + n
+     end
+
+     # Like tally, but nil poisons the count: once a response omits the value,
+     # the accumulated total becomes and stays nil.
+     def nil_tally(h, key, n)
+       if !h.has_key?(key)
+         h[key] = n
+       elsif h[key]
+         h[key] = n ? h[key] + n : n
+       end
+     end
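The nil treatment matters for nModified, which pre-2.6 servers do not report; once any response omits it, the total is unknowable and stays nil. Illustrative values:

    h = {}
    nil_tally(h, "nModified", 3)     # {"nModified" => 3}
    nil_tally(h, "nModified", 2)     # {"nModified" => 5}
    nil_tally(h, "nModified", nil)   # {"nModified" => nil} -- count is now unknown
    nil_tally(h, "nModified", 4)     # stays nil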
+
+     def append(h, key, obj)
+       h[key] = h.fetch(key, []) << obj
+     end
+
+     def concat(h, key, a)
+       h[key] = h.fetch(key, []) + a
+     end
+
+     def merge_index(h, exchange)
+       h.merge("index" => exchange[:batch][h.fetch("index", 0)][:ord])
+     end
+
+     def merge_indexes(a, exchange)
+       a.collect{|h| merge_index(h, exchange)}
+     end
+
+     # Collapse the per-batch responses and errors from the writers into a
+     # single bulk-API result document.
+     def merge_result(errors, exchanges)
+       ok = 0
+       result = {"ok" => 0, "n" => 0}
+       unless errors.empty?
+         unless (writeErrors = errors.select { |error| error.class != Mongo::OperationFailure && error.class != WriteConcernError }).empty? # assignment
+           concat(result, "writeErrors",
+             writeErrors.collect { |error|
+               {"index" => error.result[:ord], "code" => error.error_code, "errmsg" => error.result[:error].message}
+             })
+         end
+         result.merge!("code" => Mongo::ErrorCode::MULTIPLE_ERRORS_OCCURRED, "errmsg" => MULTIPLE_ERRORS_MSG)
+       end
+       exchanges.each do |exchange|
+         response = exchange[:response]
+         next unless response
+         ok += response["ok"].to_i
+         n = response["n"] || 0
+         op_type = exchange[:op_type]
+         if op_type == :insert
+           n = 1 if response.key?("err") && (response["err"].nil? || response["err"] == "norepl" || response["err"] == "timeout") # OP_INSERT override n = 0 bug, n = exchange[:batch].size always 1
+           tally(result, "nInserted", n)
+         elsif op_type == :update
+           n_upserted = 0
+           if (upserted = response.fetch("upserted", nil)) # assignment
+             upserted = [{"_id" => upserted}] if upserted.class == BSON::ObjectId # OP_UPDATE non-array
+             n_upserted = upserted.size
+             concat(result, "upserted", merge_indexes(upserted, exchange))
+           end
+           tally(result, "nUpserted", n_upserted) if n_upserted > 0
+           tally(result, "nMatched", n - n_upserted)
+           nil_tally(result, "nModified", response["nModified"])
+         elsif op_type == :delete
+           tally(result, "nRemoved", n)
+         end
+         result["n"] += n
+         write_concern_error = nil
+         errmsg = response["errmsg"] || response["err"] # top level
+         if (writeErrors = response["writeErrors"] || response["errDetails"]) # assignment
+           concat(result, "writeErrors", merge_indexes(writeErrors, exchange))
+         elsif response["err"] == "timeout" # errmsg == "timed out waiting for slaves" # OP_*
+           write_concern_error = {"errmsg" => errmsg, "code" => Mongo::ErrorCode::WRITE_CONCERN_FAILED,
+                                  "errInfo" => {"wtimeout" => response["wtimeout"]}} # OP_* does not have "code"
+         elsif errmsg == "norepl" # OP_*
+           write_concern_error = {"errmsg" => errmsg, "code" => Mongo::ErrorCode::WRITE_CONCERN_FAILED} # OP_* does not have "code"
+         elsif errmsg # OP_INSERT, OP_UPDATE have "err"
+           append(result, "writeErrors", merge_index({"errmsg" => errmsg, "code" => response["code"]}, exchange))
+         end
+         if response["writeConcernError"]
+           write_concern_error = response["writeConcernError"]
+         elsif (wnote = response["wnote"]) # assignment - OP_*
+           write_concern_error = {"errmsg" => wnote, "code" => Mongo::ErrorCode::WRITE_CONCERN_FAILED} # OP_* does not have "code"
+         elsif (jnote = response["jnote"]) # assignment - OP_*
+           write_concern_error = {"errmsg" => jnote, "code" => Mongo::ErrorCode::BAD_VALUE} # OP_* does not have "code"
+         end
+         append(result, "writeConcernError", merge_index(write_concern_error, exchange)) if write_concern_error
+       end
+       result.delete("nModified") if result.has_key?("nModified") && !result["nModified"]
+       result.merge!("ok" => [ok + result["n"], 1].min)
+     end
+
+     def initialize_copy(other)
+       other.instance_variable_set(:@options, other.options.dup)
+     end
+
+     def op_args_set(op, value)
+       @op_args[op] = value
+       self
+     end
+
+     def op_push(op)
+       raise MongoArgumentError, "non-nil query must be set via find" if op.first != :insert && !op.last[:q]
+       @ops << op
+       self
+     end
+
+     def update_doc?(doc)
+       !doc.empty? && doc.keys.first.to_s =~ /^\$/
+     end
+
+     def replace_doc?(doc)
+       doc.keys.all?{|key| key !~ /^\$/}
+     end
+
+   end
+
+   class Collection
+
+     # Initialize an ordered bulk write view for this collection.
+     # Execution will stop at the first occurrence of an error for an ordered bulk operation.
+     #
+     # @return [BulkWriteCollectionView]
+     def initialize_ordered_bulk_op
+       BulkWriteCollectionView.new(self, :ordered => true)
+     end
+
+     # Initialize an unordered bulk write view for this collection.
+     # The unordered bulk operation will be executed and may take advantage of parallelism.
+     # There are no guarantees for the order of execution of the operations on the server.
+     # Execution will continue even if there are errors for an unordered bulk operation.
+     #
+     # @return [BulkWriteCollectionView]
+     def initialize_unordered_bulk_op
+       BulkWriteCollectionView.new(self, :ordered => false)
+     end
+
+   end
+
+ end
data/lib/mongo/collection.rb
@@ -0,0 +1,1164 @@
+ # Copyright (C) 2009-2013 MongoDB, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ module Mongo
+
+   # A named collection of documents in a database.
+   class Collection
+     include Mongo::Logging
+     include Mongo::WriteConcern
+
+     attr_reader :db,
+                 :name,
+                 :pk_factory,
+                 :hint,
+                 :write_concern,
+                 :capped,
+                 :operation_writer,
+                 :command_writer
+
+     # Read Preference
+     attr_accessor :read,
+                   :tag_sets,
+                   :acceptable_latency
+
+     # Initialize a collection object.
+     #
+     # @param [String, Symbol] name the name of the collection.
+     # @param [DB] db a MongoDB database instance.
+     #
+     # @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
+     #   should be acknowledged.
+     # @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
+     # @option opts [Boolean] :j (false) If true, block until write operations have been committed
+     #   to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
+     #   ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
+     #   fail with an exception if this option is used when the server is running without journaling.
+     # @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
+     #   the server has synced all data files to disk. If the server is running with journaling, this acts the same as
+     #   the 'j' option, blocking until write operations have been committed to the journal.
+     #   Cannot be used in combination with 'j'.
+     #
+     # Notes about write concern:
+     #   These write concern options will be used for insert, update, and remove methods called on this
+     #   Collection instance. If no value is provided, the default values set on this instance's DB will be used.
+     #   These option values can be overridden for any invocation of insert, update, or remove.
+     #
+     # @option opts [:create_pk] :pk (BSON::ObjectId) A primary key factory to use
+     #   other than the default BSON::ObjectId.
+     # @option opts [:primary, :secondary] :read The default read preference for queries
+     #   initiated from this connection object. If +:secondary+ is chosen, reads will be sent
+     #   to one of the closest available secondary nodes. If a secondary node cannot be located, the
+     #   read will be sent to the primary. If this option is left unspecified, the value of the read
+     #   preference for this collection's associated Mongo::DB object will be used.
+     #
+     # @raise [InvalidNSName]
+     #   if collection name is empty, contains '$', or starts or ends with '.'
+     #
+     # @raise [TypeError]
+     #   if collection name is not a string or symbol
+     #
+     # @return [Collection]
+     def initialize(name, db, opts={})
+       if db.is_a?(String) && name.is_a?(Mongo::DB)
+         warn "Warning: the order of parameters to initialize a collection has changed. " +
+              "Please specify the collection name first, followed by the db. This will be made permanent " +
+              "in v2.0."
+         db, name = name, db
+       end
+
+       raise TypeError,
+             "Collection name must be a String or Symbol." unless [String, Symbol].include?(name.class)
+       name = name.to_s
+
+       raise Mongo::InvalidNSName,
+             "Collection names cannot be empty." if name.empty? || name.include?("..")
+
+       if name.include?("$")
+         raise Mongo::InvalidNSName,
+               "Collection names must not contain '$'" unless name =~ /((^\$cmd)|(oplog\.\$main))/
+       end
+
+       raise Mongo::InvalidNSName,
+             "Collection names must not start or end with '.'" if name.match(/^\./) || name.match(/\.$/)
+
+       pk_factory = nil
+       if opts.respond_to?(:create_pk) || !opts.is_a?(Hash)
+         warn "The method for specifying a primary key factory on a Collection has changed.\n" +
+              "Please specify it as an option (e.g., :pk => PkFactory)."
+         pk_factory = opts
+       end
+
+       @db, @name = db, name
+       @connection = @db.connection
+       @logger = @connection.logger
+       @cache_time = @db.cache_time
+       @cache = Hash.new(0)
+       unless pk_factory
+         @write_concern = get_write_concern(opts, db)
+         @read = opts[:read] || @db.read
+         Mongo::ReadPreference::validate(@read)
+         @capped = opts[:capped]
+         @tag_sets = opts.fetch(:tag_sets, @db.tag_sets)
+         @acceptable_latency = opts.fetch(:acceptable_latency, @db.acceptable_latency)
+       end
+       @pk_factory = pk_factory || opts[:pk] || BSON::ObjectId
+       @hint = nil
+       @operation_writer = CollectionOperationWriter.new(self)
+       @command_writer = CollectionCommandWriter.new(self)
+     end
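Because these options cascade from client to DB to Collection, a collection is normally obtained through its DB rather than constructed directly; a brief sketch for this 1.x driver (host and names are placeholders):

    require 'mongo'
    client = Mongo::MongoClient.new('localhost', 27017)
    db     = client['test_db']
    users  = db.collection('users', :w => 1, :j => true)   # per-collection write concern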
+
+     # Indicate whether this is a capped collection.
+     #
+     # @raise [Mongo::OperationFailure]
+     #   if the collection doesn't exist.
+     #
+     # @return [Boolean]
+     def capped?
+       @capped ||= [1, true].include?(@db.command({:collstats => @name})['capped'])
+     end
+
+     # Return a sub-collection of this collection by name. If 'users' is a collection, then
+     # 'users.comments' is a sub-collection of users.
+     #
+     # @param [String, Symbol] name
+     #   the collection to return
+     #
+     # @raise [Mongo::InvalidNSName]
+     #   if passed an invalid collection name
+     #
+     # @return [Collection]
+     #   the specified sub-collection
+     def [](name)
+       name = "#{self.name}.#{name}"
+       return Collection.new(name, db) if !db.strict? ||
+         db.collection_names.include?(name.to_s)
+       raise "Collection #{name} doesn't exist. Currently in strict mode."
+     end
+
+     # Set a hint field for query optimizer. Hint may be a single field
+     # name, array of field names, or a hash (preferably an [OrderedHash]).
+     # If using MongoDB > 1.1, you probably don't ever need to set a hint.
+     #
+     # @param [String, Array, OrderedHash] hint a single field, an array of
+     #   fields, or a hash specifying fields
+     def hint=(hint=nil)
+       @hint = normalize_hint_fields(hint)
+       self
+     end
+
+     # Set a hint field using a named index.
+     # @param [String] hint index name
+     def named_hint=(hint=nil)
+       @hint = hint
+       self
+     end
+
+     # Query the database.
+     #
+     # The +selector+ argument is a prototype document that all results must
+     # match. For example:
+     #
+     #   collection.find({"hello" => "world"})
+     #
+     # only matches documents that have a key "hello" with value "world".
+     # Matches can have other keys *in addition* to "hello".
+     #
+     # If given an optional block +find+ will yield a Cursor to that block,
+     # close the cursor, and then return nil. This guarantees that partially
+     # evaluated cursors will be closed. If given no block +find+ returns a
+     # cursor.
+     #
+     # @param [Hash] selector
+     #   a document specifying elements which must be present for a
+     #   document to be included in the result set. Note that in rare cases
+     #   (e.g., with $near queries), the order of keys will matter. To preserve
+     #   key order on a selector, use an instance of BSON::OrderedHash (only applies
+     #   to Ruby 1.8).
+     #
+     # @option opts [Array, Hash] :fields field names that should be returned in the result
+     #   set ("_id" will be included unless explicitly excluded). By limiting results to a certain subset of fields,
+     #   you can cut down on network traffic and decoding time. If using a Hash, keys should be field
+     #   names and values should be either 1 or 0, depending on whether you want to include or exclude
+     #   the given field.
+     # @option opts [:primary, :secondary] :read The default read preference for queries
+     #   initiated from this connection object. If +:secondary+ is chosen, reads will be sent
+     #   to one of the closest available secondary nodes. If a secondary node cannot be located, the
+     #   read will be sent to the primary. If this option is left unspecified, the value of the read
+     #   preference for this Collection object will be used.
+     # @option opts [Integer] :skip number of documents to skip from the beginning of the result set
+     # @option opts [Integer] :limit maximum number of documents to return
+     # @option opts [Array] :sort an array of [key, direction] pairs to sort by. Direction should
+     #   be specified as Mongo::ASCENDING (or :ascending / :asc) or Mongo::DESCENDING (or :descending / :desc)
+     # @option opts [String, Array, OrderedHash] :hint hint for query optimizer, usually not necessary if
+     #   using MongoDB > 1.1
+     # @option opts [String] :named_hint for specifying a named index as a hint, will be overridden by :hint
+     #   if :hint is also provided.
+     # @option opts [Boolean] :snapshot (false) if true, snapshot mode will be used for this query.
+     #   Snapshot mode assures no duplicates are returned, or objects missed, which were present at both the start and
+     #   end of the query's execution.
+     #   For details see http://www.mongodb.org/display/DOCS/How+to+do+Snapshotting+in+the+Mongo+Database
+     # @option opts [Integer] :batch_size (100) the number of documents returned by the database per
+     #   GETMORE operation. A value of 0 will let the database server decide how many results to return.
+     #   This option can be ignored for most use cases.
+     # @option opts [Boolean] :timeout (true) when +true+, the returned cursor will be subject to
+     #   the normal cursor timeout behavior of the mongod process. When +false+, the returned cursor will
+     #   never timeout. Note that disabling timeout will only work when #find is invoked with a block.
+     #   This is to prevent any inadvertent failure to close the cursor, as the cursor is explicitly
+     #   closed when block code finishes.
+     # @option opts [Integer] :max_scan (nil) Limit the number of items to scan on both collection scans and indexed queries.
+     # @option opts [Boolean] :show_disk_loc (false) Return the disk location of each query result (for debugging).
+     # @option opts [Boolean] :return_key (false) Return the index key used to obtain the result (for debugging).
+     # @option opts [Block] :transformer (nil) a block for transforming returned documents.
+     #   This is normally used by object mappers to convert each returned document to an instance of a class.
+     # @option opts [String] :comment (nil) a comment to include in profiling logs
+     # @option opts [Boolean] :compile_regex (true) whether BSON regex objects should be compiled into Ruby regexes.
+     #   If false, a BSON::Regex object will be returned instead.
+     #
+     # @raise [ArgumentError]
+     #   if timeout is set to false and find is not invoked in a block
+     #
+     # @raise [RuntimeError]
+     #   if given unknown options
+     def find(selector={}, opts={})
+       opts = opts.dup
+       fields = opts.delete(:fields)
+       fields = ["_id"] if fields && fields.empty?
+       skip = opts.delete(:skip) || skip || 0
+       limit = opts.delete(:limit) || 0
+       sort = opts.delete(:sort)
+       hint = opts.delete(:hint)
+       named_hint = opts.delete(:named_hint)
+       snapshot = opts.delete(:snapshot)
+       batch_size = opts.delete(:batch_size)
+       timeout = (opts.delete(:timeout) == false) ? false : true
+       max_scan = opts.delete(:max_scan)
+       return_key = opts.delete(:return_key)
+       transformer = opts.delete(:transformer)
+       show_disk_loc = opts.delete(:show_disk_loc)
+       comment = opts.delete(:comment)
+       read = opts.delete(:read) || @read
+       tag_sets = opts.delete(:tag_sets) || @tag_sets
+       acceptable_latency = opts.delete(:acceptable_latency) || @acceptable_latency
+       compile_regex = opts.key?(:compile_regex) ? opts.delete(:compile_regex) : true
+
+       if timeout == false && !block_given?
+         raise ArgumentError, "Collection#find must be invoked with a block when timeout is disabled."
+       end
+
+       if hint
+         hint = normalize_hint_fields(hint)
+       else
+         hint = @hint # assumed to be normalized already
+       end
+
+       raise RuntimeError, "Unknown options [#{opts.inspect}]" unless opts.empty?
+
+       cursor = Cursor.new(self, {
+         :selector => selector,
+         :fields => fields,
+         :skip => skip,
+         :limit => limit,
+         :order => sort,
+         :hint => hint || named_hint,
+         :snapshot => snapshot,
+         :timeout => timeout,
+         :batch_size => batch_size,
+         :transformer => transformer,
+         :max_scan => max_scan,
+         :show_disk_loc => show_disk_loc,
+         :return_key => return_key,
+         :read => read,
+         :tag_sets => tag_sets,
+         :comment => comment,
+         :acceptable_latency => acceptable_latency,
+         :compile_regex => compile_regex
+       })
+
+       if block_given?
+         begin
+           yield cursor
+         ensure
+           cursor.close
+         end
+         nil
+       else
+         cursor
+       end
+     end
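A short sketch combining several of the options documented above, using the block form so the cursor is closed automatically (collection and field names are illustrative):

    users.find({"age" => {"$gte" => 21}},
               :fields  => {"name" => 1},
               :sort    => [["name", Mongo::ASCENDING]],
               :timeout => false) do |cursor|
      cursor.each { |doc| puts doc["name"] }
    end   # returns nil; the cursor has been closed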
+
+     # Return a single object from the database.
+     #
+     # @return [OrderedHash, Nil]
+     #   a single document or nil if no result is found.
+     #
+     # @param [Hash, ObjectId, Nil] spec_or_object_id a hash specifying elements
+     #   which must be present for a document to be included in the result set or an
+     #   instance of ObjectId to be used as the value for an _id query.
+     #   If nil, an empty selector, {}, will be used.
+     #
+     # @option opts [Hash]
+     #   any valid options that can be sent to Collection#find
+     #
+     # @raise [TypeError]
+     #   if the argument is of an improper type.
+     def find_one(spec_or_object_id=nil, opts={})
+       spec = case spec_or_object_id
+              when nil
+                {}
+              when BSON::ObjectId
+                {:_id => spec_or_object_id}
+              when Hash
+                spec_or_object_id
+              else
+                raise TypeError, "spec_or_object_id must be an instance of ObjectId or Hash, or nil"
+              end
+       timeout = opts.delete(:max_time_ms)
+       cursor = find(spec, opts.merge(:limit => -1))
+       timeout ? cursor.max_time_ms(timeout).next_document : cursor.next_document
+     end
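The selector and ObjectId forms accept the same find options; for example (hypothetical data):

    doc  = users.find_one({"name" => "Ada"}, :fields => ["name"])
    same = users.find_one(doc["_id"]) if doc   # _id shortcut form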
+
+     # Save a document to this collection.
+     #
+     # @param [Hash] doc
+     #   the document to be saved. If the document already has an '_id' key,
+     #   then an update (upsert) operation will be performed, and any existing
+     #   document with that _id is overwritten. Otherwise an insert operation is performed.
+     #
+     # @return [ObjectId] the _id of the saved document.
+     #
+     # @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
+     #   should be acknowledged.
+     # @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
+     # @option opts [Boolean] :j (false) If true, block until write operations have been committed
+     #   to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
+     #   ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
+     #   fail with an exception if this option is used when the server is running without journaling.
+     # @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
+     #   the server has synced all data files to disk. If the server is running with journaling, this acts the same as
+     #   the 'j' option, blocking until write operations have been committed to the journal.
+     #   Cannot be used in combination with 'j'.
+     #
+     #   Options provided here will override any write concern options set on this collection,
+     #   its database object, or the current connection. See the options
+     #   for DB#get_last_error.
+     #
+     # @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
+     def save(doc, opts={})
+       if doc.has_key?(:_id) || doc.has_key?('_id')
+         id = doc[:_id] || doc['_id']
+         update({:_id => id}, doc, opts.merge!({:upsert => true}))
+         id
+       else
+         insert(doc, opts)
+       end
+     end
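As the doc block notes, the presence of _id selects between the two write paths:

    id = users.save({"name" => "Grace"})              # no _id: insert, returns a new ObjectId
    users.save({"_id" => id, "name" => "Grace H."})   # _id present: upsert overwrites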
+
+     # Insert one or more documents into the collection.
+     #
+     # @param [Hash, Array] doc_or_docs
+     #   a document (as a hash) or array of documents to be inserted.
+     #
+     # @return [ObjectId, Array]
+     #   The _id of the inserted document or a list of _ids of all inserted documents.
+     # @return [[ObjectId, Array], [Hash, Array]]
+     #   1st, the _id of the inserted document or a list of _ids of all inserted documents.
+     #   2nd, a list of invalid documents.
+     #   Return this result format only when :collect_on_error is true.
+     #
+     # @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
+     #   should be acknowledged.
+     # @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
+     # @option opts [Boolean] :j (false) If true, block until write operations have been committed
+     #   to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
+     #   ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
+     #   fail with an exception if this option is used when the server is running without journaling.
+     # @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
+     #   the server has synced all data files to disk. If the server is running with journaling, this acts the same as
+     #   the 'j' option, blocking until write operations have been committed to the journal.
+     #   Cannot be used in combination with 'j'.
+     #
+     # Notes on write concern:
+     #   Options provided here will override any write concern options set on this collection,
+     #   its database object, or the current connection. See the options for +DB#get_last_error+.
+     #
+     # @option opts [Boolean] :continue_on_error (+false+) If true, then
+     #   continue a bulk insert even if one of the documents inserted
+     #   triggers a database assertion (as in a duplicate insert, for instance).
+     #   If not acknowledging writes, the list of ids returned will
+     #   include the object ids of all documents attempted on insert, even
+     #   if some are rejected on error. When acknowledging writes, any error will raise an
+     #   OperationFailure exception.
+     #   MongoDB v2.0+.
+     # @option opts [Boolean] :collect_on_error (+false+) if true, then
+     #   collects invalid documents as an array. Note that this option changes the result format.
+     #
+     # @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
+     def insert(doc_or_docs, opts={})
+       if doc_or_docs.respond_to?(:collect!)
+         doc_or_docs.collect! { |doc| @pk_factory.create_pk(doc) }
+         error_docs, errors, write_concern_errors, rest_ignored = batch_write(:insert, doc_or_docs, true, opts)
+         errors = write_concern_errors + errors
+         raise errors.last if !opts[:collect_on_error] && !errors.empty?
+         inserted_docs = doc_or_docs - error_docs
+         inserted_ids = inserted_docs.collect {|o| o[:_id] || o['_id']}
+         opts[:collect_on_error] ? [inserted_ids, error_docs] : inserted_ids
+       else
+         @pk_factory.create_pk(doc_or_docs)
+         send_write(:insert, nil, doc_or_docs, true, opts)
+         return doc_or_docs[:_id] || doc_or_docs['_id']
+       end
+     end
+     alias_method :<<, :insert
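The two error options above change both the failure behavior and the return shape; an illustrative sketch:

    ids = users.insert([{"x" => 1}, {"x" => 2}], :continue_on_error => true)
    ids, rejected = users.insert([{"x" => 3}, {"x" => 4}], :collect_on_error => true)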
+
+     # Remove all documents from this collection.
+     #
+     # @param [Hash] selector
+     #   If specified, only matching documents will be removed.
+     #
+     # @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
+     #   should be acknowledged.
+     # @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
+     # @option opts [Boolean] :j (false) If true, block until write operations have been committed
+     #   to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
+     #   ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
+     #   fail with an exception if this option is used when the server is running without journaling.
+     # @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
+     #   the server has synced all data files to disk. If the server is running with journaling, this acts the same as
+     #   the 'j' option, blocking until write operations have been committed to the journal.
+     #   Cannot be used in combination with 'j'.
+     # @option opts [Integer] :limit (0) Set limit option, currently only 0 for all or 1 for just one.
+     #
+     # Notes on write concern:
+     #   Options provided here will override any write concern options set on this collection,
+     #   its database object, or the current connection. See the options for +DB#get_last_error+.
+     #
+     # @example remove all documents from the 'users' collection:
+     #   users.remove
+     #   users.remove({})
+     #
+     # @example remove only documents that have expired:
+     #   users.remove({:expire => {"$lte" => Time.now}})
+     #
+     # @return [Hash, true] Returns a Hash containing the last error object if acknowledging writes.
+     #   Otherwise, returns true.
+     #
+     # @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
+     def remove(selector={}, opts={})
+       send_write(:delete, selector, nil, nil, opts)
+     end
+
+     # Update one or more documents in this collection.
+     #
+     # @param [Hash] selector
+     #   a hash specifying elements which must be present for a document to be updated. Note:
+     #   the update command currently updates only the first document matching the
+     #   given selector. If you want all matching documents to be updated, be sure
+     #   to specify :multi => true.
+     # @param [Hash] document
+     #   a hash specifying the fields to be changed in the selected document,
+     #   or (in the case of an upsert) the document to be inserted
+     #
+     # @option opts [Boolean] :upsert (+false+) if true, performs an upsert (update or insert)
+     # @option opts [Boolean] :multi (+false+) update all documents matching the selector, as opposed to
+     #   just the first matching document. Note: only works in MongoDB 1.1.3 or later.
+     # @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
+     #   should be acknowledged.
+     # @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
+     # @option opts [Boolean] :j (false) If true, block until write operations have been committed
+     #   to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
+     #   ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
+     #   fail with an exception if this option is used when the server is running without journaling.
+     # @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
+     #   the server has synced all data files to disk. If the server is running with journaling, this acts the same as
+     #   the 'j' option, blocking until write operations have been committed to the journal.
+     #   Cannot be used in combination with 'j'.
+     #
+     # Notes on write concern:
+     #   Options provided here will override any write concern options set on this collection,
+     #   its database object, or the current connection. See the options for DB#get_last_error.
+     #
+     # @return [Hash, true] Returns a Hash containing the last error object if acknowledging writes.
+     #   Otherwise, returns true.
+     #
+     # @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
+     def update(selector, document, opts={})
+       send_write(:update, selector, document, !document.keys.first.to_s.start_with?("$"), opts)
+     end
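For example, :multi and :upsert compose with a plain call as described above:

    users.update({"status" => "pending"},
                 {"$set" => {"status" => "active"}},
                 :multi => true, :w => 1)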
+
+     # Create a new index.
+     #
+     # @param [String, Array] spec
+     #   should be either a single field name or an array of
+     #   [field name, type] pairs. Index types should be specified
+     #   as Mongo::ASCENDING, Mongo::DESCENDING, Mongo::GEO2D, Mongo::GEO2DSPHERE, Mongo::GEOHAYSTACK,
+     #   Mongo::TEXT or Mongo::HASHED.
+     #
+     #   Note that geospatial indexing only works with versions of MongoDB >= 1.3.3. Keep in mind, too,
+     #   that in order to geo-index a given field, that field must reference either an array or a sub-object
+     #   where the first two values represent x- and y-coordinates. Examples can be seen below.
+     #
+     #   Also note that it is permissible to create compound indexes that include a geospatial index as
+     #   long as the geospatial index comes first.
+     #
+     #   If your code calls create_index frequently, you can use Collection#ensure_index to cache these calls
+     #   and thereby prevent excessive round trips to the database.
+     #
+     # @option opts [Boolean] :unique (false) if true, this index will enforce a uniqueness constraint.
+     # @option opts [Boolean] :background (false) indicate that the index should be built in the background. This
+     #   feature is only available in MongoDB >= 1.3.2.
+     # @option opts [Boolean] :drop_dups (nil) If creating a unique index on a collection with pre-existing records,
+     #   this option will keep the first document the database indexes and drop all subsequent with duplicate values.
+     # @option opts [Integer] :bucket_size (nil) For use with geoHaystack indexes. Number of documents to group
+     #   together within a certain proximity to a given longitude and latitude.
+     # @option opts [Integer] :min (nil) specify the minimum longitude and latitude for a geo index.
+     # @option opts [Integer] :max (nil) specify the maximum longitude and latitude for a geo index.
+     #
+     # @example Creating a compound index using a hash: (Ruby 1.9+ Syntax)
+     #   @posts.create_index({'subject' => Mongo::ASCENDING, 'created_at' => Mongo::DESCENDING})
+     #
+     # @example Creating a compound index:
+     #   @posts.create_index([['subject', Mongo::ASCENDING], ['created_at', Mongo::DESCENDING]])
+     #
+     # @example Creating a geospatial index using a hash: (Ruby 1.9+ Syntax)
+     #   @restaurants.create_index(:location => Mongo::GEO2D)
+     #
+     # @example Creating a geospatial index:
+     #   @restaurants.create_index([['location', Mongo::GEO2D]])
+     #
+     #   # Note that this will work only if 'location' represents x,y coordinates:
+     #   {'location': [0, 50]}
+     #   {'location': {'x' => 0, 'y' => 50}}
+     #   {'location': {'latitude' => 0, 'longitude' => 50}}
+     #
+     # @example A geospatial index with alternate longitude and latitude:
+     #   @restaurants.create_index([['location', Mongo::GEO2D]], :min => 500, :max => 500)
+     #
+     # @return [String] the name of the index created.
+     def create_index(spec, opts={})
+       opts[:dropDups] = opts[:drop_dups] if opts[:drop_dups]
+       opts[:bucketSize] = opts[:bucket_size] if opts[:bucket_size]
+       field_spec = parse_index_spec(spec)
+       opts = opts.dup
+       name = opts.delete(:name) || generate_index_name(field_spec)
+       name = name.to_s if name
+       generate_indexes(field_spec, name, opts)
+       name
+     end
+
+     # Calls create_index and sets a flag to not do so again for another X minutes.
+     # This time can be specified as an option when initializing a Mongo::DB object as options[:cache_time]
+     # Any changes to an index will be propagated through regardless of cache time (e.g., a change of index direction)
+     #
+     # The parameters and options for this method are the same as those for Collection#create_index.
+     #
+     # @example Call sequence (Ruby 1.9+ Syntax):
+     #   Time t: @posts.ensure_index(:subject => Mongo::ASCENDING) -- calls create_index and
+     #     sets the 5 minute cache
+     #   Time t+2min : @posts.ensure_index(:subject => Mongo::ASCENDING) -- doesn't do anything
+     #   Time t+3min : @posts.ensure_index(:something_else => Mongo::ASCENDING) -- calls create_index
+     #     and sets 5 minute cache
+     #   Time t+10min : @posts.ensure_index(:subject => Mongo::ASCENDING) -- calls create_index and
+     #     resets the 5 minute counter
+     #
+     # @return [String] the name of the index.
+     def ensure_index(spec, opts={})
+       now = Time.now.utc.to_i
+       opts[:dropDups] = opts[:drop_dups] if opts[:drop_dups]
+       opts[:bucketSize] = opts[:bucket_size] if opts[:bucket_size]
+       field_spec = parse_index_spec(spec)
+       name = opts[:name] || generate_index_name(field_spec)
+       name = name.to_s if name
+
+       if !@cache[name] || @cache[name] <= now
+         generate_indexes(field_spec, name, opts)
+       end
+
+       # Reset the cache here in case there are any errors inserting. Best to be safe.
+       @cache[name] = now + @cache_time
+       name
+     end
+
+     # Drop a specified index.
+     #
+     # @param [String] name
+     def drop_index(name)
+       if name.is_a?(Array)
+         return drop_index(index_name(name))
+       end
+       @cache[name.to_s] = nil
+       @db.drop_index(@name, name)
+     end
+
+     # Drop all indexes.
+     def drop_indexes
+       @cache = {}
+
+       # Note: calling drop_indexes with no args will drop them all.
+       @db.drop_index(@name, '*')
+     end
+
+     # Drop the entire collection. USE WITH CAUTION.
+     def drop
+       @db.drop_collection(@name)
+     end
+
+     # Atomically update and return a document using MongoDB's findAndModify command. (MongoDB > 1.3.0)
+     #
+     # @option opts [Hash] :query ({}) a query selector document for matching
+     #   the desired document.
+     # @option opts [Hash] :update (nil) the update operation to perform on the
+     #   matched document.
+     # @option opts [Array, String, OrderedHash] :sort ({}) specify a sort
+     #   option for the query using any
+     #   of the sort options available for Cursor#sort. Sort order is important
+     #   if the query will be matching multiple documents since only the first
+     #   matching document will be updated and returned.
+     # @option opts [Boolean] :remove (false) If true, removes the returned
+     #   document from the collection.
+     # @option opts [Boolean] :new (false) If true, returns the updated
+     #   document; otherwise, returns the document prior to update.
+     # @option opts [Boolean] :upsert (false) If true, creates a new document
+     #   if the query returns no document.
+     # @option opts [Hash] :fields (nil) A subset of fields to return.
+     #   Specify an inclusion of a field with 1. _id is included by default and must
+     #   be explicitly excluded.
+     # @option opts [Boolean] :full_response (false) If true, returns the entire
+     #   response object from the server including 'ok' and 'lastErrorObject'.
+     #
+     # @return [Hash] the matched document.
+     def find_and_modify(opts={})
+       full_response = opts.delete(:full_response)
+
+       cmd = BSON::OrderedHash.new
+       cmd[:findandmodify] = @name
+       cmd.merge!(opts)
+
+       cmd[:sort] =
+         Mongo::Support.format_order_clause(opts[:sort]) if opts[:sort]
+
+       full_response ? @db.command(cmd) : @db.command(cmd)['value']
+     end
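A common claim-one-job pattern built from the options above (the queue collection and its fields are hypothetical):

    job = queue.find_and_modify(
      :query  => {"state" => "ready"},
      :update => {"$set" => {"state" => "running"}},
      :sort   => [["created_at", Mongo::ASCENDING]],
      :new    => true)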
653
+
654
+ # Perform an aggregation using the aggregation framework on the current collection.
655
+ # @note Aggregate requires server version >= 2.1.1
656
+ # @note Field References: Within an expression, field names must be quoted and prefixed by a dollar sign ($).
657
+ #
658
+ # @example Define the pipeline as an array of operator hashes:
659
+ # coll.aggregate([ {"$project" => {"last_name" => 1, "first_name" => 1 }}, {"$match" => {"last_name" => "Jones"}} ])
660
+ #
661
+ # @example With server version 2.5.1 or newer, pass a cursor option to retrieve unlimited aggregation results:
662
+ # coll.aggregate([ {"$group" => { :_id => "$_id", :count => { "$sum" => "$members" }}} ], :cursor => {} )
663
+ #
664
+ # @param [Array] pipeline Should be a single array of pipeline operator hashes.
665
+ #
666
+ # '$project' Reshapes a document stream by including fields, excluding fields, inserting computed fields,
667
+ # renaming fields,or creating/populating fields that hold sub-documents.
668
+ #
669
+ # '$match' Query-like interface for filtering documents out of the aggregation pipeline.
670
+ #
671
+ # '$limit' Restricts the number of documents that pass through the pipeline.
672
+ #
673
+ # '$skip' Skips over the specified number of documents and passes the rest along the pipeline.
674
+ #
675
+ # '$unwind' Peels off elements of an array individually, returning one document for each member.
676
+ #
677
+ # '$group' Groups documents for calculating aggregate values.
678
+ #
679
+ # '$sort' Sorts all input documents and returns them to the pipeline in sorted order.
680
+ #
681
+ # '$out' The name of a collection to which the result set will be saved.
682
+ #
683
+ # @option opts [:primary, :secondary] :read Read preference indicating which server to perform this operation
684
+ # on. If $out is specified and :read is not :primary, the aggregation will be rerouted to the primary with
685
+ # a warning. See Collection#find for more details.
686
+ # @option opts [String] :comment (nil) a comment to include in profiling logs
687
+ # @option opts [Hash] :cursor return a cursor object instead of an Array. Takes an optional batchSize parameter
688
+ # to specify the maximum size, in documents, of the first batch returned.
689
+ #
690
+ # @return [Array] An Array with the aggregate command's results.
691
+ #
692
+ # @raise MongoArgumentError if operators either aren't supplied or aren't in the correct format.
693
+ # @raise MongoOperationFailure if the aggregate command fails.
694
+ #
695
+ def aggregate(pipeline=nil, opts={})
696
+ raise MongoArgumentError, "pipeline must be an array of operators" unless pipeline.class == Array
697
+ raise MongoArgumentError, "pipeline operators must be hashes" unless pipeline.all? { |op| op.class == Hash }
698
+
699
+ selector = BSON::OrderedHash.new
700
+ selector['aggregate'] = self.name
701
+ selector['pipeline'] = pipeline
702
+
703
+ result = @db.command(selector, command_options(opts))
704
+ unless Mongo::Support.ok?(result)
705
+ raise Mongo::OperationFailure, "aggregate failed: #{result['errmsg']}"
706
+ end
707
+
708
+ if result.key?('cursor')
709
+ cursor_info = result['cursor']
710
+
711
+ seed = {
712
+ :cursor_id => cursor_info['id'],
713
+ :first_batch => cursor_info['firstBatch'],
714
+ :pool => @connection.pinned_pool
715
+ }
716
+
717
+ return Cursor.new(self, seed.merge!(opts))
718
+
719
+ elsif selector['pipeline'].any? { |op| op.key?('$out') || op.key?(:$out) }
720
+ return result
721
+ end
722
+
723
+ result['result'] || result
724
+ end
725
+
726
+    # Perform a map-reduce operation on the current collection.
+    #
+    # @param [String, BSON::Code] map a map function, written in JavaScript.
+    # @param [String, BSON::Code] reduce a reduce function, written in JavaScript.
+    #
+    # @option opts [Hash] :query ({}) a query selector document, like what's passed to #find, to limit
+    #   the operation to a subset of the collection.
+    # @option opts [Array] :sort ([]) an array of [key, direction] pairs to sort by. Direction should
+    #   be specified as Mongo::ASCENDING (or :ascending / :asc) or Mongo::DESCENDING (or :descending / :desc)
+    # @option opts [Integer] :limit (nil) if passing a query, number of objects to return from the collection.
+    # @option opts [String, BSON::Code] :finalize (nil) a JavaScript function to apply to the result set after the
+    #   map/reduce operation has finished.
+    # @option opts [String, Hash] :out Location of the result of the map-reduce operation. You can output to a
+    #   collection, output to a collection with an action, or output inline. You may output to a collection
+    #   when performing map-reduce operations on the primary members of the set; on secondary members you
+    #   may only use the inline output. See the server mapReduce documentation for available options.
+    # @option opts [Boolean] :keeptemp (false) if true, the generated collection will be persisted. The default
+    #   is false. Note that this option has no effect in versions of MongoDB > v1.7.6.
+    # @option opts [Boolean] :verbose (false) if true, provides statistics on job execution time.
+    # @option opts [Boolean] :raw (false) if true, return the raw result object from the map_reduce command, and not
+    #   the instantiated collection that's returned by default. Note that if a collection name isn't returned in the
+    #   map-reduce output (as, for example, when using :out => { :inline => 1 }), then you must specify this option
+    #   or an ArgumentError will be raised.
+    # @option opts [:primary, :secondary] :read Read preference indicating which server to run this map-reduce
+    #   on. See Collection#find for more details.
+    # @option opts [String] :comment (nil) a comment to include in profiling logs
+    #
+    # @return [Collection, Hash] a Mongo::Collection object or a Hash with the map-reduce command's results.
+    #
+    # @raise ArgumentError if you specify { :out => { :inline => true }} but don't specify :raw => true.
+    #
+    # @see http://www.mongodb.org/display/DOCS/MapReduce Official MongoDB map/reduce documentation.
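+    #
+    # A hedged usage sketch (the JavaScript functions, field names, and output
+    # collection below are illustrative, not taken from this file):
+    #
+    # @example Count documents per zip code and read back the output collection:
+    #   map    = "function() { emit(this.zip, 1); }"
+    #   reduce = "function(key, values) { return Array.sum(values); }"
+    #   out    = coll.map_reduce(map, reduce, :out => "zip_counts")
+    #   out.find.to_a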
+    def map_reduce(map, reduce, opts={})
+      opts = opts.dup
+      map = BSON::Code.new(map) unless map.is_a?(BSON::Code)
+      reduce = BSON::Code.new(reduce) unless reduce.is_a?(BSON::Code)
+      raw = opts.delete(:raw)
+
+      hash = BSON::OrderedHash.new
+      hash['mapreduce'] = self.name
+      hash['map'] = map
+      hash['reduce'] = reduce
+      hash['out'] = opts.delete(:out)
+      hash['sort'] = Mongo::Support.format_order_clause(opts.delete(:sort)) if opts.key?(:sort)
+
+      result = @db.command(hash, command_options(opts))
+      unless Mongo::Support.ok?(result)
+        raise Mongo::OperationFailure, "map-reduce failed: #{result['errmsg']}"
+      end
+
+      if raw
+        result
+      elsif result['result']
+        if result['result'].is_a?(BSON::OrderedHash) &&
+            result['result'].key?('db') &&
+            result['result'].key?('collection')
+          otherdb = @db.connection[result['result']['db']]
+          otherdb[result['result']['collection']]
+        else
+          @db[result["result"]]
+        end
+      else
+        raise ArgumentError, "Could not instantiate collection from result. If you specified " +
+          "{:out => {:inline => true}}, then you must also specify :raw => true to get the results."
+      end
+    end
+    alias :mapreduce :map_reduce
+
+    # Perform a group aggregation.
+    #
+    # @param [Hash] opts the options for this group operation. The minimum required are :initial
+    #   and :reduce.
+    #
+    # @option opts [Array, String, Symbol] :key (nil) Either the name of a field or a list of fields to group by (optional).
+    # @option opts [String, BSON::Code] :keyf (nil) A JavaScript function to be used to generate the grouping keys (optional).
+    # @option opts [String, BSON::Code] :cond ({}) A document specifying a query for filtering the documents over
+    #   which the aggregation is run (optional).
+    # @option opts [Hash] :initial the initial value of the aggregation counter object (required).
+    # @option opts [String, BSON::Code] :reduce (nil) a JavaScript aggregation function (required).
+    # @option opts [String, BSON::Code] :finalize (nil) a JavaScript function that receives and modifies
+    #   each of the resultant grouped objects. Available only when group is run with command
+    #   set to true.
+    # @option opts [:primary, :secondary] :read Read preference indicating which server to perform this group
+    #   on. See Collection#find for more details.
+    # @option opts [String] :comment (nil) a comment to include in profiling logs
+    #
+    # @return [Array] the command response consisting of grouped items.
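+    #
+    # A hedged usage sketch of the hash-based API (handled by new_group below);
+    # the field names and reduce function are illustrative, not from this file:
+    #
+    # @example Count documents per zip code:
+    #   coll.group(:key     => :zip,
+    #              :initial => {:count => 0},
+    #              :reduce  => "function(doc, prev) { prev.count += 1; }")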
+    def group(opts, condition={}, initial={}, reduce=nil, finalize=nil)
+      if opts.is_a?(Hash)
+        return new_group(opts.dup)
+      elsif opts.is_a?(Symbol)
+        raise MongoArgumentError, "Group takes either an array of fields to group by or a JavaScript function " +
+          "in the form of a String or BSON::Code."
+      end
+
+      warn "Collection#group no longer takes a list of parameters. This usage is deprecated and will be removed in v2.0. " +
+        "Check out the new API at http://api.mongodb.org/ruby/current/Mongo/Collection.html#group-instance_method"
+
+      reduce = BSON::Code.new(reduce) unless reduce.is_a?(BSON::Code)
+
+      group_command = {
+        "group" => {
+          "ns" => @name,
+          "$reduce" => reduce,
+          "cond" => condition,
+          "initial" => initial
+        }
+      }
+
+      unless opts.nil?
+        if opts.is_a? Array
+          key_type = "key"
+          key_value = {}
+          opts.each { |k| key_value[k] = 1 }
+        else
+          key_type = "$keyf"
+          key_value = opts.is_a?(BSON::Code) ? opts : BSON::Code.new(opts)
+        end
+
+        group_command["group"][key_type] = key_value
+      end
+
+      finalize = BSON::Code.new(finalize) if finalize.is_a?(String)
+      if finalize.is_a?(BSON::Code)
+        group_command['group']['finalize'] = finalize
+      end
+
+      result = @db.command(group_command)
+
+      if Mongo::Support.ok?(result)
+        result["retval"]
+      else
+        raise OperationFailure, "group command failed: #{result['errmsg']}"
+      end
+    end
+
+    # Scan this entire collection in parallel.
+    # Returns a list of up to num_cursors cursors that can be iterated concurrently. As long as the collection
+    # is not modified during scanning, each document appears once in one of the cursors' result sets.
+    #
+    # @note Requires server version >= 2.5.5
+    #
+    # @param [Integer] num_cursors the number of cursors to return.
+    # @param [Hash] opts
+    #
+    # @return [Array] An array of up to num_cursors cursors for iterating over the collection.
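+    #
+    # A hedged usage sketch (one thread per cursor is a plausible consumption
+    # pattern; the block body is illustrative):
+    #
+    # @example Iterate four cursors concurrently:
+    #   threads = coll.parallel_scan(4).map do |cursor|
+    #     Thread.new { cursor.each { |doc| puts doc['_id'] } }
+    #   end
+    #   threads.each(&:join)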
+    def parallel_scan(num_cursors, opts={})
+      cmd = BSON::OrderedHash.new
+      cmd[:parallelCollectionScan] = self.name
+      cmd[:numCursors] = num_cursors
+      result = @db.command(cmd, command_options(opts))
+
+      result['cursors'].collect do |cursor_info|
+        seed = {
+          :cursor_id => cursor_info['cursor']['id'],
+          :first_batch => cursor_info['cursor']['firstBatch'],
+          :pool => @connection.pinned_pool
+        }
+        Cursor.new(self, seed.merge!(opts))
+      end
+    end
+
+    private
+
+    def new_group(opts={})
+      reduce = opts.delete(:reduce)
+      finalize = opts.delete(:finalize)
+      cond = opts.delete(:cond) || {}
+      initial = opts.delete(:initial)
+
+      if !(reduce && initial)
+        raise MongoArgumentError, "Group requires at minimum values for initial and reduce."
+      end
+
+      cmd = {
+        "group" => {
+          "ns" => @name,
+          "$reduce" => reduce.to_bson_code,
+          "cond" => cond,
+          "initial" => initial
+        }
+      }
+
+      if finalize
+        cmd['group']['finalize'] = finalize.to_bson_code
+      end
+
+      if key = opts.delete(:key)
+        if key.is_a?(String) || key.is_a?(Symbol)
+          key = [key]
+        end
+        key_value = {}
+        key.each { |k| key_value[k] = 1 }
+        cmd["group"]["key"] = key_value
+      elsif keyf = opts.delete(:keyf)
+        cmd["group"]["$keyf"] = keyf.to_bson_code
+      end
+
+      result = @db.command(cmd, command_options(opts))
+      result["retval"]
+    end
+
+    public
+
+    # Return a list of distinct values for +key+ across all
+    # documents in the collection. The key may use dot notation
+    # to reach into an embedded object.
+    #
+    # @param [String, Symbol] key the key whose distinct values are returned. May use dot notation.
+    # @param [Hash] query a selector for limiting the result set over which distinct is run.
+    # @param [Hash] opts the options for this distinct operation.
+    #
+    # @option opts [:primary, :secondary] :read Read preference indicating which server to perform this query
+    #   on. See Collection#find for more details.
+    # @option opts [String] :comment (nil) a comment to include in profiling logs
+    #
+    # @example Saving zip codes and ages and returning distinct results.
+    #   @collection.save({:zip => 10010, :name => {:age => 27}})
+    #   @collection.save({:zip => 94108, :name => {:age => 24}})
+    #   @collection.save({:zip => 10010, :name => {:age => 27}})
+    #   @collection.save({:zip => 99701, :name => {:age => 24}})
+    #   @collection.save({:zip => 94108, :name => {:age => 27}})
+    #
+    #   @collection.distinct(:zip)
+    #   [10010, 94108, 99701]
+    #   @collection.distinct("name.age")
+    #   [27, 24]
+    #
+    #   # You may also pass a document selector as the second parameter
+    #   # to limit the documents over which distinct is run:
+    #   @collection.distinct("name.age", {"name.age" => {"$gt" => 24}})
+    #   [27]
+    #
+    # @return [Array] an array of distinct values.
+    def distinct(key, query=nil, opts={})
+      raise MongoArgumentError unless [String, Symbol].include?(key.class)
+      command = BSON::OrderedHash.new
+      command[:distinct] = @name
+      command[:key] = key.to_s
+      command[:query] = query
+
+      @db.command(command, command_options(opts))["values"]
+    end
+
+    # Rename this collection.
+    #
+    # Note: If operating in auth mode, the client must be authorized as an admin to
+    # perform this operation.
+    #
+    # @param [String] new_name the new name for this collection
+    #
+    # @return [String] the name of the new collection.
+    #
+    # @raise [Mongo::InvalidNSName] if +new_name+ is an invalid collection name.
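+    #
+    # A hedged usage sketch (the target name is illustrative):
+    #
+    # @example Rename and get the new name back:
+    #   coll.rename("events_archive") # => "events_archive"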
+    def rename(new_name)
+      case new_name
+      when Symbol, String
+      else
+        raise TypeError, "new_name must be a string or symbol"
+      end
+
+      new_name = new_name.to_s
+
+      if new_name.empty? or new_name.include? ".."
+        raise Mongo::InvalidNSName, "collection names cannot be empty or contain '..'"
+      end
+      if new_name.include? "$"
+        raise Mongo::InvalidNSName, "collection names must not contain '$'"
+      end
+      if new_name.match(/^\./) or new_name.match(/\.$/)
+        raise Mongo::InvalidNSName, "collection names must not start or end with '.'"
+      end
+
+      @db.rename_collection(@name, new_name)
+      @name = new_name
+    end
+
+    # Get information on the indexes for this collection.
+    #
+    # @return [Hash] a hash where the keys are index names.
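+    #
+    # A hedged usage sketch ("_id_" is the server's default index name; any
+    # other keys depend on the indexes you have created):
+    #
+    # @example List index names:
+    #   coll.index_information.keys # => ["_id_", ...]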
+    def index_information
+      @db.index_information(@name)
+    end
+
+    # Return a hash containing options that apply to this collection.
+    # For all possible keys and values, see DB#create_collection.
+    #
+    # @return [Hash] options that apply to this collection.
+    def options
+      @db.collections_info(@name).next_document['options']
+    end
+
+    # Return stats on the collection. Uses MongoDB's collstats command.
+    #
+    # @return [Hash]
+    def stats
+      @db.command({:collstats => @name})
+    end
+
+    # Get the number of documents in this collection.
+    #
+    # @option opts [Hash] :query ({}) A query selector for filtering the documents counted.
+    # @option opts [Integer] :skip (nil) The number of documents to skip.
+    # @option opts [Integer] :limit (nil) The maximum number of documents to count.
+    # @option opts [:primary, :secondary] :read Read preference for this command. See Collection#find for
+    #   more details.
+    # @option opts [String] :comment (nil) a comment to include in profiling logs
+    #
+    # @return [Integer]
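+    #
+    # A hedged usage sketch (the selector is illustrative):
+    #
+    # @example Count at most 100 active documents:
+    #   coll.count(:query => {:active => true}, :limit => 100)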
+    def count(opts={})
+      find(opts[:query],
+           :skip => opts[:skip],
+           :limit => opts[:limit],
+           :read => opts[:read],
+           :comment => opts[:comment]).count(true)
+    end
+
+    alias :size :count
+
+    protected
+
+    # Provide required command options if they are missing in the command options hash.
+    #
+    # @return [Hash] The command options hash
+    def command_options(opts)
+      opts[:read] ? opts : opts.merge(:read => @read)
+    end
+
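+    # Hedged, illustrative conversions for the helper below (not taken from this file):
+    #   normalize_hint_fields("age")          # => {"age" => 1}
+    #   normalize_hint_fields([:age, :name])  # => {:age => 1, :name => 1}
+    #   normalize_hint_fields(nil)            # => nil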
+    def normalize_hint_fields(hint)
+      case hint
+      when String
+        {hint => 1}
+      when Hash
+        hint
+      when nil
+        nil
+      else
+        h = BSON::OrderedHash.new
+        hint.to_a.each { |k| h[k] = 1 }
+        h
+      end
+    end
+
+    private
+
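+    # Dispatch a write through the write-command path or the legacy wire-protocol
+    # path, depending on whether the connection and write concern support write commands.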
+    def send_write(op_type, selector, doc_or_docs, check_keys, opts, collection_name=@name)
+      write_concern = get_write_concern(opts, self)
+      if @db.connection.use_write_command?(write_concern)
+        @command_writer.send_write_command(op_type, selector, doc_or_docs, check_keys, opts, write_concern, collection_name)
+      else
+        @operation_writer.send_write_operation(op_type, selector, doc_or_docs, check_keys, opts, write_concern, collection_name)
+      end
+    end
+
+    def index_name(spec)
+      field_spec = parse_index_spec(spec)
+      index_information.each do |index|
+        return index[0] if index[1]['key'] == field_spec
+      end
+      nil
+    end
+
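+    # Accepted index specifications, illustratively: a single name ("age" or :age),
+    # a hash ({:age => Mongo::ASCENDING}), or an array of [field, type] pairs
+    # ([[:age, Mongo::ASCENDING], [:name, Mongo::DESCENDING]]).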
+    def parse_index_spec(spec)
+      field_spec = BSON::OrderedHash.new
+      if spec.is_a?(String) || spec.is_a?(Symbol)
+        field_spec[spec.to_s] = 1
+      elsif spec.is_a?(Hash)
+        if RUBY_VERSION < '1.9' && !spec.is_a?(BSON::OrderedHash)
+          raise MongoArgumentError, "Must use OrderedHash in Ruby < 1.9.0"
+        end
+        validate_index_types(spec.values)
+        field_spec = spec.is_a?(BSON::OrderedHash) ? spec : BSON::OrderedHash.try_convert(spec)
+      elsif spec.is_a?(Array) && spec.all? {|field| field.is_a?(Array) }
+        spec.each do |f|
+          validate_index_types(f[1])
+          field_spec[f[0].to_s] = f[1]
+        end
+      else
+        raise MongoArgumentError, "Invalid index specification #{spec.inspect}; " +
+          "should be either a hash (OrderedHash), string, symbol, or an array of arrays."
+      end
+      field_spec
+    end
+
+    def validate_index_types(*types)
+      types.flatten!
+      types.each do |t|
+        unless Mongo::INDEX_TYPES.values.include?(t)
+          raise MongoArgumentError, "Invalid index field #{t.inspect}; " +
+            "should be one of " + Mongo::INDEX_TYPES.map {|k,v| "Mongo::#{k} (#{v})"}.join(', ')
+        end
+      end
+    end
+
+    def generate_indexes(field_spec, name, opts)
+      selector = {
+        :name => name,
+        :key => field_spec
+      }
+      selector.merge!(opts)
+
+      begin
+        cmd = BSON::OrderedHash[:createIndexes, @name, :indexes, [selector]]
+        @db.command(cmd)
+      rescue Mongo::OperationFailure => ex
+        if ex.error_code == Mongo::ErrorCode::COMMAND_NOT_FOUND || ex.error_code.nil?
+          selector[:ns] = "#{@db.name}.#{@name}"
+          send_write(:insert, nil, selector, false, {:w => 1}, Mongo::DB::SYSTEM_INDEX_COLLECTION)
+        else
+          raise Mongo::OperationFailure, "Failed to create index #{selector.inspect} with the following error: " +
+            "#{ex.message}"
+        end
+      end
+
+      nil
+    end
+
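+    # Illustratively: {"age" => 1, "name" => -1} yields the name "age_1_name_-1".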
+    def generate_index_name(spec)
+      indexes = []
+      spec.each_pair do |field, type|
+        indexes.push("#{field}_#{type}")
+      end
+      indexes.join("_")
+    end
+
+    def batch_write(op_type, documents, check_keys=true, opts={})
+      write_concern = get_write_concern(opts, self)
+      if @db.connection.use_write_command?(write_concern)
+        return @command_writer.batch_write(op_type, documents, check_keys, opts)
+      else
+        return @operation_writer.batch_write(op_type, documents, check_keys, opts)
+      end
+    end
+
+  end
+
+end