mongo 1.12.5 → 2.0.0.beta
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data.tar.gz.sig +0 -0
- data/CONTRIBUTING.md +64 -0
- data/LICENSE +1 -1
- data/README.md +23 -125
- data/Rakefile +26 -21
- data/bin/mongo_console +6 -38
- data/lib/mongo.rb +23 -82
- data/lib/mongo/address.rb +111 -0
- data/lib/mongo/address/ipv4.rb +85 -0
- data/lib/mongo/address/ipv6.rb +85 -0
- data/lib/mongo/address/unix.rb +76 -0
- data/lib/mongo/auth.rb +108 -0
- data/lib/mongo/auth/cr.rb +44 -0
- data/lib/mongo/auth/cr/conversation.rb +119 -0
- data/lib/mongo/auth/executable.rb +52 -0
- data/lib/mongo/auth/ldap.rb +48 -0
- data/lib/mongo/auth/ldap/conversation.rb +92 -0
- data/lib/mongo/auth/roles.rb +104 -0
- data/lib/mongo/auth/scram.rb +53 -0
- data/lib/mongo/auth/scram/conversation.rb +450 -0
- data/lib/mongo/auth/user.rb +159 -0
- data/lib/mongo/auth/user/view.rb +102 -0
- data/lib/mongo/auth/x509.rb +48 -0
- data/lib/mongo/auth/x509/conversation.rb +92 -0
- data/lib/mongo/{gridfs.rb → bulk.rb} +2 -5
- data/lib/mongo/bulk/bulk_write.rb +307 -0
- data/lib/mongo/client.rb +233 -0
- data/lib/mongo/cluster.rb +203 -0
- data/lib/mongo/cluster/topology.rb +60 -0
- data/lib/mongo/cluster/topology/replica_set.rb +160 -0
- data/lib/mongo/cluster/topology/sharded.rb +132 -0
- data/lib/mongo/cluster/topology/standalone.rb +132 -0
- data/lib/mongo/cluster/topology/unknown.rb +155 -0
- data/lib/mongo/collection.rb +130 -1101
- data/lib/mongo/collection/view.rb +169 -0
- data/lib/mongo/collection/view/aggregation.rb +108 -0
- data/lib/mongo/collection/view/explainable.rb +49 -0
- data/lib/mongo/collection/view/immutable.rb +43 -0
- data/lib/mongo/collection/view/iterable.rb +48 -0
- data/lib/mongo/collection/view/map_reduce.rb +191 -0
- data/lib/mongo/collection/view/readable.rb +363 -0
- data/lib/mongo/collection/view/writable.rb +169 -0
- data/lib/mongo/cursor.rb +79 -680
- data/lib/mongo/database.rb +224 -0
- data/lib/mongo/database/view.rb +101 -0
- data/lib/mongo/error.rb +81 -0
- data/lib/mongo/error/bulk_write_failure.rb +41 -0
- data/lib/mongo/{utils/thread_local_variable_manager.rb → error/empty_batch.rb} +22 -8
- data/{test/functional/db_connection_test.rb → lib/mongo/error/invalid_bulk_operation.rb} +19 -8
- data/lib/mongo/error/invalid_collection_name.rb +39 -0
- data/lib/mongo/error/invalid_database_name.rb +39 -0
- data/{test/replica_set/ssl_test.rb → lib/mongo/error/invalid_document.rb} +21 -14
- data/lib/mongo/error/invalid_file.rb +38 -0
- data/lib/mongo/error/invalid_nonce.rb +46 -0
- data/lib/mongo/error/invalid_replacement_document.rb +39 -0
- data/lib/mongo/error/invalid_signature.rb +47 -0
- data/{test/functional/ssl_test.rb → lib/mongo/error/invalid_update_document.rb} +22 -12
- data/lib/mongo/error/max_bson_size.rb +40 -0
- data/lib/mongo/error/max_message_size.rb +42 -0
- data/lib/mongo/{utils.rb → error/need_primary_server.rb} +10 -6
- data/lib/mongo/{connection.rb → error/operation_failure.rb} +10 -6
- data/lib/mongo/error/parser.rb +77 -0
- data/lib/mongo/{connection/socket.rb → error/socket_error.rb} +10 -5
- data/lib/mongo/error/socket_timeout_error.rb +23 -0
- data/lib/mongo/error/unsupported_features.rb +43 -0
- data/lib/mongo/event.rb +40 -0
- data/lib/mongo/event/listeners.rb +63 -0
- data/lib/mongo/event/primary_elected.rb +53 -0
- data/lib/mongo/event/publisher.rb +42 -0
- data/lib/mongo/event/server_added.rb +53 -0
- data/lib/mongo/event/server_removed.rb +53 -0
- data/lib/mongo/event/subscriber.rb +41 -0
- data/lib/mongo/grid.rb +16 -0
- data/lib/mongo/grid/file.rb +94 -0
- data/lib/mongo/grid/file/chunk.rb +184 -0
- data/lib/mongo/grid/file/metadata.rb +223 -0
- data/lib/mongo/grid/fs.rb +149 -0
- data/lib/mongo/index.rb +64 -0
- data/lib/mongo/index/view.rb +205 -0
- data/lib/mongo/loggable.rb +126 -0
- data/lib/mongo/logger.rb +132 -0
- data/lib/mongo/operation.rb +26 -0
- data/lib/mongo/operation/aggregate.rb +100 -0
- data/lib/mongo/operation/aggregate/result.rb +84 -0
- data/lib/mongo/operation/batchable.rb +103 -0
- data/lib/mongo/operation/bulk_delete/result.rb +197 -0
- data/lib/mongo/operation/bulk_insert/result.rb +195 -0
- data/lib/mongo/operation/bulk_update/result.rb +295 -0
- data/lib/mongo/operation/command.rb +62 -0
- data/lib/mongo/operation/executable.rb +105 -0
- data/lib/mongo/operation/kill_cursors.rb +39 -0
- data/lib/mongo/operation/limited.rb +37 -0
- data/lib/mongo/operation/list_collections/result.rb +116 -0
- data/lib/mongo/operation/list_indexes/result.rb +118 -0
- data/lib/mongo/operation/map_reduce.rb +96 -0
- data/lib/mongo/operation/map_reduce/result.rb +122 -0
- data/lib/mongo/{functional.rb → operation/read.rb} +7 -7
- data/lib/mongo/operation/read/collections_info.rb +67 -0
- data/lib/mongo/operation/read/get_more.rb +71 -0
- data/lib/mongo/operation/read/indexes.rb +68 -0
- data/lib/mongo/operation/read/list_collections.rb +75 -0
- data/lib/mongo/operation/read/list_indexes.rb +77 -0
- data/lib/mongo/operation/read/query.rb +71 -0
- data/lib/mongo/operation/read_preferrable.rb +34 -0
- data/lib/mongo/operation/result.rb +259 -0
- data/lib/mongo/operation/specifiable.rb +380 -0
- data/lib/mongo/operation/write.rb +25 -0
- data/lib/mongo/operation/write/bulk_delete.rb +158 -0
- data/lib/mongo/operation/write/bulk_insert.rb +160 -0
- data/lib/mongo/operation/write/bulk_update.rb +167 -0
- data/lib/mongo/{connection/socket/socket_util.rb → operation/write/command.rb} +9 -24
- data/lib/mongo/operation/write/command/create_user.rb +43 -0
- data/lib/mongo/operation/write/command/delete.rb +56 -0
- data/lib/mongo/operation/write/command/drop_index.rb +51 -0
- data/lib/mongo/operation/write/command/ensure_index.rb +55 -0
- data/lib/mongo/operation/write/command/insert.rb +55 -0
- data/lib/mongo/operation/write/command/remove_user.rb +42 -0
- data/lib/mongo/operation/write/command/update.rb +60 -0
- data/lib/mongo/operation/write/command/writable.rb +61 -0
- data/lib/mongo/operation/write/create_index.rb +84 -0
- data/lib/mongo/operation/write/create_user.rb +75 -0
- data/lib/mongo/operation/write/delete.rb +91 -0
- data/lib/mongo/operation/write/drop_index.rb +62 -0
- data/lib/mongo/operation/write/insert.rb +88 -0
- data/lib/mongo/operation/write/remove_user.rb +70 -0
- data/lib/mongo/operation/write/update.rb +98 -0
- data/lib/mongo/protocol.rb +15 -0
- data/lib/mongo/protocol/bit_vector.rb +61 -0
- data/lib/mongo/protocol/delete.rb +94 -0
- data/lib/mongo/protocol/get_more.rb +99 -0
- data/lib/mongo/protocol/insert.rb +99 -0
- data/lib/mongo/protocol/kill_cursors.rb +74 -0
- data/lib/mongo/protocol/message.rb +252 -0
- data/lib/mongo/protocol/query.rb +147 -0
- data/lib/mongo/protocol/reply.rb +72 -0
- data/lib/mongo/protocol/serializers.rb +180 -0
- data/lib/mongo/protocol/update.rb +111 -0
- data/lib/mongo/server.rb +163 -0
- data/lib/mongo/server/connectable.rb +99 -0
- data/lib/mongo/server/connection.rb +133 -0
- data/lib/mongo/server/connection_pool.rb +141 -0
- data/lib/mongo/server/connection_pool/queue.rb +182 -0
- data/lib/mongo/server/context.rb +66 -0
- data/lib/mongo/server/description.rb +450 -0
- data/lib/mongo/server/description/features.rb +85 -0
- data/lib/mongo/server/description/inspector.rb +79 -0
- data/lib/mongo/server/description/inspector/primary_elected.rb +58 -0
- data/lib/mongo/server/description/inspector/server_added.rb +59 -0
- data/lib/mongo/server/description/inspector/server_removed.rb +59 -0
- data/lib/mongo/server/monitor.rb +160 -0
- data/lib/mongo/server/monitor/connection.rb +88 -0
- data/lib/mongo/server_selector.rb +81 -0
- data/lib/mongo/server_selector/nearest.rb +94 -0
- data/lib/mongo/server_selector/primary.rb +88 -0
- data/lib/mongo/server_selector/primary_preferred.rb +94 -0
- data/lib/mongo/server_selector/secondary.rb +91 -0
- data/lib/mongo/server_selector/secondary_preferred.rb +96 -0
- data/lib/mongo/server_selector/selectable.rb +209 -0
- data/lib/mongo/socket.rb +179 -0
- data/lib/mongo/socket/ssl.rb +108 -0
- data/lib/mongo/socket/tcp.rb +69 -0
- data/lib/mongo/socket/unix.rb +66 -0
- data/lib/mongo/uri.rb +504 -0
- data/lib/mongo/version.rb +21 -0
- data/lib/mongo/write_concern.rb +99 -0
- data/lib/mongo/write_concern/acknowledged.rb +38 -0
- data/lib/mongo/write_concern/normalizable.rb +73 -0
- data/lib/mongo/write_concern/unacknowledged.rb +43 -0
- data/mongo.gemspec +17 -14
- data/spec/mongo/address/ipv4_spec.rb +74 -0
- data/spec/mongo/address/ipv6_spec.rb +74 -0
- data/spec/mongo/address/unix_spec.rb +30 -0
- data/spec/mongo/address_spec.rb +206 -0
- data/spec/mongo/auth/cr_spec.rb +59 -0
- data/spec/mongo/auth/ldap_spec.rb +40 -0
- data/spec/mongo/auth/scram/conversation_spec.rb +197 -0
- data/spec/mongo/auth/scram_spec.rb +55 -0
- data/spec/mongo/auth/user/view_spec.rb +76 -0
- data/spec/mongo/auth/user_spec.rb +190 -0
- data/spec/mongo/auth/x509_spec.rb +40 -0
- data/spec/mongo/auth_spec.rb +65 -0
- data/spec/mongo/bulk/bulk_write_spec.rb +175 -0
- data/spec/mongo/client_spec.rb +564 -0
- data/spec/mongo/cluster/topology/replica_set_spec.rb +101 -0
- data/spec/mongo/cluster/topology/sharded_spec.rb +74 -0
- data/spec/mongo/cluster/topology/standalone_spec.rb +79 -0
- data/spec/mongo/cluster/topology_spec.rb +65 -0
- data/spec/mongo/cluster_spec.rb +129 -0
- data/spec/mongo/collection/view/aggregation_spec.rb +135 -0
- data/spec/mongo/collection/view/explainable_spec.rb +32 -0
- data/spec/mongo/collection/view/map_reduce_spec.rb +242 -0
- data/spec/mongo/collection/view/readable_spec.rb +603 -0
- data/spec/mongo/collection/view/writable_spec.rb +504 -0
- data/spec/mongo/collection/view_spec.rb +521 -0
- data/spec/mongo/collection_spec.rb +362 -0
- data/spec/mongo/cursor_spec.rb +295 -0
- data/spec/mongo/database_spec.rb +306 -0
- data/spec/mongo/error/parser_spec.rb +119 -0
- data/spec/mongo/event/publisher_spec.rb +50 -0
- data/spec/mongo/event/subscriber_spec.rb +34 -0
- data/spec/mongo/grid/file/chunk_spec.rb +226 -0
- data/spec/mongo/grid/file/metadata_spec.rb +69 -0
- data/spec/mongo/grid/file_spec.rb +138 -0
- data/spec/mongo/grid/fs_spec.rb +129 -0
- data/spec/mongo/index/view_spec.rb +226 -0
- data/spec/mongo/loggable_spec.rb +62 -0
- data/spec/mongo/logger_spec.rb +97 -0
- data/spec/mongo/operation/aggregate/result_spec.rb +80 -0
- data/spec/mongo/operation/aggregate_spec.rb +135 -0
- data/spec/mongo/operation/command_spec.rb +106 -0
- data/spec/mongo/operation/kill_cursors_spec.rb +66 -0
- data/spec/mongo/operation/limited_spec.rb +50 -0
- data/spec/mongo/operation/map_reduce_spec.rb +143 -0
- data/spec/mongo/operation/read/collections_info_spec.rb +40 -0
- data/spec/mongo/operation/read/get_more_spec.rb +81 -0
- data/spec/mongo/operation/read/indexes_spec.rb +31 -0
- data/spec/mongo/operation/read/query_spec.rb +84 -0
- data/spec/mongo/operation/result_spec.rb +275 -0
- data/spec/mongo/operation/specifiable_spec.rb +53 -0
- data/spec/mongo/operation/write/bulk_delete_spec.rb +473 -0
- data/spec/mongo/operation/write/bulk_insert_spec.rb +466 -0
- data/spec/mongo/operation/write/bulk_update_spec.rb +524 -0
- data/spec/mongo/operation/write/command/delete_spec.rb +116 -0
- data/spec/mongo/operation/write/command/insert_spec.rb +117 -0
- data/spec/mongo/operation/write/command/update_spec.rb +123 -0
- data/spec/mongo/operation/write/create_user_spec.rb +44 -0
- data/spec/mongo/operation/write/delete_spec.rb +178 -0
- data/spec/mongo/operation/write/drop_index_spec.rb +51 -0
- data/spec/mongo/operation/write/ensure_index_spec.rb +81 -0
- data/spec/mongo/operation/write/insert_spec.rb +231 -0
- data/spec/mongo/operation/write/remove_user_spec.rb +46 -0
- data/spec/mongo/operation/write/response_spec.rb +85 -0
- data/spec/mongo/operation/write/update_spec.rb +177 -0
- data/spec/mongo/protocol/delete_spec.rb +167 -0
- data/spec/mongo/protocol/get_more_spec.rb +146 -0
- data/spec/mongo/protocol/insert_spec.rb +161 -0
- data/spec/mongo/protocol/kill_cursors_spec.rb +101 -0
- data/spec/mongo/protocol/query_spec.rb +285 -0
- data/spec/mongo/protocol/reply_spec.rb +157 -0
- data/spec/mongo/protocol/update_spec.rb +186 -0
- data/spec/mongo/server/connection_pool/queue_spec.rb +170 -0
- data/spec/mongo/server/connection_pool_spec.rb +120 -0
- data/spec/mongo/server/connection_spec.rb +289 -0
- data/spec/mongo/server/description/features_spec.rb +138 -0
- data/spec/mongo/server/description/inspector/primary_elected_spec.rb +94 -0
- data/spec/mongo/server/description/inspector/server_added_spec.rb +92 -0
- data/spec/mongo/server/description/inspector/server_removed_spec.rb +95 -0
- data/spec/mongo/server/description_spec.rb +510 -0
- data/spec/mongo/server/monitor_spec.rb +130 -0
- data/spec/mongo/server_discovery_and_monitoring_spec.rb +103 -0
- data/spec/mongo/server_selection_rtt_spec.rb +104 -0
- data/spec/mongo/server_selection_spec.rb +89 -0
- data/spec/mongo/server_selector/nearest_spec.rb +250 -0
- data/spec/mongo/server_selector/primary_preferred_spec.rb +290 -0
- data/spec/mongo/server_selector/primary_spec.rb +114 -0
- data/spec/mongo/server_selector/secondary_preferred_spec.rb +252 -0
- data/spec/mongo/server_selector/secondary_spec.rb +196 -0
- data/spec/mongo/server_selector_spec.rb +101 -0
- data/spec/mongo/server_spec.rb +131 -0
- data/spec/mongo/uri_spec.rb +517 -0
- data/spec/mongo/write_concern/acknowledged_spec.rb +44 -0
- data/spec/mongo/write_concern/unacknowledged_spec.rb +15 -0
- data/spec/mongo_orchestration_spec.rb +70 -0
- data/spec/spec_helper.rb +148 -0
- data/spec/support/authorization.rb +245 -0
- data/spec/support/helpers.rb +140 -0
- data/spec/support/matchers.rb +37 -0
- data/spec/support/mongo_orchestration.rb +61 -0
- data/spec/support/mongo_orchestration/requestable.rb +109 -0
- data/spec/support/mongo_orchestration/standalone.rb +57 -0
- data/spec/support/sdam/rs/discover_arbiters.yml +41 -0
- data/spec/support/sdam/rs/discover_passives.yml +41 -0
- data/spec/support/sdam/rs/discover_primary.yml +40 -0
- data/spec/support/sdam/rs/discover_secondary.yml +41 -0
- data/spec/support/sdam/rs/discovery.yml +195 -0
- data/spec/support/sdam/rs/ghost_discovered.yml +39 -0
- data/spec/support/sdam/rs/hosts_differ_from_seeds.yml +34 -0
- data/spec/support/sdam/rs/member_reconfig.yml +68 -0
- data/spec/support/sdam/rs/member_standalone.yml +60 -0
- data/spec/support/sdam/rs/new_primary.yml +74 -0
- data/spec/support/sdam/rs/new_primary_wrong_set_name.yml +71 -0
- data/spec/support/sdam/rs/non_rs_member.yml +31 -0
- data/spec/support/sdam/rs/normalize_case.yml +49 -0
- data/spec/support/sdam/rs/primary_becomes_standalone.yml +52 -0
- data/spec/support/sdam/rs/primary_changes_set_name.yml +57 -0
- data/spec/support/sdam/rs/primary_disconnect.yml +56 -0
- data/spec/support/sdam/rs/primary_wrong_set_name.yml +27 -0
- data/spec/support/sdam/rs/response_from_removed.yml +63 -0
- data/spec/support/sdam/rs/rsother_discovered.yml +41 -0
- data/spec/support/sdam/rs/sec_not_auth.yml +49 -0
- data/spec/support/sdam/rs/secondary_wrong_set_name.yml +28 -0
- data/spec/support/sdam/rs/secondary_wrong_set_name_with_primary.yml +69 -0
- data/spec/support/sdam/rs/unexpected_mongos.yml +26 -0
- data/spec/support/sdam/rs/wrong_set_name.yml +35 -0
- data/spec/support/sdam/sharded/multiple_mongoses.yml +46 -0
- data/spec/support/sdam/sharded/non_mongos_removed.yml +41 -0
- data/spec/support/sdam/sharded/normalize_uri_case.yml +32 -0
- data/spec/support/sdam/single/direct_connection_external_ip.yml +34 -0
- data/spec/support/sdam/single/direct_connection_mongos.yml +33 -0
- data/spec/support/sdam/single/direct_connection_rsarbiter.yml +35 -0
- data/spec/support/sdam/single/direct_connection_rsprimary.yml +34 -0
- data/spec/support/sdam/single/direct_connection_rssecondary.yml +35 -0
- data/spec/support/sdam/single/direct_connection_slave.yml +32 -0
- data/spec/support/sdam/single/direct_connection_standalone.yml +32 -0
- data/spec/support/sdam/single/not_ok_response.yml +39 -0
- data/spec/support/sdam/single/standalone_removed.yml +32 -0
- data/spec/support/sdam/single/unavailable_seed.yml +28 -0
- data/spec/support/server_discovery_and_monitoring.rb +167 -0
- data/spec/support/server_selection.rb +140 -0
- data/spec/support/server_selection/rtt/first_value.yml +4 -0
- data/spec/support/server_selection/rtt/first_value_zero.yml +4 -0
- data/spec/support/server_selection/rtt/value_test_1.yml +4 -0
- data/spec/support/server_selection/rtt/value_test_2.yml +4 -0
- data/spec/support/server_selection/rtt/value_test_3.yml +4 -0
- data/spec/support/server_selection/rtt/value_test_4.yml +4 -0
- data/spec/support/server_selection/rtt/value_test_5.yml +4 -0
- data/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Nearest.yml +32 -0
- data/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Nearest_non_matching.yml +27 -0
- data/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Primary.yml +23 -0
- data/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/PrimaryPreferred.yml +32 -0
- data/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/PrimaryPreferred_non_matching.yml +27 -0
- data/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Secondary.yml +32 -0
- data/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/SecondaryPreferred.yml +32 -0
- data/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/SecondaryPreferred_non_matching.yml +27 -0
- data/spec/support/server_selection/selection/ReplicaSetNoPrimary/read/Secondary_non_matching.yml +27 -0
- data/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/Nearest.yml +41 -0
- data/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/Nearest_non_matching.yml +34 -0
- data/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/Primary.yml +33 -0
- data/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/PrimaryPreferred.yml +39 -0
- data/spec/support/server_selection/selection/ReplicaSetWithPrimary/read/PrimaryPreferred_non_matching.yml +36 -0
- data/spec/support/server_selection/selection/Sharded/read/SecondaryPreferred.yml +32 -0
- data/spec/support/server_selection/selection/Single/read/SecondaryPreferred.yml +23 -0
- data/spec/support/server_selection/selection/Unknown/read/SecondaryPreferred.yml +13 -0
- data/spec/support/server_selection_rtt.rb +41 -0
- data/spec/support/shared/bulk_write.rb +498 -0
- data/spec/support/shared/cursor.rb +38 -0
- data/spec/support/shared/operation.rb +77 -0
- data/spec/support/shared/protocol.rb +31 -0
- data/spec/support/shared/server_selector.rb +111 -0
- data/spec/support/shared/socket.rb +82 -0
- data/spec/support/travis.rb +14 -0
- metadata +523 -189
- metadata.gz.sig +0 -0
- data/VERSION +0 -1
- data/lib/mongo/bulk_write_collection_view.rb +0 -387
- data/lib/mongo/collection_writer.rb +0 -364
- data/lib/mongo/connection/node.rb +0 -249
- data/lib/mongo/connection/pool.rb +0 -340
- data/lib/mongo/connection/pool_manager.rb +0 -320
- data/lib/mongo/connection/sharding_pool_manager.rb +0 -67
- data/lib/mongo/connection/socket/ssl_socket.rb +0 -95
- data/lib/mongo/connection/socket/tcp_socket.rb +0 -87
- data/lib/mongo/connection/socket/unix_socket.rb +0 -39
- data/lib/mongo/db.rb +0 -808
- data/lib/mongo/exception.rb +0 -145
- data/lib/mongo/functional/authentication.rb +0 -455
- data/lib/mongo/functional/logging.rb +0 -85
- data/lib/mongo/functional/read_preference.rb +0 -183
- data/lib/mongo/functional/scram.rb +0 -556
- data/lib/mongo/functional/uri_parser.rb +0 -409
- data/lib/mongo/functional/write_concern.rb +0 -66
- data/lib/mongo/gridfs/grid.rb +0 -112
- data/lib/mongo/gridfs/grid_ext.rb +0 -53
- data/lib/mongo/gridfs/grid_file_system.rb +0 -163
- data/lib/mongo/gridfs/grid_io.rb +0 -484
- data/lib/mongo/legacy.rb +0 -140
- data/lib/mongo/mongo_client.rb +0 -697
- data/lib/mongo/mongo_replica_set_client.rb +0 -535
- data/lib/mongo/mongo_sharded_client.rb +0 -159
- data/lib/mongo/networking.rb +0 -372
- data/lib/mongo/utils/conversions.rb +0 -110
- data/lib/mongo/utils/core_ext.rb +0 -70
- data/lib/mongo/utils/server_version.rb +0 -69
- data/lib/mongo/utils/support.rb +0 -80
- data/test/functional/authentication_test.rb +0 -39
- data/test/functional/bulk_api_stress_test.rb +0 -133
- data/test/functional/bulk_write_collection_view_test.rb +0 -1198
- data/test/functional/client_test.rb +0 -627
- data/test/functional/collection_test.rb +0 -2175
- data/test/functional/collection_writer_test.rb +0 -83
- data/test/functional/conversions_test.rb +0 -163
- data/test/functional/cursor_fail_test.rb +0 -57
- data/test/functional/cursor_message_test.rb +0 -56
- data/test/functional/cursor_test.rb +0 -683
- data/test/functional/db_api_test.rb +0 -835
- data/test/functional/db_test.rb +0 -348
- data/test/functional/grid_file_system_test.rb +0 -285
- data/test/functional/grid_io_test.rb +0 -252
- data/test/functional/grid_test.rb +0 -273
- data/test/functional/pool_test.rb +0 -136
- data/test/functional/safe_test.rb +0 -98
- data/test/functional/support_test.rb +0 -62
- data/test/functional/timeout_test.rb +0 -60
- data/test/functional/uri_test.rb +0 -446
- data/test/functional/write_concern_test.rb +0 -118
- data/test/helpers/general.rb +0 -50
- data/test/helpers/test_unit.rb +0 -476
- data/test/replica_set/authentication_test.rb +0 -37
- data/test/replica_set/basic_test.rb +0 -189
- data/test/replica_set/client_test.rb +0 -393
- data/test/replica_set/connection_test.rb +0 -138
- data/test/replica_set/count_test.rb +0 -66
- data/test/replica_set/cursor_test.rb +0 -220
- data/test/replica_set/insert_test.rb +0 -157
- data/test/replica_set/max_values_test.rb +0 -151
- data/test/replica_set/pinning_test.rb +0 -105
- data/test/replica_set/query_test.rb +0 -73
- data/test/replica_set/read_preference_test.rb +0 -219
- data/test/replica_set/refresh_test.rb +0 -211
- data/test/replica_set/replication_ack_test.rb +0 -95
- data/test/sharded_cluster/basic_test.rb +0 -203
- data/test/shared/authentication/basic_auth_shared.rb +0 -260
- data/test/shared/authentication/bulk_api_auth_shared.rb +0 -249
- data/test/shared/authentication/gssapi_shared.rb +0 -176
- data/test/shared/authentication/sasl_plain_shared.rb +0 -96
- data/test/shared/authentication/scram_shared.rb +0 -92
- data/test/shared/ssl_shared.rb +0 -235
- data/test/test_helper.rb +0 -61
- data/test/threading/basic_test.rb +0 -120
- data/test/tools/mongo_config.rb +0 -708
- data/test/tools/mongo_config_test.rb +0 -160
- data/test/unit/client_test.rb +0 -381
- data/test/unit/collection_test.rb +0 -166
- data/test/unit/connection_test.rb +0 -335
- data/test/unit/cursor_test.rb +0 -307
- data/test/unit/db_test.rb +0 -136
- data/test/unit/grid_test.rb +0 -76
- data/test/unit/mongo_sharded_client_test.rb +0 -48
- data/test/unit/node_test.rb +0 -93
- data/test/unit/pool_manager_test.rb +0 -111
- data/test/unit/read_pref_test.rb +0 -406
- data/test/unit/read_test.rb +0 -159
- data/test/unit/safe_test.rb +0 -158
- data/test/unit/sharding_pool_manager_test.rb +0 -84
- data/test/unit/write_concern_test.rb +0 -175
data/lib/mongo/collection.rb
CHANGED
@@ -1,4 +1,4 @@
|
|
1
|
-
# Copyright (C)
|
1
|
+
# Copyright (C) 2014-2015 MongoDB, Inc.
|
2
2
|
#
|
3
3
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
4
4
|
# you may not use this file except in compliance with the License.
|
@@ -12,1181 +12,210 @@
|
|
12
12
|
# See the License for the specific language governing permissions and
|
13
13
|
# limitations under the License.
|
14
14
|
|
15
|
+
require 'mongo/collection/view'
|
16
|
+
|
15
17
|
module Mongo
|
16
18
|
|
17
|
-
#
|
19
|
+
# Represents a collection in the database and operations that can directly be
|
20
|
+
# applied to one.
|
21
|
+
#
|
22
|
+
# @since 2.0.0
|
18
23
|
class Collection
|
19
|
-
|
20
|
-
include Mongo::WriteConcern
|
21
|
-
|
22
|
-
attr_reader :db,
|
23
|
-
:name,
|
24
|
-
:pk_factory,
|
25
|
-
:hint,
|
26
|
-
:write_concern,
|
27
|
-
:capped,
|
28
|
-
:operation_writer,
|
29
|
-
:command_writer
|
30
|
-
|
31
|
-
# Read Preference
|
32
|
-
attr_accessor :read,
|
33
|
-
:tag_sets,
|
34
|
-
:acceptable_latency
|
35
|
-
|
36
|
-
# Initialize a collection object.
|
37
|
-
#
|
38
|
-
# @param [String, Symbol] name the name of the collection.
|
39
|
-
# @param [DB] db a MongoDB database instance.
|
40
|
-
#
|
41
|
-
# @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
|
42
|
-
# should be acknowledged.
|
43
|
-
# @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
|
44
|
-
# @option opts [Boolean] :j (false) If true, block until write operations have been committed
|
45
|
-
# to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
|
46
|
-
# ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
|
47
|
-
# fail with an exception if this option is used when the server is running without journaling.
|
48
|
-
# @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
|
49
|
-
# the server has synced all data files to disk. If the server is running with journaling, this acts the same as
|
50
|
-
# the 'j' option, blocking until write operations have been committed to the journal.
|
51
|
-
# Cannot be used in combination with 'j'.
|
52
|
-
#
|
53
|
-
# Notes about write concern:
|
54
|
-
# These write concern options will be used for insert, update, and remove methods called on this
|
55
|
-
# Collection instance. If no value is provided, the default values set on this instance's DB will be used.
|
56
|
-
# These option values can be overridden for any invocation of insert, update, or remove.
|
57
|
-
#
|
58
|
-
# @option opts [:create_pk] :pk (BSON::ObjectId) A primary key factory to use
|
59
|
-
# other than the default BSON::ObjectId.
|
60
|
-
# @option opts [:primary, :secondary] :read The default read preference for queries
|
61
|
-
# initiates from this connection object. If +:secondary+ is chosen, reads will be sent
|
62
|
-
# to one of the closest available secondary nodes. If a secondary node cannot be located, the
|
63
|
-
# read will be sent to the primary. If this option is left unspecified, the value of the read
|
64
|
-
# preference for this collection's associated Mongo::DB object will be used.
|
65
|
-
#
|
66
|
-
# @raise [InvalidNSName]
|
67
|
-
# if collection name is empty, contains '$', or starts or ends with '.'
|
68
|
-
#
|
69
|
-
# @raise [TypeError]
|
70
|
-
# if collection name is not a string or symbol
|
71
|
-
#
|
72
|
-
# @return [Collection]
|
73
|
-
def initialize(name, db, opts={})
|
74
|
-
if db.is_a?(String) && name.is_a?(Mongo::DB)
|
75
|
-
warn "Warning: the order of parameters to initialize a collection have changed. " +
|
76
|
-
"Please specify the collection name first, followed by the db. This will be made permanent" +
|
77
|
-
"in v2.0."
|
78
|
-
db, name = name, db
|
79
|
-
end
|
80
|
-
|
81
|
-
raise TypeError,
|
82
|
-
"Collection name must be a String or Symbol." unless [String, Symbol].include?(name.class)
|
83
|
-
name = name.to_s
|
84
|
-
|
85
|
-
raise Mongo::InvalidNSName,
|
86
|
-
"Collection names cannot be empty." if name.empty? || name.include?("..")
|
87
|
-
|
88
|
-
if name.include?("$")
|
89
|
-
raise Mongo::InvalidNSName,
|
90
|
-
"Collection names must not contain '$'" unless name =~ /((^\$cmd)|(oplog\.\$main))/
|
91
|
-
end
|
92
|
-
|
93
|
-
raise Mongo::InvalidNSName,
|
94
|
-
"Collection names must not start or end with '.'" if name.match(/^\./) || name.match(/\.$/)
|
95
|
-
|
96
|
-
pk_factory = nil
|
97
|
-
if opts.respond_to?(:create_pk) || !opts.is_a?(Hash)
|
98
|
-
warn "The method for specifying a primary key factory on a Collection has changed.\n" +
|
99
|
-
"Please specify it as an option (e.g., :pk => PkFactory)."
|
100
|
-
pk_factory = opts
|
101
|
-
end
|
102
|
-
|
103
|
-
@db, @name = db, name
|
104
|
-
@connection = @db.connection
|
105
|
-
@logger = @connection.logger
|
106
|
-
@cache_time = @db.cache_time
|
107
|
-
@cache = Hash.new(0)
|
108
|
-
unless pk_factory
|
109
|
-
@write_concern = get_write_concern(opts, db)
|
110
|
-
@read = opts[:read] || @db.read
|
111
|
-
Mongo::ReadPreference::validate(@read)
|
112
|
-
@capped = opts[:capped]
|
113
|
-
@tag_sets = opts.fetch(:tag_sets, @db.tag_sets)
|
114
|
-
@acceptable_latency = opts.fetch(:acceptable_latency, @db.acceptable_latency)
|
115
|
-
end
|
116
|
-
@pk_factory = pk_factory || opts[:pk] || BSON::ObjectId
|
117
|
-
@hint = nil
|
118
|
-
@operation_writer = CollectionOperationWriter.new(self)
|
119
|
-
@command_writer = CollectionCommandWriter.new(self)
|
120
|
-
end
|
121
|
-
|
122
|
-
# Indicate whether this is a capped collection.
|
123
|
-
#
|
124
|
-
# @raise [Mongo::OperationFailure]
|
125
|
-
# if the collection doesn't exist.
|
126
|
-
#
|
127
|
-
# @return [Boolean]
|
128
|
-
def capped?
|
129
|
-
@capped ||= [1, true].include?(@db.command({:collstats => @name})['capped'])
|
130
|
-
end
|
131
|
-
|
132
|
-
# Return a sub-collection of this collection by name. If 'users' is a collection, then
|
133
|
-
# 'users.comments' is a sub-collection of users.
|
134
|
-
#
|
135
|
-
# @param [String, Symbol] name
|
136
|
-
# the collection to return
|
137
|
-
#
|
138
|
-
# @raise [Mongo::InvalidNSName]
|
139
|
-
# if passed an invalid collection name
|
140
|
-
#
|
141
|
-
# @return [Collection]
|
142
|
-
# the specified sub-collection
|
143
|
-
def [](name)
|
144
|
-
name = "#{self.name}.#{name}"
|
145
|
-
return Collection.new(name, db) if !db.strict? ||
|
146
|
-
db.collection_names.include?(name.to_s)
|
147
|
-
raise "Collection #{name} doesn't exist. Currently in strict mode."
|
148
|
-
end
|
149
|
-
|
150
|
-
# Set a hint field for query optimizer. Hint may be a single field
|
151
|
-
# name, array of field names, or a hash (preferably an [OrderedHash]).
|
152
|
-
# If using MongoDB > 1.1, you probably don't ever need to set a hint.
|
153
|
-
#
|
154
|
-
# @param [String, Array, OrderedHash] hint a single field, an array of
|
155
|
-
# fields, or a hash specifying fields
|
156
|
-
def hint=(hint=nil)
|
157
|
-
@hint = normalize_hint_fields(hint)
|
158
|
-
self
|
159
|
-
end
|
160
|
-
|
161
|
-
# Set a hint field using a named index.
|
162
|
-
# @param [String] hint index name
|
163
|
-
def named_hint=(hint=nil)
|
164
|
-
@hint = hint
|
165
|
-
self
|
166
|
-
end
|
167
|
-
|
168
|
-
# Query the database.
|
169
|
-
#
|
170
|
-
# The +selector+ argument is a prototype document that all results must
|
171
|
-
# match. For example:
|
172
|
-
#
|
173
|
-
# collection.find({"hello" => "world"})
|
174
|
-
#
|
175
|
-
# only matches documents that have a key "hello" with value "world".
|
176
|
-
# Matches can have other keys *in addition* to "hello".
|
177
|
-
#
|
178
|
-
# If given an optional block +find+ will yield a Cursor to that block,
|
179
|
-
# close the cursor, and then return nil. This guarantees that partially
|
180
|
-
# evaluated cursors will be closed. If given no block +find+ returns a
|
181
|
-
# cursor.
|
182
|
-
#
|
183
|
-
# @param [Hash] selector
|
184
|
-
# a document specifying elements which must be present for a
|
185
|
-
# document to be included in the result set. Note that in rare cases,
|
186
|
-
# (e.g., with $near queries), the order of keys will matter. To preserve
|
187
|
-
# key order on a selector, use an instance of BSON::OrderedHash (only applies
|
188
|
-
# to Ruby 1.8).
|
189
|
-
#
|
190
|
-
# @option opts [Array, Hash] :fields field names that should be returned in the result
|
191
|
-
# set ("_id" will be included unless explicitly excluded). By limiting results to a certain subset of fields,
|
192
|
-
# you can cut down on network traffic and decoding time. If using a Hash, keys should be field
|
193
|
-
# names and values should be either 1 or 0, depending on whether you want to include or exclude
|
194
|
-
# the given field.
|
195
|
-
# @option opts [:primary, :secondary] :read The default read preference for queries
|
196
|
-
# initiates from this connection object. If +:secondary+ is chosen, reads will be sent
|
197
|
-
# to one of the closest available secondary nodes. If a secondary node cannot be located, the
|
198
|
-
# read will be sent to the primary. If this option is left unspecified, the value of the read
|
199
|
-
# preference for this Collection object will be used.
|
200
|
-
# @option opts [Integer] :skip number of documents to skip from the beginning of the result set
|
201
|
-
# @option opts [Integer] :limit maximum number of documents to return
|
202
|
-
# @option opts [Array] :sort an array of [key, direction] pairs to sort by. Direction should
|
203
|
-
# be specified as Mongo::ASCENDING (or :ascending / :asc) or Mongo::DESCENDING (or :descending / :desc)
|
204
|
-
# @option opts [String, Array, OrderedHash] :hint hint for query optimizer, usually not necessary if
|
205
|
-
# using MongoDB > 1.1
|
206
|
-
# @option opts [String] :named_hint for specifying a named index as a hint, will be overriden by :hint
|
207
|
-
# if :hint is also provided.
|
208
|
-
# @option opts [Boolean] :snapshot (false) if true, snapshot mode will be used for this query.
|
209
|
-
# Snapshot mode assures no duplicates are returned, or objects missed, which were preset at both the start and
|
210
|
-
# end of the query's execution.
|
211
|
-
# For details see http://www.mongodb.org/display/DOCS/How+to+do+Snapshotting+in+the+Mongo+Database
|
212
|
-
# @option opts [Boolean] :batch_size (100) the number of documents to returned by the database per
|
213
|
-
# GETMORE operation. A value of 0 will let the database server decide how many results to return.
|
214
|
-
# This option can be ignored for most use cases.
|
215
|
-
# @option opts [Boolean] :timeout (true) when +true+, the returned cursor will be subject to
|
216
|
-
# the normal cursor timeout behavior of the mongod process. When +false+, the returned cursor will
|
217
|
-
# never timeout. Note that disabling timeout will only work when #find is invoked with a block.
|
218
|
-
# This is to prevent any inadvertent failure to close the cursor, as the cursor is explicitly
|
219
|
-
# closed when block code finishes.
|
220
|
-
# @option opts [Integer] :max_scan (nil) Limit the number of items to scan on both collection scans and indexed queries..
|
221
|
-
# @option opts [Boolean] :show_disk_loc (false) Return the disk location of each query result (for debugging).
|
222
|
-
# @option opts [Boolean] :return_key (false) Return the index key used to obtain the result (for debugging).
|
223
|
-
# @option opts [Block] :transformer (nil) a block for transforming returned documents.
|
224
|
-
# This is normally used by object mappers to convert each returned document to an instance of a class.
|
225
|
-
# @option opts [String] :comment (nil) a comment to include in profiling logs
|
226
|
-
# @option opts [Boolean] :compile_regex (true) whether BSON regex objects should be compiled into Ruby regexes.
|
227
|
-
# If false, a BSON::Regex object will be returned instead.
|
228
|
-
#
|
229
|
-
# @raise [ArgumentError]
|
230
|
-
# if timeout is set to false and find is not invoked in a block
|
231
|
-
#
|
232
|
-
# @raise [RuntimeError]
|
233
|
-
# if given unknown options
|
234
|
-
def find(selector={}, opts={})
|
235
|
-
opts = opts.dup
|
236
|
-
fields = opts.delete(:fields)
|
237
|
-
fields = ["_id"] if fields && fields.empty?
|
238
|
-
skip = opts.delete(:skip) || skip || 0
|
239
|
-
limit = opts.delete(:limit) || 0
|
240
|
-
sort = opts.delete(:sort)
|
241
|
-
hint = opts.delete(:hint)
|
242
|
-
named_hint = opts.delete(:named_hint)
|
243
|
-
snapshot = opts.delete(:snapshot)
|
244
|
-
batch_size = opts.delete(:batch_size)
|
245
|
-
timeout = (opts.delete(:timeout) == false) ? false : true
|
246
|
-
max_scan = opts.delete(:max_scan)
|
247
|
-
return_key = opts.delete(:return_key)
|
248
|
-
transformer = opts.delete(:transformer)
|
249
|
-
show_disk_loc = opts.delete(:show_disk_loc)
|
250
|
-
comment = opts.delete(:comment)
|
251
|
-
read = opts.delete(:read) || @read
|
252
|
-
tag_sets = opts.delete(:tag_sets) || @tag_sets
|
253
|
-
acceptable_latency = opts.delete(:acceptable_latency) || @acceptable_latency
|
254
|
-
compile_regex = opts.key?(:compile_regex) ? opts.delete(:compile_regex) : true
|
255
|
-
|
256
|
-
if timeout == false && !block_given?
|
257
|
-
raise ArgumentError, "Collection#find must be invoked with a block when timeout is disabled."
|
258
|
-
end
|
259
|
-
|
260
|
-
if hint
|
261
|
-
hint = normalize_hint_fields(hint)
|
262
|
-
else
|
263
|
-
hint = @hint # assumed to be normalized already
|
264
|
-
end
|
24
|
+
extend Forwardable
|
265
25
|
|
266
|
-
|
26
|
+
# @return [ Mongo::Database ] The database the collection resides in.
|
27
|
+
attr_reader :database
|
267
28
|
|
268
|
-
|
269
|
-
|
270
|
-
:fields => fields,
|
271
|
-
:skip => skip,
|
272
|
-
:limit => limit,
|
273
|
-
:order => sort,
|
274
|
-
:hint => hint || named_hint,
|
275
|
-
:snapshot => snapshot,
|
276
|
-
:timeout => timeout,
|
277
|
-
:batch_size => batch_size,
|
278
|
-
:transformer => transformer,
|
279
|
-
:max_scan => max_scan,
|
280
|
-
:show_disk_loc => show_disk_loc,
|
281
|
-
:return_key => return_key,
|
282
|
-
:read => read,
|
283
|
-
:tag_sets => tag_sets,
|
284
|
-
:comment => comment,
|
285
|
-
:acceptable_latency => acceptable_latency,
|
286
|
-
:compile_regex => compile_regex
|
287
|
-
})
|
29
|
+
# @return [ String ] The name of the collection.
|
30
|
+
attr_reader :name
|
288
31
|
|
289
|
-
|
290
|
-
|
291
|
-
yield cursor
|
292
|
-
ensure
|
293
|
-
cursor.close
|
294
|
-
end
|
295
|
-
nil
|
296
|
-
else
|
297
|
-
cursor
|
298
|
-
end
|
299
|
-
end
|
32
|
+
# @return [ Hash ] The collection options.
|
33
|
+
attr_reader :options
|
300
34
|
|
301
|
-
#
|
302
|
-
|
303
|
-
# @return [OrderedHash, Nil]
|
304
|
-
# a single document or nil if no result is found.
|
305
|
-
#
|
306
|
-
# @param [Hash, ObjectId, Nil] spec_or_object_id a hash specifying elements
|
307
|
-
# which must be present for a document to be included in the result set or an
|
308
|
-
# instance of ObjectId to be used as the value for an _id query.
|
309
|
-
# If nil, an empty selector, {}, will be used.
|
310
|
-
#
|
311
|
-
# @option opts [Hash]
|
312
|
-
# any valid options that can be send to Collection#find
|
313
|
-
#
|
314
|
-
# @raise [TypeError]
|
315
|
-
# if the argument is of an improper type.
|
316
|
-
def find_one(spec_or_object_id=nil, opts={})
|
317
|
-
spec = case spec_or_object_id
|
318
|
-
when nil
|
319
|
-
{}
|
320
|
-
when BSON::ObjectId
|
321
|
-
{:_id => spec_or_object_id}
|
322
|
-
when Hash
|
323
|
-
spec_or_object_id
|
324
|
-
else
|
325
|
-
raise TypeError, "spec_or_object_id must be an instance of ObjectId or Hash, or nil"
|
326
|
-
end
|
327
|
-
timeout = opts.delete(:max_time_ms)
|
328
|
-
cursor = find(spec, opts.merge(:limit => -1))
|
329
|
-
timeout ? cursor.max_time_ms(timeout).next_document : cursor.next_document
|
330
|
-
end
|
35
|
+
# Get client, cluster, read preference, and write concern from client.
|
36
|
+
def_delegators :database, :client, :cluster, :read_preference, :write_concern
|
331
37
|
|
332
|
-
#
|
333
|
-
|
334
|
-
# @param [Hash] doc
|
335
|
-
# the document to be saved. If the document already has an '_id' key,
|
336
|
-
# then an update (upsert) operation will be performed, and any existing
|
337
|
-
# document with that _id is overwritten. Otherwise an insert operation is performed.
|
338
|
-
#
|
339
|
-
# @return [ObjectId] the _id of the saved document.
|
340
|
-
#
|
341
|
-
# @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
|
342
|
-
# should be acknowledged.
|
343
|
-
# @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
|
344
|
-
# @option opts [Boolean] :j (false) If true, block until write operations have been committed
|
345
|
-
# to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
|
346
|
-
# ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
|
347
|
-
# fail with an exception if this option is used when the server is running without journaling.
|
348
|
-
# @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
|
349
|
-
# the server has synced all data files to disk. If the server is running with journaling, this acts the same as
|
350
|
-
# the 'j' option, blocking until write operations have been committed to the journal.
|
351
|
-
# Cannot be used in combination with 'j'.
|
352
|
-
#
|
353
|
-
# Options provided here will override any write concern options set on this collection,
|
354
|
-
# its database object, or the current connection. See the options
|
355
|
-
# for DB#get_last_error.
|
356
|
-
#
|
357
|
-
# @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
|
358
|
-
def save(doc, opts={})
|
359
|
-
if doc.has_key?(:_id) || doc.has_key?('_id')
|
360
|
-
id = doc[:_id] || doc['_id']
|
361
|
-
update({:_id => id}, doc, opts.merge!({:upsert => true}))
|
362
|
-
id
|
363
|
-
else
|
364
|
-
insert(doc, opts)
|
365
|
-
end
|
366
|
-
end
|
38
|
+
# Delegate to the cluster for the next primary.
|
39
|
+
def_delegators :cluster, :next_primary
|
367
40
|
|
368
|
-
#
|
369
|
-
|
370
|
-
# @param [Hash, Array] doc_or_docs
|
371
|
-
# a document (as a hash) or array of documents to be inserted.
|
372
|
-
#
|
373
|
-
# @return [ObjectId, Array]
|
374
|
-
# The _id of the inserted document or a list of _ids of all inserted documents.
|
375
|
-
# @return [[ObjectId, Array], [Hash, Array]]
|
376
|
-
# 1st, the _id of the inserted document or a list of _ids of all inserted documents.
|
377
|
-
# 2nd, a list of invalid documents.
|
378
|
-
# Return this result format only when :collect_on_error is true.
|
379
|
-
#
|
380
|
-
# @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
|
381
|
-
# should be acknowledged.
|
382
|
-
# @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
|
383
|
-
# @option opts [Boolean] :j (false) If true, block until write operations have been committed
|
384
|
-
# to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
|
385
|
-
# ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
|
386
|
-
# fail with an exception if this option is used when the server is running without journaling.
|
387
|
-
# @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
|
388
|
-
# the server has synced all data files to disk. If the server is running with journaling, this acts the same as
|
389
|
-
# the 'j' option, blocking until write operations have been committed to the journal.
|
390
|
-
# Cannot be used in combination with 'j'.
|
391
|
-
#
|
392
|
-
# Notes on write concern:
|
393
|
-
# Options provided here will override any write concern options set on this collection,
|
394
|
-
# its database object, or the current connection. See the options for +DB#get_last_error+.
|
395
|
-
#
|
396
|
-
# @option opts [Boolean] :continue_on_error (+false+) If true, then
|
397
|
-
# continue a bulk insert even if one of the documents inserted
|
398
|
-
# triggers a database assertion (as in a duplicate insert, for instance).
|
399
|
-
# If not acknowledging writes, the list of ids returned will
|
400
|
-
# include the object ids of all documents attempted on insert, even
|
401
|
-
# if some are rejected on error. When acknowledging writes, any error will raise an
|
402
|
-
# OperationFailure exception.
|
403
|
-
# MongoDB v2.0+.
|
404
|
-
# @option opts [Boolean] :collect_on_error (+false+) if true, then
|
405
|
-
# collects invalid documents as an array. Note that this option changes the result format.
|
406
|
-
#
|
407
|
-
# @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
|
408
|
-
def insert(doc_or_docs, opts={})
|
409
|
-
if doc_or_docs.respond_to?(:collect!)
|
410
|
-
doc_or_docs.collect! { |doc| @pk_factory.create_pk(doc) }
|
411
|
-
error_docs, errors, write_concern_errors, rest_ignored = batch_write(:insert, doc_or_docs, true, opts)
|
412
|
-
errors = write_concern_errors + errors
|
413
|
-
raise errors.last if !opts[:collect_on_error] && !errors.empty?
|
414
|
-
inserted_docs = doc_or_docs - error_docs
|
415
|
-
inserted_ids = inserted_docs.collect {|o| o[:_id] || o['_id']}
|
416
|
-
opts[:collect_on_error] ? [inserted_ids, error_docs] : inserted_ids
|
417
|
-
else
|
418
|
-
@pk_factory.create_pk(doc_or_docs)
|
419
|
-
send_write(:insert, nil, doc_or_docs, true, opts)
|
420
|
-
return doc_or_docs[:_id] || doc_or_docs['_id']
|
421
|
-
end
|
422
|
-
end
|
423
|
-
alias_method :<<, :insert
|
41
|
+
# Convenience delegators to find.
|
42
|
+
def_delegators :find, :parallel_scan
|
424
43
|
|
425
|
-
#
|
426
|
-
#
|
427
|
-
# @param [Hash] selector
|
428
|
-
# If specified, only matching documents will be removed.
|
429
|
-
#
|
430
|
-
# @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
|
431
|
-
# should be acknowledged.
|
432
|
-
# @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
|
433
|
-
# @option opts [Boolean] :j (false) If true, block until write operations have been committed
|
434
|
-
# to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
|
435
|
-
# ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
|
436
|
-
# fail with an exception if this option is used when the server is running without journaling.
|
437
|
-
# @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
|
438
|
-
# the server has synced all data files to disk. If the server is running with journaling, this acts the same as
|
439
|
-
# the 'j' option, blocking until write operations have been committed to the journal.
|
440
|
-
# Cannot be used in combination with 'j'.
|
441
|
-
# @option opts [Integer] :limit (0) Set limit option, currently only 0 for all or 1 for just one.
|
442
|
-
#
|
443
|
-
# Notes on write concern:
|
444
|
-
# Options provided here will override any write concern options set on this collection,
|
445
|
-
# its database object, or the current connection. See the options for +DB#get_last_error+.
|
44
|
+
# Check if a collection is equal to another object. Will check the name and
|
45
|
+
# the database for equality.
|
446
46
|
#
|
447
|
-
# @example
|
448
|
-
#
|
449
|
-
# users.remove({})
|
47
|
+
# @example Check collection equality.
|
48
|
+
# collection == other
|
450
49
|
#
|
451
|
-
# @
|
452
|
-
# users.remove({:expire => {"$lte" => Time.now}})
|
50
|
+
# @param [ Object ] other The object to check.
|
453
51
|
#
|
454
|
-
# @return [
|
455
|
-
# Otherwise, returns true.
|
52
|
+
# @return [ true, false ] If the objects are equal.
|
456
53
|
#
|
457
|
-
# @
|
458
|
-
def
|
459
|
-
|
54
|
+
# @since 2.0.0
|
55
|
+
def ==(other)
|
56
|
+
return false unless other.is_a?(Collection)
|
57
|
+
name == other.name && database == other.database && options == other.options
|
460
58
|
end
|
461
59
|
|
462
|
-
#
|
60
|
+
# Is the collection capped?
|
463
61
|
#
|
464
|
-
# @
|
465
|
-
#
|
466
|
-
# the update command currently updates only the first document matching the
|
467
|
-
# given selector. If you want all matching documents to be updated, be sure
|
468
|
-
# to specify :multi => true.
|
469
|
-
# @param [Hash] document
|
470
|
-
# a hash specifying the fields to be changed in the selected document,
|
471
|
-
# or (in the case of an upsert) the document to be inserted
|
62
|
+
# @example Is the collection capped?
|
63
|
+
# collection.capped?
|
472
64
|
#
|
473
|
-
# @
|
474
|
-
# @option opts [Boolean] :multi (+false+) update all documents matching the selector, as opposed to
|
475
|
-
# just the first matching document. Note: only works in MongoDB 1.1.3 or later.
|
476
|
-
# @option opts [String, Integer, Symbol] :w (1) Set default number of nodes to which a write
|
477
|
-
# should be acknowledged.
|
478
|
-
# @option opts [Integer] :wtimeout (nil) Set replica set acknowledgement timeout.
|
479
|
-
# @option opts [Boolean] :j (false) If true, block until write operations have been committed
|
480
|
-
# to the journal. Cannot be used in combination with 'fsync'. Prior to MongoDB 2.6 this option was
|
481
|
-
# ignored if the server was running without journaling. Starting with MongoDB 2.6, write operations will
|
482
|
-
# fail with an exception if this option is used when the server is running without journaling.
|
483
|
-
# @option opts [Boolean] :fsync (false) If true, and the server is running without journaling, blocks until
|
484
|
-
# the server has synced all data files to disk. If the server is running with journaling, this acts the same as
|
485
|
-
# the 'j' option, blocking until write operations have been committed to the journal.
|
486
|
-
# Cannot be used in combination with 'j'.
|
65
|
+
# @return [ true, false ] If the collection is capped.
|
487
66
|
#
|
488
|
-
#
|
489
|
-
|
490
|
-
|
491
|
-
#
|
492
|
-
# @return [Hash, true] Returns a Hash containing the last error object if acknowledging writes.
|
493
|
-
# Otherwise, returns true.
|
494
|
-
#
|
495
|
-
# @raise [Mongo::OperationFailure] will be raised iff :w > 0 and the operation fails.
|
496
|
-
def update(selector, document, opts={})
|
497
|
-
send_write(:update, selector, document, !document.keys.first.to_s.start_with?("$"), opts)
|
67
|
+
# @since 2.0.0
|
68
|
+
def capped?
|
69
|
+
database.command(:collstats => name).documents[0]['capped']
|
498
70
|
end
|
499
71
|
|
500
|
-
#
|
501
|
-
#
|
502
|
-
# @param [String, Array] spec
|
503
|
-
# should be either a single field name or an array of
|
504
|
-
# [field name, type] pairs. Index types should be specified
|
505
|
-
# as Mongo::ASCENDING, Mongo::DESCENDING, Mongo::GEO2D, Mongo::GEO2DSPHERE, Mongo::GEOHAYSTACK,
|
506
|
-
# Mongo::TEXT or Mongo::HASHED.
|
507
|
-
#
|
508
|
-
# Note that geospatial indexing only works with versions of MongoDB >= 1.3.3+. Keep in mind, too,
|
509
|
-
# that in order to geo-index a given field, that field must reference either an array or a sub-object
|
510
|
-
# where the first two values represent x- and y-coordinates. Examples can be seen below.
|
511
|
-
#
|
512
|
-
# Also note that it is permissible to create compound indexes that include a geospatial index as
|
513
|
-
# long as the geospatial index comes first.
|
514
|
-
#
|
515
|
-
# If your code calls create_index frequently, you can use Collection#ensure_index to cache these calls
|
516
|
-
# and thereby prevent excessive round trips to the database.
|
517
|
-
#
|
518
|
-
# @option opts [Boolean] :unique (false) if true, this index will enforce a uniqueness constraint.
|
519
|
-
# @option opts [Boolean] :background (false) indicate that the index should be built in the background. This
|
520
|
-
# feature is only available in MongoDB >= 1.3.2.
|
521
|
-
# @option opts [Boolean] :drop_dups (nil) (DEPRECATED) If creating a unique index on a collection with
|
522
|
-
# pre-existing records, this option will keep the first document the database indexes and drop all subsequent
|
523
|
-
# with duplicate values.
|
524
|
-
# @option opts [Integer] :bucket_size (nil) For use with geoHaystack indexes. Number of documents to group
|
525
|
-
# together within a certain proximity to a given longitude and latitude.
|
526
|
-
# @option opts [Integer] :min (nil) specify the minimum longitude and latitude for a geo index.
|
527
|
-
# @option opts [Integer] :max (nil) specify the maximum longitude and latitude for a geo index.
|
528
|
-
#
|
529
|
-
# @example Creating a compound index using a hash: (Ruby 1.9+ Syntax)
|
530
|
-
# @posts.create_index({'subject' => Mongo::ASCENDING, 'created_at' => Mongo::DESCENDING})
|
531
|
-
#
|
532
|
-
# @example Creating a compound index:
|
533
|
-
# @posts.create_index([['subject', Mongo::ASCENDING], ['created_at', Mongo::DESCENDING]])
|
534
|
-
#
|
535
|
-
# @example Creating a geospatial index using a hash: (Ruby 1.9+ Syntax)
|
536
|
-
# @restaurants.create_index(:location => Mongo::GEO2D)
|
72
|
+
# Force the collection to be created in the database.
|
537
73
|
#
|
538
|
-
# @example
|
539
|
-
#
|
74
|
+
# @example Force the collection to be created.
|
75
|
+
# collection.create
|
540
76
|
#
|
541
|
-
#
|
542
|
-
# {'location': [0, 50]}
|
543
|
-
# {'location': {'x' => 0, 'y' => 50}}
|
544
|
-
# {'location': {'latitude' => 0, 'longitude' => 50}}
|
77
|
+
# @return [ Result ] The result of the command.
|
545
78
|
#
|
546
|
-
# @
|
547
|
-
|
548
|
-
|
549
|
-
# @note The :drop_dups option is no longer supported by MongoDB starting with server version 2.7.5.
|
550
|
-
# The option is silently ignored by the server and unique index builds using the option will
|
551
|
-
# fail if a duplicate value is detected.
|
552
|
-
#
|
553
|
-
# @note Note that the options listed may be subset of those available.
|
554
|
-
# See the MongoDB documentation for a full list of supported options by server version.
|
555
|
-
#
|
556
|
-
# @return [String] the name of the index created.
|
557
|
-
def create_index(spec, opts={})
|
558
|
-
options = opts.dup
|
559
|
-
options[:dropDups] = options.delete(:drop_dups) if options[:drop_dups]
|
560
|
-
options[:bucketSize] = options.delete(:bucket_size) if options[:bucket_size]
|
561
|
-
field_spec = parse_index_spec(spec)
|
562
|
-
name = options.delete(:name) || generate_index_name(field_spec)
|
563
|
-
name = name.to_s if name
|
564
|
-
generate_indexes(field_spec, name, options)
|
565
|
-
name
|
79
|
+
# @since 2.0.0
|
80
|
+
def create
|
81
|
+
database.command({ :create => name }.merge(options))
|
566
82
|
end
|
567
83
|
|
568
|
-
#
|
569
|
-
#
|
570
|
-
# Any changes to an index will be propagated through regardless of cache time (e.g., a change of index direction)
|
84
|
+
# Drop the collection. Will also drop all indexes associated with the
|
85
|
+
# collection.
|
571
86
|
#
|
572
|
-
#
|
87
|
+
# @example Drop the collection.
|
88
|
+
# collection.drop
|
573
89
|
#
|
574
|
-
# @
|
575
|
-
# Time t: @posts.ensure_index(:subject => Mongo::ASCENDING) -- calls create_index and
|
576
|
-
# sets the 5 minute cache
|
577
|
-
# Time t+2min : @posts.ensure_index(:subject => Mongo::ASCENDING) -- doesn't do anything
|
578
|
-
# Time t+3min : @posts.ensure_index(:something_else => Mongo::ASCENDING) -- calls create_index
|
579
|
-
# and sets 5 minute cache
|
580
|
-
# Time t+10min : @posts.ensure_index(:subject => Mongo::ASCENDING) -- calls create_index and
|
581
|
-
# resets the 5 minute counter
|
90
|
+
# @return [ Result ] The result of the command.
|
582
91
|
#
|
583
|
-
# @
|
584
|
-
# The option is silently ignored by the server and unique index builds using the option will
|
585
|
-
# fail if a duplicate value is detected.
|
586
|
-
#
|
587
|
-
# @note Note that the options listed may be subset of those available.
|
588
|
-
# See the MongoDB documentation for a full list of supported options by server version.
|
589
|
-
#
|
590
|
-
# @return [String] the name of the index.
|
591
|
-
def ensure_index(spec, opts={})
|
592
|
-
now = Time.now.utc.to_i
|
593
|
-
options = opts.dup
|
594
|
-
options[:dropDups] = options.delete(:drop_dups) if options[:drop_dups]
|
595
|
-
options[:bucketSize] = options.delete(:bucket_size) if options[:bucket_size]
|
596
|
-
field_spec = parse_index_spec(spec)
|
597
|
-
name = options.delete(:name) || generate_index_name(field_spec)
|
598
|
-
name = name.to_s if name
|
599
|
-
|
600
|
-
if !@cache[name] || @cache[name] <= now
|
601
|
-
generate_indexes(field_spec, name, options)
|
602
|
-
end
|
603
|
-
|
604
|
-
# Reset the cache here in case there are any errors inserting. Best to be safe.
|
605
|
-
@cache[name] = now + @cache_time
|
606
|
-
name
|
607
|
-
end
|
608
|
-
|
609
|
-
# Drop a specified index.
|
610
|
-
#
|
611
|
-
# @param [String] name
|
612
|
-
def drop_index(name)
|
613
|
-
if name.is_a?(Array)
|
614
|
-
return drop_index(index_name(name))
|
615
|
-
end
|
616
|
-
@cache[name.to_s] = nil
|
617
|
-
@db.drop_index(@name, name)
|
618
|
-
end
|
619
|
-
|
620
|
-
# Drop all indexes.
|
621
|
-
def drop_indexes
|
622
|
-
@cache = {}
|
623
|
-
|
624
|
-
# Note: calling drop_indexes with no args will drop them all.
|
625
|
-
@db.drop_index(@name, '*')
|
626
|
-
end
|
627
|
-
|
628
|
-
# Drop the entire collection. USE WITH CAUTION.
|
92
|
+
# @since 2.0.0
|
629
93
|
def drop
|
630
|
-
|
631
|
-
end
|
632
|
-
|
633
|
-
# Atomically update and return a document using MongoDB's findAndModify command. (MongoDB > 1.3.0)
|
634
|
-
#
|
635
|
-
# @option opts [Hash] :query ({}) a query selector document for matching
|
636
|
-
# the desired document.
|
637
|
-
# @option opts [Hash] :update (nil) the update operation to perform on the
|
638
|
-
# matched document.
|
639
|
-
# @option opts [Array, String, OrderedHash] :sort ({}) specify a sort
|
640
|
-
# option for the query using any
|
641
|
-
# of the sort options available for Cursor#sort. Sort order is important
|
642
|
-
# if the query will be matching multiple documents since only the first
|
643
|
-
# matching document will be updated and returned.
|
644
|
-
# @option opts [Boolean] :remove (false) If true, removes the returned
|
645
|
-
# document from the collection.
|
646
|
-
# @option opts [Boolean] :new (false) If true, returns the updated
|
647
|
-
# document; otherwise, returns the document prior to update.
|
648
|
-
# @option opts [Boolean] :upsert (false) If true, creates a new document
|
649
|
-
# if the query returns no document.
|
650
|
-
# @option opts [Hash] :fields (nil) A subset of fields to return.
|
651
|
-
# Specify an inclusion of a field with 1. _id is included by default and must
|
652
|
-
# be explicitly excluded.
|
653
|
-
# @option opts [Boolean] :full_response (false) If true, returns the entire
|
654
|
-
# response object from the server including 'ok' and 'lastErrorObject'.
|
655
|
-
#
|
656
|
-
# @return [Hash] the matched document.
|
657
|
-
def find_and_modify(opts={})
|
658
|
-
full_response = opts.delete(:full_response)
|
659
|
-
|
660
|
-
cmd = BSON::OrderedHash.new
|
661
|
-
cmd[:findandmodify] = @name
|
662
|
-
cmd.merge!(opts)
|
663
|
-
|
664
|
-
cmd[:sort] =
|
665
|
-
Mongo::Support.format_order_clause(opts[:sort]) if opts[:sort]
|
666
|
-
|
667
|
-
full_response ? @db.command(cmd) : @db.command(cmd)['value']
|
94
|
+
database.command(:drop => name)
|
668
95
|
end
|
669
96
|
|
670
|
-
#
|
671
|
-
# @note Aggregate requires server version >= 2.1.1
|
672
|
-
# @note Field References: Within an expression, field names must be quoted and prefixed by a dollar sign ($).
|
97
|
+
# Find documents in the collection.
|
673
98
|
#
|
674
|
-
# @example
|
675
|
-
#
|
99
|
+
# @example Find documents in the collection by a selector.
|
100
|
+
# collection.find(name: 1)
|
676
101
|
#
|
677
|
-
# @example
|
678
|
-
#
|
102
|
+
# @example Get all documents in a collection.
|
103
|
+
# collection.find
|
679
104
|
#
|
680
|
-
# @param [
|
105
|
+
# @param [ Hash ] filter The filter to use in the find.
|
681
106
|
#
|
682
|
-
#
|
683
|
-
# renaming fields,or creating/populating fields that hold sub-documents.
|
107
|
+
# @return [ CollectionView ] The collection view.
|
684
108
|
#
|
685
|
-
#
|
686
|
-
|
687
|
-
|
688
|
-
#
|
689
|
-
# '$skip' Skips over the specified number of documents and passes the rest along the pipeline.
|
690
|
-
#
|
691
|
-
# '$unwind' Peels off elements of an array individually, returning one document for each member.
|
692
|
-
#
|
693
|
-
# '$group' Groups documents for calculating aggregate values.
|
694
|
-
#
|
695
|
-
# '$sort' Sorts all input documents and returns them to the pipeline in sorted order.
|
696
|
-
#
|
697
|
-
# '$out' The name of a collection to which the result set will be saved.
|
698
|
-
#
|
699
|
-
# @option opts [:primary, :secondary] :read Read preference indicating which server to perform this operation
|
700
|
-
# on. If $out is specified and :read is not :primary, the aggregation will be rerouted to the primary with
|
701
|
-
# a warning. See Collection#find for more details.
|
702
|
-
# @option opts [String] :comment (nil) a comment to include in profiling logs
|
703
|
-
# @option opts [Hash] :cursor return a cursor object instead of an Array. Takes an optional batchSize parameter
|
704
|
-
# to specify the maximum size, in documents, of the first batch returned.
|
705
|
-
#
|
706
|
-
# @return [Array] An Array with the aggregate command's results.
|
707
|
-
#
|
708
|
-
# @raise MongoArgumentError if operators either aren't supplied or aren't in the correct format.
|
709
|
-
# @raise MongoOperationFailure if the aggregate command fails.
|
710
|
-
#
|
711
|
-
def aggregate(pipeline=nil, opts={})
|
712
|
-
raise MongoArgumentError, "pipeline must be an array of operators" unless pipeline.class == Array
|
713
|
-
raise MongoArgumentError, "pipeline operators must be hashes" unless pipeline.all? { |op| op.class == Hash }
|
714
|
-
|
715
|
-
selector = BSON::OrderedHash.new
|
716
|
-
selector['aggregate'] = self.name
|
717
|
-
selector['pipeline'] = pipeline
|
718
|
-
|
719
|
-
result = @db.command(selector, command_options(opts))
|
720
|
-
unless Mongo::Support.ok?(result)
|
721
|
-
raise Mongo::OperationFailure, "aggregate failed: #{result['errmsg']}"
|
722
|
-
end
|
723
|
-
|
724
|
-
if result.key?('cursor')
|
725
|
-
cursor_info = result['cursor']
|
726
|
-
pinned_pool = @connection.pinned_pool
|
727
|
-
pinned_pool = pinned_pool[:pool] if pinned_pool.respond_to?(:keys)
|
728
|
-
|
729
|
-
seed = {
|
730
|
-
:cursor_id => cursor_info['id'],
|
731
|
-
:first_batch => cursor_info['firstBatch'],
|
732
|
-
:pool => pinned_pool,
|
733
|
-
:ns => cursor_info['ns']
|
734
|
-
}
|
735
|
-
|
736
|
-
return Cursor.new(self, seed.merge!(opts))
|
737
|
-
|
738
|
-
elsif selector['pipeline'].any? { |op| op.key?('$out') || op.key?(:$out) }
|
739
|
-
return result
|
740
|
-
end
|
741
|
-
|
742
|
-
result['result'] || result
|
109
|
+
# Find documents in this collection matching the provided filter.
#
# @example Find documents in the collection.
#   collection.find(name: 'test')
#
# @param [ Hash ] filter The filter to apply; when nil or omitted an
#   empty filter (match everything) is used.
#
# @return [ View ] A lazy view over the matching documents.
#
# @since 2.0.0
def find(filter = nil)
  criteria = filter
  criteria ||= {}
  View.new(self, criteria)
end
|
744
113
|
|
745
|
-
#
|
114
|
+
# Get a view of all indexes for this collection. Can be iterated or has
|
115
|
+
# more operations.
|
746
116
|
#
|
747
|
-
# @
|
748
|
-
#
|
117
|
+
# @example Get the index view.
|
118
|
+
# collection.indexes
|
749
119
|
#
|
750
|
-
# @
|
751
|
-
# the operation to a subset of the collection.
|
752
|
-
# @option opts [Array] :sort ([]) an array of [key, direction] pairs to sort by. Direction should
|
753
|
-
# be specified as Mongo::ASCENDING (or :ascending / :asc) or Mongo::DESCENDING (or :descending / :desc)
|
754
|
-
# @option opts [Integer] :limit (nil) if passing a query, number of objects to return from the collection.
|
755
|
-
# @option opts [String, BSON::Code] :finalize (nil) a javascript function to apply to the result set after the
|
756
|
-
# map/reduce operation has finished.
|
757
|
-
# @option opts [String, Hash] :out Location of the result of the map-reduce operation. You can output to a
|
758
|
-
# collection, output to a collection with an action, or output inline. You may output to a collection
|
759
|
-
# when performing map reduce operations on the primary members of the set; on secondary members you
|
760
|
-
# may only use the inline output. See the server mapReduce documentation for available options.
|
761
|
-
# @option opts [Boolean] :keeptemp (false) if true, the generated collection will be persisted. The default
|
762
|
-
# is false. Note that this option has no effect is versions of MongoDB > v1.7.6.
|
763
|
-
# @option opts [Boolean ] :verbose (false) if true, provides statistics on job execution time.
|
764
|
-
# @option opts [Boolean] :raw (false) if true, return the raw result object from the map_reduce command, and not
|
765
|
-
# the instantiated collection that's returned by default. Note if a collection name isn't returned in the
|
766
|
-
# map-reduce output (as, for example, when using :out => { :inline => 1 }), then you must specify this option
|
767
|
-
# or an ArgumentError will be raised.
|
768
|
-
# @option opts [:primary, :secondary] :read Read preference indicating which server to run this map-reduce
|
769
|
-
# on. See Collection#find for more details.
|
770
|
-
# @option opts [String] :comment (nil) a comment to include in profiling logs
|
120
|
+
# @param [ Hash ] options Options for getting a list of all indexes.
|
771
121
|
#
|
772
|
-
# @return [
|
122
|
+
# @return [ View::Index ] The index view.
|
773
123
|
#
|
774
|
-
# @
|
775
|
-
|
776
|
-
|
777
|
-
def map_reduce(map, reduce, opts={})
|
778
|
-
opts = opts.dup
|
779
|
-
map = BSON::Code.new(map) unless map.is_a?(BSON::Code)
|
780
|
-
reduce = BSON::Code.new(reduce) unless reduce.is_a?(BSON::Code)
|
781
|
-
raw = opts.delete(:raw)
|
782
|
-
|
783
|
-
hash = BSON::OrderedHash.new
|
784
|
-
hash['mapreduce'] = self.name
|
785
|
-
hash['map'] = map
|
786
|
-
hash['reduce'] = reduce
|
787
|
-
hash['out'] = opts.delete(:out)
|
788
|
-
hash['sort'] = Mongo::Support.format_order_clause(opts.delete(:sort)) if opts.key?(:sort)
|
789
|
-
|
790
|
-
result = @db.command(hash, command_options(opts))
|
791
|
-
unless Mongo::Support.ok?(result)
|
792
|
-
raise Mongo::OperationFailure, "map-reduce failed: #{result['errmsg']}"
|
793
|
-
end
|
794
|
-
|
795
|
-
if raw
|
796
|
-
result
|
797
|
-
elsif result['result']
|
798
|
-
if result['result'].is_a?(BSON::OrderedHash) &&
|
799
|
-
result['result'].key?('db') &&
|
800
|
-
result['result'].key?('collection')
|
801
|
-
otherdb = @db.connection[result['result']['db']]
|
802
|
-
otherdb[result['result']['collection']]
|
803
|
-
else
|
804
|
-
@db[result["result"]]
|
805
|
-
end
|
806
|
-
else
|
807
|
-
raise ArgumentError, "Could not instantiate collection from result. If you specified " +
|
808
|
-
"{:out => {:inline => true}}, then you must also specify :raw => true to get the results."
|
809
|
-
end
|
124
|
+
# Get a view of all indexes for this collection. Can be iterated or has
# more operations.
#
# @example Get the index view.
#   collection.indexes
#
# @param [ Hash ] options Options for getting a list of all indexes.
#
# @return [ Index::View ] The index view.
#
# @since 2.0.0
def indexes(options = {})
  view_options = options
  Index::View.new(self, view_options)
end
|
811
|
-
alias :mapreduce :map_reduce
|
812
128
|
|
813
|
-
#
|
129
|
+
# Instantiate a new collection.
|
814
130
|
#
|
815
|
-
# @
|
816
|
-
#
|
131
|
+
# @example Instantiate a new collection.
|
132
|
+
# Mongo::Collection.new(database, 'test')
|
817
133
|
#
|
818
|
-
# @
|
819
|
-
# @
|
820
|
-
# @
|
821
|
-
# which the aggregation is run (optional).
|
822
|
-
# @option opts [Hash] :initial the initial value of the aggregation counter object (required).
|
823
|
-
# @option opts [String, BSON::Code] :reduce (nil) a JavaScript aggregation function (required).
|
824
|
-
# @option opts [String, BSON::Code] :finalize (nil) a JavaScript function that receives and modifies
|
825
|
-
# each of the resultant grouped objects. Available only when group is run with command
|
826
|
-
# set to true.
|
827
|
-
# @option opts [:primary, :secondary] :read Read preference indicating which server to perform this group
|
828
|
-
# on. See Collection#find for more details.
|
829
|
-
# @option opts [String] :comment (nil) a comment to include in profiling logs
|
134
|
+
# @param [ Mongo::Database ] database The collection's database.
|
135
|
+
# @param [ String, Symbol ] name The collection name.
|
136
|
+
# @param [ Hash ] options The collection options.
|
830
137
|
#
|
831
|
-
# @
|
832
|
-
def
|
833
|
-
|
834
|
-
|
835
|
-
|
836
|
-
|
837
|
-
raise MongoArgumentError, "Group takes either an array of fields to group by or a JavaScript function" +
|
838
|
-
"in the form of a String or BSON::Code."
|
839
|
-
end
|
840
|
-
|
841
|
-
warn "Collection#group no longer takes a list of parameters. This usage is deprecated and will be removed in v2.0." +
|
842
|
-
"Check out the new API at http://api.mongodb.org/ruby/current/Mongo/Collection.html#group-instance_method"
|
843
|
-
|
844
|
-
reduce = BSON::Code.new(reduce) unless reduce.is_a?(BSON::Code)
|
845
|
-
|
846
|
-
group_command = {
|
847
|
-
"group" => {
|
848
|
-
"ns" => @name,
|
849
|
-
"$reduce" => reduce,
|
850
|
-
"cond" => condition,
|
851
|
-
"initial" => initial
|
852
|
-
}
|
853
|
-
}
|
854
|
-
|
855
|
-
unless opts.nil?
|
856
|
-
if opts.is_a? Array
|
857
|
-
key_type = "key"
|
858
|
-
key_value = {}
|
859
|
-
opts.each { |k| key_value[k] = 1 }
|
860
|
-
else
|
861
|
-
key_type = "$keyf"
|
862
|
-
key_value = opts.is_a?(BSON::Code) ? opts : BSON::Code.new(opts)
|
863
|
-
end
|
864
|
-
|
865
|
-
group_command["group"][key_type] = key_value
|
866
|
-
end
|
867
|
-
|
868
|
-
finalize = BSON::Code.new(finalize) if finalize.is_a?(String)
|
869
|
-
if finalize.is_a?(BSON::Code)
|
870
|
-
group_command['group']['finalize'] = finalize
|
871
|
-
end
|
872
|
-
|
873
|
-
result = @db.command(group_command)
|
874
|
-
|
875
|
-
if Mongo::Support.ok?(result)
|
876
|
-
result["retval"]
|
877
|
-
else
|
878
|
-
raise OperationFailure, "group command failed: #{result['errmsg']}"
|
879
|
-
end
|
138
|
+
# Instantiate a new collection.
#
# @example Instantiate a new collection.
#   Mongo::Collection.new(database, 'test')
#
# @param [ Mongo::Database ] database The collection's database.
# @param [ String, Symbol ] name The collection name.
# @param [ Hash ] options The collection options.
#
# @raise [ Error::InvalidCollectionName ] If the name is nil or empty.
#
# @since 2.0.0
def initialize(database, name, options = {})
  # Previously only nil was rejected; an empty string is equally invalid
  # (the server refuses empty collection names), so reject it up front.
  raise Error::InvalidCollectionName.new if name.nil? || name.to_s.empty?
  @database = database
  @name = name.to_s.freeze
  @options = options.freeze
end
|
881
145
|
|
882
|
-
#
|
883
|
-
# Returns a list of up to num_cursors cursors that can be iterated concurrently. As long as the collection
|
884
|
-
# is not modified during scanning, each document appears once in one of the cursors' result sets.
|
146
|
+
# Get a pretty printed string inspection for the collection.
|
885
147
|
#
|
886
|
-
# @
|
148
|
+
# @example Inspect the collection.
|
149
|
+
# collection.inspect
|
887
150
|
#
|
888
|
-
# @
|
889
|
-
# @param [Hash] opts
|
151
|
+
# @return [ String ] The collection inspection.
|
890
152
|
#
|
891
|
-
# @
|
892
|
-
def
|
893
|
-
|
894
|
-
cmd[:parallelCollectionScan] = self.name
|
895
|
-
cmd[:numCursors] = num_cursors
|
896
|
-
result = @db.command(cmd, command_options(opts))
|
897
|
-
|
898
|
-
result['cursors'].collect do |cursor_info|
|
899
|
-
pinned_pool = @connection.pinned_pool
|
900
|
-
pinned_pool = pinned_pool[:pool] if pinned_pool.respond_to?(:keys)
|
901
|
-
|
902
|
-
seed = {
|
903
|
-
:cursor_id => cursor_info['cursor']['id'],
|
904
|
-
:first_batch => cursor_info['cursor']['firstBatch'],
|
905
|
-
:pool => pinned_pool,
|
906
|
-
:ns => cursor_info['ns']
|
907
|
-
}
|
908
|
-
Cursor.new(self, seed.merge!(opts))
|
909
|
-
end
|
910
|
-
|
153
|
+
# Get a pretty printed string inspection for the collection.
#
# @example Inspect the collection.
#   collection.inspect
#
# @return [ String ] The collection inspection.
#
# @since 2.0.0
def inspect
  format("<Mongo::Collection:0x%d namespace=%s>", object_id, namespace)
end
|
912
157
|
|
913
|
-
|
914
|
-
|
915
|
-
# Run a group command assembled from the provided options hash.
#
# @param [ Hash ] opts The group options. :reduce and :initial are
#   required; :finalize, :cond, :key and :keyf are optional. Remaining
#   entries are passed through as command options.
#
# @raise [ MongoArgumentError ] If :reduce or :initial is missing.
#
# @return [ Array ] The grouped documents (the command's "retval").
def new_group(opts={})
  reduce   = opts.delete(:reduce)
  finalize = opts.delete(:finalize)
  cond     = opts.delete(:cond) || {}
  initial  = opts.delete(:initial)

  unless reduce && initial
    raise MongoArgumentError, "Group requires at minimum values for initial and reduce."
  end

  group_spec = {
    "ns" => @name,
    "$reduce" => reduce.to_bson_code,
    "cond" => cond,
    "initial" => initial
  }
  group_spec['finalize'] = finalize.to_bson_code if finalize

  if key = opts.delete(:key)
    # A single field name becomes a one-element key list.
    fields = (key.is_a?(String) || key.is_a?(Symbol)) ? [key] : key
    group_spec["key"] = fields.each_with_object({}) { |k, acc| acc[k] = 1 }
  elsif keyf = opts.delete(:keyf)
    group_spec["$keyf"] = keyf.to_bson_code
  end

  result = @db.command({ "group" => group_spec }, command_options(opts))
  result["retval"]
end
|
952
|
-
|
953
|
-
public
|
954
|
-
|
955
|
-
# Return a list of distinct values for +key+ across all
|
956
|
-
# documents in the collection. The key may use dot notation
|
957
|
-
# to reach into an embedded object.
|
958
|
-
#
|
959
|
-
# @param [String, Symbol, OrderedHash] key or hash to group by.
|
960
|
-
# @param [Hash] query a selector for limiting the result set over which to group.
|
961
|
-
# @param [Hash] opts the options for this distinct operation.
|
158
|
+
# Insert a single document into the collection.
|
962
159
|
#
|
963
|
-
# @
|
964
|
-
#
|
965
|
-
# @option opts [String] :comment (nil) a comment to include in profiling logs
|
160
|
+
# @example Insert a document into the collection.
|
161
|
+
# collection.insert_one({ name: 'test' })
|
966
162
|
#
|
967
|
-
# @
|
968
|
-
#
|
969
|
-
# @collection.save({:zip => 94108, :name => {:age => 24}})
|
970
|
-
# @collection.save({:zip => 10010, :name => {:age => 27}})
|
971
|
-
# @collection.save({:zip => 99701, :name => {:age => 24}})
|
972
|
-
# @collection.save({:zip => 94108, :name => {:age => 27}})
|
163
|
+
# @param [ Hash ] document The document to insert.
|
164
|
+
# @param [ Hash ] options The insert options.
|
973
165
|
#
|
974
|
-
#
|
975
|
-
# [10010, 94108, 99701]
|
976
|
-
# @collection.distinct("name.age")
|
977
|
-
# [27, 24]
|
166
|
+
# @return [ Result ] The database response wrapper.
|
978
167
|
#
|
979
|
-
#
|
980
|
-
|
981
|
-
|
982
|
-
# [27]
|
983
|
-
#
|
984
|
-
# @return [Array] an array of distinct values.
|
985
|
-
def distinct(key, query=nil, opts={})
|
986
|
-
raise MongoArgumentError unless [String, Symbol].include?(key.class)
|
987
|
-
command = BSON::OrderedHash.new
|
988
|
-
command[:distinct] = @name
|
989
|
-
command[:key] = key.to_s
|
990
|
-
command[:query] = query
|
991
|
-
|
992
|
-
@db.command(command, command_options(opts))["values"]
|
168
|
+
# Insert a single document into the collection.
#
# @example Insert a document into the collection.
#   collection.insert_one({ name: 'test' })
#
# @param [ Hash ] document The document to insert.
# @param [ Hash ] options The insert options.
#
# @return [ Result ] The database response wrapper.
#
# @since 2.0.0
def insert_one(document, options = {})
  batch = [ document ]
  insert_many(batch, options)
end
|
994
172
|
|
995
|
-
#
|
173
|
+
# Insert the provided documents into the collection.
|
996
174
|
#
|
997
|
-
#
|
998
|
-
#
|
175
|
+
# @example Insert documents into the collection.
|
176
|
+
# collection.insert_many([{ name: 'test' }])
|
999
177
|
#
|
1000
|
-
# @param [
|
178
|
+
# @param [ Array<Hash> ] documents The documents to insert.
|
179
|
+
# @param [ Hash ] options The insert options.
|
1001
180
|
#
|
1002
|
-
# @return [
|
181
|
+
# @return [ Result ] The database response wrapper.
|
1003
182
|
#
|
1004
|
-
# @
|
1005
|
-
def
|
1006
|
-
|
1007
|
-
|
1008
|
-
|
1009
|
-
|
1010
|
-
|
1011
|
-
|
1012
|
-
|
1013
|
-
|
1014
|
-
if new_name.empty? or new_name.include? ".."
|
1015
|
-
raise Mongo::InvalidNSName, "collection names cannot be empty"
|
1016
|
-
end
|
1017
|
-
if new_name.include? "$"
|
1018
|
-
raise Mongo::InvalidNSName, "collection names must not contain '$'"
|
1019
|
-
end
|
1020
|
-
if new_name.match(/^\./) or new_name.match(/\.$/)
|
1021
|
-
raise Mongo::InvalidNSName, "collection names must not start or end with '.'"
|
1022
|
-
end
|
1023
|
-
|
1024
|
-
@db.rename_collection(@name, new_name)
|
1025
|
-
@name = new_name
|
183
|
+
# Insert the provided documents into the collection.
#
# @example Insert documents into the collection.
#   collection.insert_many([{ name: 'test' }])
#
# @param [ Array<Hash> ] documents The documents to insert.
# @param [ Hash ] options The insert options.
#
# @return [ Result ] The database response wrapper.
#
# @since 2.0.0
def insert_many(documents, options = {})
  spec = {
    :documents => documents,
    :db_name => database.name,
    :coll_name => name,
    :write_concern => write_concern,
    :options => options
  }
  operation = Operation::Write::Insert.new(spec)
  operation.execute(next_primary.context)
end
|
1027
193
|
|
1028
|
-
#
|
194
|
+
# Execute a batch of bulk write operations.
|
1029
195
|
#
|
1030
|
-
# @
|
1031
|
-
|
1032
|
-
@db.index_information(@name)
|
1033
|
-
end
|
1034
|
-
|
1035
|
-
# Return a hash containing options that apply to this collection.
|
1036
|
-
# For all possible keys and values, see DB#create_collection.
|
196
|
+
# @example Execute a bulk write.
|
197
|
+
# collection.bulk_write(operations, options)
|
1037
198
|
#
|
1038
|
-
# @
|
1039
|
-
|
1040
|
-
|
1041
|
-
|
1042
|
-
|
1043
|
-
# Return stats on the collection. Uses MongoDB's collstats command.
|
199
|
+
# @param [ Array<Hash> ] operations The operations.
|
200
|
+
# @param [ Hash ] options The options.
|
201
|
+
#
|
202
|
+
# @return [ BSON::Document ] The result of the operation.
|
1044
203
|
#
|
1045
|
-
# @
|
1046
|
-
def
|
1047
|
-
|
204
|
+
# Execute a batch of bulk write operations.
#
# @example Execute a bulk write.
#   collection.bulk_write(operations, options)
#
# @param [ Array<Hash> ] operations The operations.
# @param [ Hash ] options The options.
#
# @return [ BSON::Document ] The result of the operation.
#
# @since 2.0.0
def bulk_write(operations, options = {})
  # Default options to {} for consistency with the other write helpers
  # (insert_one, insert_many), which all take an optional options hash.
  BulkWrite.new(operations, options, self).execute
end
|
1049
208
|
|
1050
|
-
# Get the
|
209
|
+
# Get the fully qualified namespace of the collection.
|
1051
210
|
#
|
1052
|
-
# @
|
1053
|
-
#
|
1054
|
-
# @option opts [Integer] :limit (nil) The number of documents to limit.
|
1055
|
-
# @option opts [String, Array, OrderedHash] :hint hint for query optimizer, usually not necessary if
|
1056
|
-
# using MongoDB > 1.1. This option is only supported with #count in server version > 2.6.
|
1057
|
-
# @option opts [String] :named_hint for specifying a named index as a hint, will be overridden by :hint
|
1058
|
-
# if :hint is also provided. This option is only supported with #count in server version > 2.6.
|
1059
|
-
# @option opts [:primary, :secondary] :read Read preference for this command. See Collection#find for
|
1060
|
-
# more details.
|
1061
|
-
# @option opts [String] :comment (nil) a comment to include in profiling logs
|
211
|
+
# @example Get the fully qualified namespace.
|
212
|
+
# collection.namespace
|
1062
213
|
#
|
1063
|
-
# @return [
|
1064
|
-
# Get the number of documents in this collection, honoring :query,
# :skip, :limit, :hint / :named_hint, :read and :comment options.
#
# @param [ Hash ] opts The count options.
#
# @return [ Integer ] The number of matching documents.
def count(opts={})
  query_opts = {
    :skip => opts[:skip],
    :limit => opts[:limit],
    :named_hint => opts[:named_hint] || @hint,
    :hint => opts[:hint] || @hint,
    :read => opts[:read],
    :comment => opts[:comment]
  }
  find(opts[:query], query_opts).count(true)
end
alias :size :count
|
1074
|
-
|
1075
|
-
protected
|
1076
|
-
|
1077
|
-
# Provide required command options if they are missing in the command options hash.
|
214
|
+
# @return [ String ] The collection namespace.
|
1078
215
|
#
|
1079
|
-
# @
|
1080
|
-
def
|
1081
|
-
|
216
|
+
# Get the fully qualified namespace of the collection.
#
# @example Get the fully qualified namespace.
#   collection.namespace
#
# @return [ String ] The collection namespace, in the canonical
#   "<database name>.<collection name>" form.
#
# @since 2.0.0
def namespace
  # A MongoDB namespace is the database name followed by the collection
  # name; the previous interpolation had the two components reversed.
  "#{database.name}.#{name}"
end
|
1083
|
-
|
1084
|
-
# Normalize a hint into the hash form the server expects.
#
# @param [ String, Hash, nil, #to_a ] hint The raw hint.
#
# @return [ Hash, nil ] A field => 1 hash for a string, the hash itself
#   for a hash, nil for nil, or an ordered field => 1 hash otherwise.
def normalize_hint_fields(hint)
  return nil if hint.nil?
  return { hint => 1 } if hint.is_a?(String)
  return hint if hint.is_a?(Hash)
  hint.to_a.each_with_object(BSON::OrderedHash.new) do |field, normalized|
    normalized[field] = 1
  end
end
|
1098
|
-
|
1099
|
-
private
|
1100
|
-
|
1101
|
-
# Dispatch a single write either through the write-command writer or the
# legacy wire-protocol operation writer, based on connection support.
def send_write(op_type, selector, doc_or_docs, check_keys, opts, collection_name=@name)
  write_concern = get_write_concern(opts, self)
  writer =
    if @db.connection.use_write_command?(write_concern)
      @command_writer.method(:send_write_command)
    else
      @operation_writer.method(:send_write_operation)
    end
  writer.call(op_type, selector, doc_or_docs, check_keys, opts, write_concern, collection_name)
end
|
1109
|
-
|
1110
|
-
# Find the name of an existing index whose key pattern matches the given
# spec, or nil when no such index exists.
def index_name(spec)
  wanted = parse_index_spec(spec)
  match = index_information.find { |_, info| info['key'] == wanted }
  match && match[0]
end
|
1117
|
-
|
1118
|
-
# Convert an index spec (string/symbol, hash, or array of
# [field, type] pairs) into an ordered field => type hash, validating
# the index types along the way.
def parse_index_spec(spec)
  case spec
  when String, Symbol
    single = BSON::OrderedHash.new
    single[spec.to_s] = 1
    single
  when Hash
    if RUBY_VERSION < '1.9' && !spec.is_a?(BSON::OrderedHash)
      raise MongoArgumentError, "Must use OrderedHash in Ruby < 1.9.0"
    end
    validate_index_types(spec.values)
    spec.is_a?(BSON::OrderedHash) ? spec : BSON::OrderedHash.try_convert(spec)
  else
    unless spec.is_a?(Array) && spec.all? { |field| field.is_a?(Array) }
      raise MongoArgumentError, "Invalid index specification #{spec.inspect}; " +
        "should be either a hash (OrderedHash), string, symbol, or an array of arrays."
    end
    spec.each_with_object(BSON::OrderedHash.new) do |(field, type), acc|
      validate_index_types(type)
      acc[field.to_s] = type
    end
  end
end
|
1139
|
-
|
1140
|
-
# Ensure each supplied index type is one of the recognized
# Mongo::INDEX_TYPES values, raising MongoArgumentError otherwise.
def validate_index_types(*types)
  allowed = Mongo::INDEX_TYPES.values
  types.flatten.each do |type|
    next if allowed.include?(type)
    legal = Mongo::INDEX_TYPES.map { |k, v| "Mongo::#{k} (#{v})" }.join(', ')
    raise MongoArgumentError, "Invalid index field #{type.inspect}; " +
      "should be one of " + legal
  end
end
|
1149
|
-
|
1150
|
-
# Create an index via the createIndexes command, falling back to a
# direct system-indexes insert on servers without that command.
def generate_indexes(field_spec, name, opts)
  selector = { :name => name, :key => field_spec }.merge!(opts)

  begin
    @db.command(BSON::OrderedHash[:createIndexes, @name, :indexes, [selector]])
  rescue Mongo::OperationFailure => ex
    unless Mongo::ErrorCode::COMMAND_NOT_FOUND_CODES.include?(ex.error_code)
      raise Mongo::OperationFailure, "Failed to create index #{selector.inspect} with the following error: " +
        "#{ex.message}"
    end
    # Older servers: insert the index document into the system collection.
    selector[:ns] = "#{@db.name}.#{@name}"
    send_write(:insert, nil, selector, false, {:w => 1}, Mongo::DB::SYSTEM_INDEX_COLLECTION)
  end

  nil
end
|
1172
|
-
|
1173
|
-
# Build the default server-style index name ("field1_1_field2_-1") from
# an ordered field => type spec.
def generate_index_name(spec)
  spec.map { |field, type| "#{field}_#{type}" }.join("_")
end
|
1180
|
-
|
1181
|
-
# Route a batch write to the appropriate writer (write commands vs.
# legacy wire-protocol operations) based on the connection.
def batch_write(op_type, documents, check_keys=true, opts={})
  write_concern = get_write_concern(opts, self)
  use_commands = @db.connection.use_write_command?(write_concern)
  writer = use_commands ? @command_writer : @operation_writer
  writer.batch_write(op_type, documents, check_keys, opts)
end
|
1189
|
-
|
1190
220
|
end
|
1191
|
-
|
1192
221
|
end
|