kbaum-mongo 0.18.3p
Sign up to get free protection for your applications and to get access to all the features.
- data/LICENSE.txt +202 -0
- data/README.rdoc +339 -0
- data/Rakefile +138 -0
- data/bin/bson_benchmark.rb +59 -0
- data/bin/fail_if_no_c.rb +11 -0
- data/examples/admin.rb +42 -0
- data/examples/capped.rb +22 -0
- data/examples/cursor.rb +48 -0
- data/examples/gridfs.rb +88 -0
- data/examples/index_test.rb +126 -0
- data/examples/info.rb +31 -0
- data/examples/queries.rb +70 -0
- data/examples/simple.rb +24 -0
- data/examples/strict.rb +35 -0
- data/examples/types.rb +36 -0
- data/lib/mongo/collection.rb +609 -0
- data/lib/mongo/connection.rb +672 -0
- data/lib/mongo/cursor.rb +403 -0
- data/lib/mongo/db.rb +555 -0
- data/lib/mongo/exceptions.rb +66 -0
- data/lib/mongo/gridfs/chunk.rb +91 -0
- data/lib/mongo/gridfs/grid.rb +79 -0
- data/lib/mongo/gridfs/grid_file_system.rb +101 -0
- data/lib/mongo/gridfs/grid_io.rb +338 -0
- data/lib/mongo/gridfs/grid_store.rb +580 -0
- data/lib/mongo/gridfs.rb +25 -0
- data/lib/mongo/types/binary.rb +52 -0
- data/lib/mongo/types/code.rb +36 -0
- data/lib/mongo/types/dbref.rb +40 -0
- data/lib/mongo/types/min_max_keys.rb +58 -0
- data/lib/mongo/types/objectid.rb +180 -0
- data/lib/mongo/types/regexp_of_holding.rb +45 -0
- data/lib/mongo/util/bson_c.rb +18 -0
- data/lib/mongo/util/bson_ruby.rb +606 -0
- data/lib/mongo/util/byte_buffer.rb +222 -0
- data/lib/mongo/util/conversions.rb +87 -0
- data/lib/mongo/util/ordered_hash.rb +140 -0
- data/lib/mongo/util/server_version.rb +69 -0
- data/lib/mongo/util/support.rb +26 -0
- data/lib/mongo.rb +63 -0
- data/mongo-ruby-driver.gemspec +28 -0
- data/test/auxillary/autoreconnect_test.rb +42 -0
- data/test/binary_test.rb +15 -0
- data/test/bson_test.rb +427 -0
- data/test/byte_buffer_test.rb +81 -0
- data/test/chunk_test.rb +82 -0
- data/test/collection_test.rb +515 -0
- data/test/connection_test.rb +160 -0
- data/test/conversions_test.rb +120 -0
- data/test/cursor_test.rb +379 -0
- data/test/db_api_test.rb +780 -0
- data/test/db_connection_test.rb +16 -0
- data/test/db_test.rb +272 -0
- data/test/grid_file_system_test.rb +210 -0
- data/test/grid_io_test.rb +78 -0
- data/test/grid_store_test.rb +334 -0
- data/test/grid_test.rb +87 -0
- data/test/objectid_test.rb +125 -0
- data/test/ordered_hash_test.rb +172 -0
- data/test/replica/count_test.rb +34 -0
- data/test/replica/insert_test.rb +50 -0
- data/test/replica/pooled_insert_test.rb +54 -0
- data/test/replica/query_test.rb +39 -0
- data/test/slave_connection_test.rb +36 -0
- data/test/test_helper.rb +42 -0
- data/test/threading/test_threading_large_pool.rb +90 -0
- data/test/threading_test.rb +87 -0
- data/test/unit/collection_test.rb +61 -0
- data/test/unit/connection_test.rb +117 -0
- data/test/unit/cursor_test.rb +93 -0
- data/test/unit/db_test.rb +98 -0
- metadata +127 -0
@@ -0,0 +1,580 @@
|
|
1
|
+
# --
|
2
|
+
# Copyright (C) 2008-2010 10gen Inc.
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
# ++
|
16
|
+
|
17
|
+
require 'mongo/types/objectid'
|
18
|
+
require 'mongo/util/ordered_hash'
|
19
|
+
require 'mongo/gridfs/chunk'
|
20
|
+
|
21
|
+
module GridFS
|
22
|
+
|
23
|
+
# GridStore is an IO-like class that provides input and output for
|
24
|
+
# streams of data to MongoDB.
|
25
|
+
#
|
26
|
+
# @example
|
27
|
+
#
|
28
|
+
# include GridFS
|
29
|
+
#
|
30
|
+
# #Store the text "Hello, world!" in the grid store.
|
31
|
+
# GridStore.open(database, 'filename', 'w') do |f|
|
32
|
+
# f.puts "Hello, world!"
|
33
|
+
# end
|
34
|
+
#
|
35
|
+
# # Output "Hello, world!"
|
36
|
+
# GridStore.open(database, 'filename', 'r') do |f|
|
37
|
+
# puts f.read
|
38
|
+
# end
|
39
|
+
#
|
40
|
+
# # Add text to the grid store.
|
41
|
+
# GridStore.open(database, 'filename', 'w+') do |f|
|
42
|
+
# f.puts "But wait, there's more!"
|
43
|
+
# end
|
44
|
+
#
|
45
|
+
# # Retrieve everything, outputting "Hello, world!\nBut wait, there's more!\n"
|
46
|
+
# GridStore.open(database, 'filename', 'r') do |f|
|
47
|
+
# puts f.read
|
48
|
+
# end
|
49
|
+
#
|
50
|
+
# @deprecated
|
51
|
+
class GridStore
|
52
|
+
include Enumerable
|
53
|
+
|
54
|
+
DEFAULT_ROOT_COLLECTION = 'fs'
|
55
|
+
|
56
|
+
DEFAULT_CONTENT_TYPE = 'text/plain'
|
57
|
+
|
58
|
+
DEPRECATION_WARNING = "GridFS::GridStore is deprecated. Use either Grid or GridFileSystem."
|
59
|
+
|
60
|
+
attr_accessor :filename
|
61
|
+
|
62
|
+
# Array of strings; may be +nil+
|
63
|
+
attr_accessor :aliases
|
64
|
+
|
65
|
+
# Default is DEFAULT_CONTENT_TYPE
|
66
|
+
attr_accessor :content_type
|
67
|
+
|
68
|
+
# Size of file in bytes
|
69
|
+
attr_reader :length
|
70
|
+
|
71
|
+
attr_accessor :metadata
|
72
|
+
|
73
|
+
attr_reader :files_id
|
74
|
+
|
75
|
+
# Time that the file was first saved.
|
76
|
+
attr_reader :upload_date
|
77
|
+
|
78
|
+
attr_reader :chunk_size
|
79
|
+
|
80
|
+
attr_accessor :lineno
|
81
|
+
|
82
|
+
attr_reader :md5
|
83
|
+
|
84
|
+
def self.default_root_collection
|
85
|
+
@@default_root_collection ||= DEFAULT_ROOT_COLLECTION
|
86
|
+
end
|
87
|
+
|
88
|
+
def self.default_root_collection=(name)
|
89
|
+
@@default_root_collection = name
|
90
|
+
end
|
91
|
+
|
92
|
+
# Determine whether a given file exists in the GridStore.
|
93
|
+
#
|
94
|
+
# @param [Mongo::DB] db a MongoDB database.
|
95
|
+
# @param [String] name the filename.
|
96
|
+
# @param [String] root_collection the name of the gridfs root collection.
|
97
|
+
#
|
98
|
+
# @return [Boolean]
|
99
|
+
# @deprecated
|
100
|
+
def self.exist?(db, name, root_collection=GridStore.default_root_collection)
|
101
|
+
warn DEPRECATION_WARNING
|
102
|
+
db.collection("#{root_collection}.files").find({'filename' => name}).next_document != nil
|
103
|
+
end
|
104
|
+
|
105
|
+
# Open a GridFS file for reading, writing, or appending. Note that
|
106
|
+
# this method must be used with a block.
|
107
|
+
#
|
108
|
+
# @param [Mongo::DB] db a MongoDB database.
|
109
|
+
# @param [String] name the filename.
|
110
|
+
# @param [String] mode one of 'r', 'w', or 'w+' for reading, writing,
|
111
|
+
# and appending, respectively.
|
112
|
+
# @param [Hash] options any of the options available on
|
113
|
+
# GridStore initialization.
|
114
|
+
#
|
115
|
+
# @see GridStore#initialize.
|
116
|
+
# @see The various GridStore class methods, e.g., GridStore.open, GridStore.read etc.
|
117
|
+
# @deprecated
|
118
|
+
def self.open(db, name, mode, options={})
|
119
|
+
gs = self.new(db, name, mode, options)
|
120
|
+
result = nil
|
121
|
+
begin
|
122
|
+
result = yield gs if block_given?
|
123
|
+
ensure
|
124
|
+
gs.close
|
125
|
+
end
|
126
|
+
result
|
127
|
+
end
|
128
|
+
|
129
|
+
# Read a file stored in GridFS.
|
130
|
+
#
|
131
|
+
# @param [Mongo::DB] db a MongoDB database.
|
132
|
+
# @param [String] name the name of the file.
|
133
|
+
# @param [Integer] length the number of bytes to read.
|
134
|
+
# @param [Integer] offset the number of bytes beyond the
|
135
|
+
# beginning of the file to start reading.
|
136
|
+
#
|
137
|
+
# @return [String] the file data
|
138
|
+
# @deprecated
|
139
|
+
def self.read(db, name, length=nil, offset=nil)
|
140
|
+
GridStore.open(db, name, 'r') do |gs|
|
141
|
+
gs.seek(offset) if offset
|
142
|
+
gs.read(length)
|
143
|
+
end
|
144
|
+
end
|
145
|
+
|
146
|
+
# List the contents of all GridFS files stored in the given db and
|
147
|
+
# root collection.
|
148
|
+
#
|
149
|
+
# @param [Mongo::DB] db a MongoDB database.
|
150
|
+
# @param [String] root_collection the name of the root collection.
|
151
|
+
#
|
152
|
+
# @return [Array]
|
153
|
+
# @deprecated
|
154
|
+
def self.list(db, root_collection=GridStore.default_root_collection)
|
155
|
+
warn DEPRECATION_WARNING
|
156
|
+
db.collection("#{root_collection}.files").find().map do |f|
|
157
|
+
f['filename']
|
158
|
+
end
|
159
|
+
end
|
160
|
+
|
161
|
+
# Get each line of data from the specified file
|
162
|
+
# as an array of strings.
|
163
|
+
#
|
164
|
+
# @param [Mongo::DB] db a MongoDB database.
|
165
|
+
# @param [String] name the filename.
|
166
|
+
# @param [String, Regexp] separator the line separator.
|
167
|
+
#
|
168
|
+
# @return [Array]
|
169
|
+
# @deprecated
|
170
|
+
def self.readlines(db, name, separator=$/)
|
171
|
+
GridStore.open(db, name, 'r') do |gs|
|
172
|
+
gs.readlines(separator)
|
173
|
+
end
|
174
|
+
end
|
175
|
+
|
176
|
+
# Remove one or more files from the given db.
|
177
|
+
#
|
178
|
+
# @param [Mongo::Database] db a MongoDB database.
|
179
|
+
# @param [Array<String>] names the filenames to remove
|
180
|
+
#
|
181
|
+
# @return [True]
|
182
|
+
# @deprecated
|
183
|
+
def self.unlink(db, *names)
|
184
|
+
names.each do |name|
|
185
|
+
gs = GridStore.new(db, name)
|
186
|
+
gs.delete_chunks
|
187
|
+
gs.collection.remove('_id' => gs.files_id)
|
188
|
+
end
|
189
|
+
end
|
190
|
+
class << self
|
191
|
+
alias_method :delete, :unlink
|
192
|
+
end
|
193
|
+
|
194
|
+
# Rename a file in this collection. Note that this method uses
|
195
|
+
# Collection#update, which means that you will not be notified of the
|
196
|
+
# success of the operation.
|
197
|
+
#
|
198
|
+
# @param [Mongo::DB] db a MongoDB database.
|
199
|
+
# @param [String] src the name of the source file.
|
200
|
+
# @param [String] dest the name of the destination file.
|
201
|
+
# @param [String] root_collection the name of the default root collection.
|
202
|
+
# @deprecated
|
203
|
+
def self.mv(db, src, dest, root_collection=GridStore.default_root_collection)
|
204
|
+
warn DEPRECATION_WARNING
|
205
|
+
db.collection("#{root_collection}.files").update({ :filename => src }, { '$set' => { :filename => dest } })
|
206
|
+
end
|
207
|
+
|
208
|
+
# Initialize a GridStore instance for reading, writing, or modifying a given file.
|
209
|
+
# Note that it's often easier to work with the various GridStore class methods (open, read, etc.).
|
210
|
+
#
|
211
|
+
# @param [Mongo::DB] db a MongoDB database.
|
212
|
+
# @param [String] name a filename.
|
213
|
+
# @param [String] mode either 'r', 'w', or 'w+' for reading, writing, or appending, respectively.
|
214
|
+
#
|
215
|
+
# @option options [String] :root DEFAULT_ROOT_COLLECTION ('r', 'w', 'w+') the name of the root collection to use.
|
216
|
+
#
|
217
|
+
# @option options [String] :metadata ({}) (w, w+) A hash containing any data you want persisted as
|
218
|
+
# this file's metadata.
|
219
|
+
#
|
220
|
+
# @option options [Integer] :chunk_size (Chunk::DEFAULT_CHUNK_SIZE) (w) Sets chunk size for files opened for writing.
|
221
|
+
# See also GridStore#chunk_size=.
|
222
|
+
#
|
223
|
+
# @option options [String] :content_type ('text/plain') Set the content type stored as the
|
224
|
+
# file's metadata. See also GridStore#content_type=.
|
225
|
+
# @deprecated
|
226
|
+
def initialize(db, name, mode='r', options={})
|
227
|
+
warn DEPRECATION_WARNING
|
228
|
+
@db, @filename, @mode = db, name, mode
|
229
|
+
@root = options[:root] || GridStore.default_root_collection
|
230
|
+
|
231
|
+
doc = collection.find({'filename' => @filename}).next_document
|
232
|
+
if doc
|
233
|
+
@files_id = doc['_id']
|
234
|
+
@content_type = doc['contentType']
|
235
|
+
@chunk_size = doc['chunkSize']
|
236
|
+
@upload_date = doc['uploadDate']
|
237
|
+
@aliases = doc['aliases']
|
238
|
+
@length = doc['length']
|
239
|
+
@metadata = doc['metadata']
|
240
|
+
@md5 = doc['md5']
|
241
|
+
else
|
242
|
+
@files_id = Mongo::ObjectID.new
|
243
|
+
@content_type = DEFAULT_CONTENT_TYPE
|
244
|
+
@chunk_size = Chunk::DEFAULT_CHUNK_SIZE
|
245
|
+
@length = 0
|
246
|
+
end
|
247
|
+
|
248
|
+
case mode
|
249
|
+
when 'r'
|
250
|
+
@curr_chunk = nth_chunk(0)
|
251
|
+
@position = 0
|
252
|
+
when 'w'
|
253
|
+
chunk_collection.create_index([['files_id', Mongo::ASCENDING], ['n', Mongo::ASCENDING]])
|
254
|
+
delete_chunks
|
255
|
+
@curr_chunk = Chunk.new(self, 'n' => 0)
|
256
|
+
@content_type = options[:content_type] if options[:content_type]
|
257
|
+
@chunk_size = options[:chunk_size] if options[:chunk_size]
|
258
|
+
@metadata = options[:metadata] if options[:metadata]
|
259
|
+
@position = 0
|
260
|
+
when 'w+'
|
261
|
+
chunk_collection.create_index([['files_id', Mongo::ASCENDING], ['n', Mongo::ASCENDING]])
|
262
|
+
@curr_chunk = nth_chunk(last_chunk_number) || Chunk.new(self, 'n' => 0) # might be empty
|
263
|
+
@curr_chunk.pos = @curr_chunk.data.length if @curr_chunk
|
264
|
+
@metadata = options[:metadata] if options[:metadata]
|
265
|
+
@position = @length
|
266
|
+
else
|
267
|
+
raise "error: illegal mode #{mode}"
|
268
|
+
end
|
269
|
+
|
270
|
+
@lineno = 0
|
271
|
+
@pushback_byte = nil
|
272
|
+
end
|
273
|
+
|
274
|
+
# Get the files collection referenced by this GridStore instance.
|
275
|
+
#
|
276
|
+
# @return [Mongo::Collection]
|
277
|
+
def collection
|
278
|
+
@db.collection("#{@root}.files")
|
279
|
+
end
|
280
|
+
|
281
|
+
# Get the chunk collection referenced by this GridStore.
|
282
|
+
#
|
283
|
+
# @return [Mongo::Collection]
|
284
|
+
def chunk_collection
|
285
|
+
@db.collection("#{@root}.chunks")
|
286
|
+
end
|
287
|
+
|
288
|
+
# Change the chunk size. This is permitted only when the file is opened for write
|
289
|
+
# and no data has yet been written.
|
290
|
+
#
|
291
|
+
# @param [Integer] size the new chunk size, in bytes.
|
292
|
+
#
|
293
|
+
# @return [Integer] the new chunk size.
|
294
|
+
def chunk_size=(size)
|
295
|
+
unless @mode[0] == ?w && @position == 0 && @upload_date == nil
|
296
|
+
raise "error: can only change chunk size if open for write and no data written."
|
297
|
+
end
|
298
|
+
@chunk_size = size
|
299
|
+
end
|
300
|
+
|
301
|
+
# ================ reading ================
|
302
|
+
|
303
|
+
def getc
|
304
|
+
if @pushback_byte
|
305
|
+
byte = @pushback_byte
|
306
|
+
@pushback_byte = nil
|
307
|
+
@position += 1
|
308
|
+
byte
|
309
|
+
elsif eof?
|
310
|
+
nil
|
311
|
+
else
|
312
|
+
if @curr_chunk.eof?
|
313
|
+
@curr_chunk = nth_chunk(@curr_chunk.chunk_number + 1)
|
314
|
+
end
|
315
|
+
@position += 1
|
316
|
+
@curr_chunk.getc
|
317
|
+
end
|
318
|
+
end
|
319
|
+
|
320
|
+
# Read the next line from the file, up to and including +separator+,
# incrementing +lineno+ as a side effect.
#
# @param [String] separator the line terminator (defaults to $/).
#
# @return [String, nil] the line read, or nil at end of file.
#
# NOTE(review): the break below compares one byte's character against the
# entire separator, so only a single-character separator can ever
# terminate a line; a multi-character separator silently reads to EOF.
# Confirm callers only pass single-character separators.
def gets(separator=$/)
  str = ''
  byte = self.getc
  return nil if byte == nil # EOF
  while byte != nil
    s = byte.chr
    str << s
    break if s == separator
    byte = self.getc
  end
  @lineno += 1
  str
end
|
333
|
+
|
334
|
+
def read(len=nil, buf=nil)
|
335
|
+
if len
|
336
|
+
read_partial(len, buf)
|
337
|
+
else
|
338
|
+
read_all(buf)
|
339
|
+
end
|
340
|
+
end
|
341
|
+
|
342
|
+
def readchar
|
343
|
+
byte = self.getc
|
344
|
+
raise EOFError.new if byte == nil
|
345
|
+
byte
|
346
|
+
end
|
347
|
+
|
348
|
+
def readline(separator=$/)
|
349
|
+
line = gets
|
350
|
+
raise EOFError.new if line == nil
|
351
|
+
line
|
352
|
+
end
|
353
|
+
|
354
|
+
def readlines(separator=$/)
|
355
|
+
read.split(separator).collect { |line| "#{line}#{separator}" }
|
356
|
+
end
|
357
|
+
|
358
|
+
def each
|
359
|
+
line = gets
|
360
|
+
while line
|
361
|
+
yield line
|
362
|
+
line = gets
|
363
|
+
end
|
364
|
+
end
|
365
|
+
alias_method :each_line, :each
|
366
|
+
|
367
|
+
def each_byte
|
368
|
+
byte = self.getc
|
369
|
+
while byte
|
370
|
+
yield byte
|
371
|
+
byte = self.getc
|
372
|
+
end
|
373
|
+
end
|
374
|
+
|
375
|
+
def ungetc(byte)
|
376
|
+
@pushback_byte = byte
|
377
|
+
@position -= 1
|
378
|
+
end
|
379
|
+
|
380
|
+
# ================ writing ================
|
381
|
+
|
382
|
+
def putc(byte)
|
383
|
+
if @curr_chunk.pos == @chunk_size
|
384
|
+
prev_chunk_number = @curr_chunk.chunk_number
|
385
|
+
@curr_chunk.save
|
386
|
+
@curr_chunk = Chunk.new(self, 'n' => prev_chunk_number + 1)
|
387
|
+
end
|
388
|
+
@position += 1
|
389
|
+
@curr_chunk.putc(byte)
|
390
|
+
end
|
391
|
+
|
392
|
+
def print(*objs)
|
393
|
+
objs = [$_] if objs == nil || objs.empty?
|
394
|
+
objs.each { |obj|
|
395
|
+
str = obj.to_s
|
396
|
+
str.each_byte { |byte| self.putc(byte) }
|
397
|
+
}
|
398
|
+
nil
|
399
|
+
end
|
400
|
+
|
401
|
+
def puts(*objs)
|
402
|
+
if objs == nil || objs.empty?
|
403
|
+
self.putc(10)
|
404
|
+
else
|
405
|
+
print(*objs.collect{ |obj|
|
406
|
+
str = obj.to_s
|
407
|
+
str << "\n" unless str =~ /\n$/
|
408
|
+
str
|
409
|
+
})
|
410
|
+
end
|
411
|
+
nil
|
412
|
+
end
|
413
|
+
|
414
|
+
def <<(obj)
|
415
|
+
write(obj.to_s)
|
416
|
+
end
|
417
|
+
|
418
|
+
def write(string)
|
419
|
+
raise "#@filename not opened for write" unless @mode[0] == ?w
|
420
|
+
# Since Ruby 1.9, strings don't necessarily store one byte per character,
# so force a binary encoding before slicing by byte offsets.
|
421
|
+
if string.respond_to?(:force_encoding)
|
422
|
+
string.force_encoding("binary")
|
423
|
+
end
|
424
|
+
to_write = string.length
|
425
|
+
while (to_write > 0) do
|
426
|
+
if @curr_chunk && @curr_chunk.data.position == @chunk_size
|
427
|
+
prev_chunk_number = @curr_chunk.chunk_number
|
428
|
+
@curr_chunk = GridFS::Chunk.new(self, 'n' => prev_chunk_number + 1)
|
429
|
+
end
|
430
|
+
chunk_available = @chunk_size - @curr_chunk.data.position
|
431
|
+
step_size = (to_write > chunk_available) ? chunk_available : to_write
|
432
|
+
@curr_chunk.data.put_array(ByteBuffer.new(string[-to_write,step_size]).to_a)
|
433
|
+
to_write -= step_size
|
434
|
+
@curr_chunk.save
|
435
|
+
end
|
436
|
+
string.length - to_write
|
437
|
+
end
|
438
|
+
|
439
|
+
# A no-op.
|
440
|
+
def flush
|
441
|
+
end
|
442
|
+
|
443
|
+
# ================ status ================
|
444
|
+
|
445
|
+
def eof
|
446
|
+
raise IOError.new("stream not open for reading") unless @mode[0] == ?r
|
447
|
+
@position >= @length
|
448
|
+
end
|
449
|
+
alias_method :eof?, :eof
|
450
|
+
|
451
|
+
# ================ positioning ================
|
452
|
+
|
453
|
+
# Reset the file pointer to the beginning of the file.
#
# In write mode all existing chunks are deleted and a fresh first chunk
# is created; in read mode the first chunk is re-fetched. Also resets
# +lineno+ and the byte position.
def rewind
  if @curr_chunk.chunk_number != 0
    if @mode[0] == ?w
      delete_chunks
      @curr_chunk = Chunk.new(self, 'n' => 0)
    else
      # Bug fix: this was `@curr_chunk == nth_chunk(0)` — a no-effect
      # comparison — so rewinding a read stream never actually reset
      # the current chunk. It must be an assignment.
      @curr_chunk = nth_chunk(0)
    end
  end
  @curr_chunk.pos = 0
  @lineno = 0
  @position = 0
end
|
466
|
+
|
467
|
+
def seek(pos, whence=IO::SEEK_SET)
|
468
|
+
target_pos = case whence
|
469
|
+
when IO::SEEK_CUR
|
470
|
+
@position + pos
|
471
|
+
when IO::SEEK_END
|
472
|
+
@length + pos
|
473
|
+
when IO::SEEK_SET
|
474
|
+
pos
|
475
|
+
end
|
476
|
+
|
477
|
+
new_chunk_number = (target_pos / @chunk_size).to_i
|
478
|
+
if new_chunk_number != @curr_chunk.chunk_number
|
479
|
+
@curr_chunk.save if @mode[0] == ?w
|
480
|
+
@curr_chunk = nth_chunk(new_chunk_number)
|
481
|
+
end
|
482
|
+
@position = target_pos
|
483
|
+
@curr_chunk.pos = @position % @chunk_size
|
484
|
+
0
|
485
|
+
end
|
486
|
+
|
487
|
+
# Current file offset, in bytes, from the start of the file.
#
# @return [Integer]
def tell
  @position
end
|
490
|
+
|
491
|
+
#---
|
492
|
+
# ================ closing ================
|
493
|
+
#+++
|
494
|
+
|
495
|
+
def close
|
496
|
+
if @mode[0] == ?w
|
497
|
+
if @curr_chunk
|
498
|
+
@curr_chunk.truncate
|
499
|
+
@curr_chunk.save if @curr_chunk.pos > 0
|
500
|
+
end
|
501
|
+
files = collection
|
502
|
+
if @upload_date
|
503
|
+
files.remove('_id' => @files_id)
|
504
|
+
else
|
505
|
+
@upload_date = Time.now
|
506
|
+
end
|
507
|
+
files.insert(to_mongo_object)
|
508
|
+
end
|
509
|
+
@db = nil
|
510
|
+
end
|
511
|
+
|
512
|
+
def closed?
|
513
|
+
@db == nil
|
514
|
+
end
|
515
|
+
|
516
|
+
def delete_chunks
|
517
|
+
chunk_collection.remove({'files_id' => @files_id}) if @files_id
|
518
|
+
@curr_chunk = nil
|
519
|
+
end
|
520
|
+
|
521
|
+
#---
|
522
|
+
# ================ protected ================
|
523
|
+
#+++
|
524
|
+
|
525
|
+
protected
|
526
|
+
|
527
|
+
def to_mongo_object
|
528
|
+
h = OrderedHash.new
|
529
|
+
h['_id'] = @files_id
|
530
|
+
h['filename'] = @filename
|
531
|
+
h['contentType'] = @content_type
|
532
|
+
h['length'] = @curr_chunk ? @curr_chunk.chunk_number * @chunk_size + @curr_chunk.pos : 0
|
533
|
+
h['chunkSize'] = @chunk_size
|
534
|
+
h['uploadDate'] = @upload_date
|
535
|
+
h['aliases'] = @aliases
|
536
|
+
h['metadata'] = @metadata
|
537
|
+
md5_command = OrderedHash.new
|
538
|
+
md5_command['filemd5'] = @files_id
|
539
|
+
md5_command['root'] = @root
|
540
|
+
h['md5'] = @db.command(md5_command)['md5']
|
541
|
+
h
|
542
|
+
end
|
543
|
+
|
544
|
+
def read_partial(len, buf=nil)
|
545
|
+
buf ||= ''
|
546
|
+
byte = self.getc
|
547
|
+
while byte != nil && (len == nil || len > 0)
|
548
|
+
buf << byte.chr
|
549
|
+
len -= 1 if len
|
550
|
+
byte = self.getc if (len == nil || len > 0)
|
551
|
+
end
|
552
|
+
buf
|
553
|
+
end
|
554
|
+
|
555
|
+
# Read from the current chunk through the final chunk, appending the
# accumulated data onto +buf+.
#
# @param [String] buf optional buffer to append onto; a fresh string is
#   used when none is given.
#
# @return [String]
#
# NOTE(review): when the current chunk is partially consumed (pos > 0)
# the slice below indexes the chunk's data with the absolute file
# position (@position) rather than the chunk-local offset
# (@curr_chunk.pos). The two coincide only while still inside chunk 0,
# so a read-all after seeking past the first chunk looks wrong — confirm
# before changing. Also note @position is never advanced here, so eof?
# may still report false after a full read.
def read_all(buf=nil)
  buf ||= ''
  while true do
    if (@curr_chunk.pos > 0)
      data = @curr_chunk.data.to_s
      buf += data[@position, data.length]
    else
      buf += @curr_chunk.data.to_s
    end
    # Stop once the chunk holding the final byte has been appended.
    break if @curr_chunk.chunk_number == last_chunk_number
    @curr_chunk = nth_chunk(@curr_chunk.chunk_number + 1)
  end
  buf
end
|
569
|
+
|
570
|
+
def nth_chunk(n)
|
571
|
+
mongo_chunk = chunk_collection.find({'files_id' => @files_id, 'n' => n}).next_document
|
572
|
+
Chunk.new(self, mongo_chunk || {})
|
573
|
+
end
|
574
|
+
|
575
|
+
# Number of the chunk that contains the file's final byte — the chunk to
# resume appending into when the file is opened in 'w+' mode, and the
# chunk at which read_all stops.
#
# Bug fix: the old expression (@length / @chunk_size) pointed one chunk
# past the end whenever the length was an exact multiple of the chunk
# size. Chunks are numbered 0..k-1, so the last chunk holding data is
# ((length - 1) / chunk_size); an empty file uses chunk 0.
#
# @return [Integer]
def last_chunk_number
  return 0 if @length.nil? || @length <= 0
  ((@length - 1) / @chunk_size).to_i
end
|
578
|
+
|
579
|
+
end
|
580
|
+
end
|
data/lib/mongo/gridfs.rb
ADDED
@@ -0,0 +1,25 @@
|
|
1
|
+
# --
|
2
|
+
# Copyright (C) 2008-2010 10gen Inc.
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
# ++
|
16
|
+
require 'mongo/gridfs/grid_store'
|
17
|
+
|
18
|
+
# GridFS is a specification for storing large binary objects in MongoDB.
|
19
|
+
# See the documentation for GridFS::GridStore
|
20
|
+
#
|
21
|
+
# @see GridFS::GridStore
|
22
|
+
#
|
23
|
+
# @core gridfs
|
24
|
+
module GridFS
|
25
|
+
end
|
@@ -0,0 +1,52 @@
|
|
1
|
+
# --
|
2
|
+
# Copyright (C) 2008-2010 10gen Inc.
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
# ++
|
16
|
+
|
17
|
+
require 'mongo/util/byte_buffer'
|
18
|
+
|
19
|
+
module Mongo

  # An array of binary bytes with a MongoDB subtype. See the subtype
  # constants for reference.
  #
  # Use this class when storing binary data in documents.
  class Binary < ByteBuffer

    SUBTYPE_BYTES = 0x02
    SUBTYPE_UUID = 0x03
    SUBTYPE_MD5 = 0x05
    SUBTYPE_USER_DEFINED = 0x80

    # One of the SUBTYPE_* constants. Default is SUBTYPE_BYTES.
    attr_accessor :subtype

    # Create a buffer for storing binary data in MongoDB.
    #
    # @param [Array] initial_data an array of bytes to store initially.
    # @param [Fixnum] subtype one of four values specifying a BSON binary subtype. Possible values are
    #   SUBTYPE_BYTES, SUBTYPE_UUID, SUBTYPE_MD5, and SUBTYPE_USER_DEFINED.
    #
    # @see http://www.mongodb.org/display/DOCS/BSON#BSON-noteondatabinary BSON binary subtypes.
    def initialize(initial_data=[], subtype=SUBTYPE_BYTES)
      super(initial_data)
      @subtype = subtype
    end

    # Show only the object id rather than dumping the (possibly large)
    # byte payload.
    def inspect
      "<Mongo::Binary:#{object_id}>"
    end

  end
end
|
@@ -0,0 +1,36 @@
|
|
1
|
+
# --
|
2
|
+
# Copyright (C) 2008-2010 10gen Inc.
|
3
|
+
#
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5
|
+
# you may not use this file except in compliance with the License.
|
6
|
+
# You may obtain a copy of the License at
|
7
|
+
#
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9
|
+
#
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13
|
+
# See the License for the specific language governing permissions and
|
14
|
+
# limitations under the License.
|
15
|
+
# ++
|
16
|
+
|
17
|
+
module Mongo

  # JavaScript code to be evaluated by MongoDB.
  class Code < String

    # Hash mapping identifiers to their values
    attr_accessor :scope

    # Wrap code to be evaluated by MongoDB.
    #
    # @param [String] code the JavaScript code.
    # @param [Hash] scope a document mapping identifiers to values, which
    #   represent the scope in which the code is to be executed.
    def initialize(code, scope={})
      super(code)
      @scope = scope
    end

  end
end
|