mongodb-mongo 0.4.4 → 0.5.0
- data/README.rdoc +33 -8
- data/bin/run_test_script +15 -0
- data/lib/mongo/gridfs/chunk.rb +82 -0
- data/lib/mongo/gridfs/grid_store.rb +433 -0
- data/lib/mongo/gridfs.rb +1 -0
- data/lib/mongo/types/binary.rb +18 -10
- data/lib/mongo/util/bson.rb +25 -33
- data/lib/mongo/util/xml_to_ruby.rb +4 -1
- data/mongo-ruby-driver.gemspec +15 -3
- data/tests/mongo-qa/_common.rb +8 -0
- data/tests/mongo-qa/capped +17 -0
- data/tests/mongo-qa/count1 +10 -0
- data/tests/mongo-qa/find +6 -0
- data/tests/mongo-qa/remove +7 -0
- data/tests/mongo-qa/test1 +7 -0
- data/tests/test_bson.rb +23 -8
- data/tests/test_chunk.rb +87 -0
- data/tests/test_grid_store.rb +235 -0
- metadata +14 -2
- data/bin/validate +0 -51
data/README.rdoc
CHANGED
@@ -121,13 +121,31 @@ Mongo as-is. If the string is ASCII all is well, because ASCII is a subset of
 UTF-8. If the string is not ASCII then it may not be a well-formed UTF-8
 string.
 
-==
+== Primary Keys
+
+The field _id is a primary key. It is treated specially by the database, and
+its use makes many operations more efficient.
+
+The value of an _id may be of any type. (Older versions of Mongo required that
+they be XGen::Mongo::Driver::ObjectID instances.)
+
+The database itself inserts an _id value if none is specified when a record is
+inserted.
+
+The driver automatically sends the _id field to the database first, which is
+how Mongo likes it. You don't have to worry about where the _id field is in
+your hash record, or worry if you are using an OrderedHash or not.
 
 === Primary Key Factories
 
-A
-
-
+A primary key factory is a class you supply to a DB object that knows how to
+generate _id values. Primary key factories are no longer necessary because
+Mongo now inserts an _id value for every record that does not already have
+one. However, if you want to control _id values or even their types, using a
+PK factory lets you do so.
+
+You can tell the Ruby Mongo driver how to create primary keys by passing in
+the :pk option to the Mongo#db method.
 
   include XGen::Mongo::Driver
   db = Mongo.new.db('dbname', :pk => MyPKFactory.new)
@@ -163,10 +181,17 @@ ActiveRecord-like framework for non-Rails apps) and the AR Mongo adapter code
     end
   end
 
-A database's PK factory object may be set
-
-
-
+A database's PK factory object may be set either when a DB object is created
+or immediately after you obtain it, but only once. The only reason it is
+changeable at all is so that libraries such as MongoRecord that use this
+driver can set the PK factory after obtaining the database but before using it
+for the first time.
+
+== The DB Class
+
+=== Primary Key factories
+
+See the section on "Primary Keys" above.
 
 === Strict mode
 
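For illustration, a primary key factory passed via the :pk option above might look like the sketch below. The create_pk hook name and its contract (return the row with an _id filled in) are assumptions; the README excerpt only shows how the factory object is handed to Mongo#db:

  class MyPKFactory
    # Called for each insert; make sure the row leaves with an _id.
    def create_pk(row)
      row['_id'] ||= XGen::Mongo::Driver::ObjectID.new
      row
    end
  end

  db = Mongo.new.db('dbname', :pk => MyPKFactory.new)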
data/bin/run_test_script
ADDED
@@ -0,0 +1,15 @@
+#!/bin/bash
+# usage: run_test_script test_name output_file
+#
+# See http://mongodb.onconfluence.com/display/DOCS/Using+the+Framework+(for+Driver+Developers)
+
+HERE=`dirname $0`
+
+begintime=`date`
+ruby $HERE/../tests/mongo-qa/$1
+exitval=$?
+endtime=`date`
+
+echo "begintime:$begintime" >> $2
+echo "endtime:$endtime" >> $2
+echo "exit_code:$exitval" >> $2

data/lib/mongo/gridfs/chunk.rb
ADDED
@@ -0,0 +1,82 @@
+require 'mongo/types/objectid'
+require 'mongo/util/byte_buffer'
+require 'mongo/util/ordered_hash'
+
+
+module XGen
+  module Mongo
+    module GridFS
+
+      # A chunk stores a portion of GridStore data.
+      #
+      # TODO: user-defined chunk size
+      class Chunk
+
+        DEFAULT_CHUNK_SIZE = 1024 * 256
+
+        attr_reader :object_id, :chunk_number
+        attr_accessor :data
+
+        def initialize(file, mongo_object={})
+          @file = file
+          @object_id = mongo_object['_id'] || XGen::Mongo::Driver::ObjectID.new
+          @chunk_number = mongo_object['n'] || 0
+
+          @data = ByteBuffer.new
+          case mongo_object['data']
+          when String
+            mongo_object['data'].each_byte { |b| @data.put(b) }
+          when ByteBuffer
+            @data.put_array(mongo_object['data'].to_a)
+          when Array
+            @data.put_array(mongo_object['data'])
+          when nil
+          else
+            raise "illegal chunk format; data is #{mongo_object['data'] ? (' ' + mongo_object['data'].class.name) : 'nil'}"
+          end
+          @data.rewind
+        end
+
+        def pos; @data.position; end
+        def pos=(pos); @data.position = pos; end
+        def eof?; !@data.more?; end
+
+        def size; @data.size; end
+        alias_method :length, :size
+
+        # Erase all data after current position.
+        def truncate
+          if @data.position < @data.length
+            curr_data = @data
+            @data = ByteBuffer.new
+            @data.put_array(curr_data.to_a[0...curr_data.position])
+          end
+        end
+
+        def getc
+          @data.more? ? @data.get : nil
+        end
+
+        def putc(byte)
+          @data.put(byte)
+        end
+
+        def save
+          coll = @file.chunk_collection
+          coll.remove({'_id' => @object_id})
+          coll.insert(to_mongo_object)
+        end
+
+        def to_mongo_object
+          h = OrderedHash.new
+          h['_id'] = @object_id
+          h['files_id'] = @file.files_id
+          h['n'] = @chunk_number
+          h['data'] = data
+          h
+        end
+
+      end
+    end
+  end
+end

data/lib/mongo/gridfs/grid_store.rb
ADDED
@@ -0,0 +1,433 @@
+require 'mongo/types/objectid'
+require 'mongo/util/ordered_hash'
+require 'mongo/gridfs/chunk'
+
+module XGen
+  module Mongo
+    module GridFS
+
+      # GridStore is an IO-like object that provides input and output for
+      # streams of data to Mongo. See Mongo's documentation about GridFS for
+      # storage implementation details.
+      #
+      # Example code:
+      #
+      #   require 'mongo/gridfs'
+      #   GridStore.open(database, 'filename', 'w') { |f|
+      #     f.puts "Hello, world!"
+      #   }
+      #   GridStore.open(database, 'filename, 'r') { |f|
+      #     puts f.read         # => Hello, world!\n
+      #   }
+      #   GridStore.open(database, 'filename', 'w+') { |f|
+      #     f.puts "But wait, there's more!"
+      #   }
+      #   GridStore.open(database, 'filename, 'r') { |f|
+      #     puts f.read         # => Hello, world!\nBut wait, there's more!\n
+      #   }
+      class GridStore
+
+        DEFAULT_ROOT_COLLECTION = 'gridfs'
+        DEFAULT_CONTENT_TYPE = 'text/plain'
+
+        include Enumerable
+
+        attr_accessor :filename
+
+        # Array of strings; may be +nil+
+        attr_accessor :aliases
+
+        # Default is DEFAULT_CONTENT_TYPE
+        attr_accessor :content_type
+
+        attr_accessor :metadata
+
+        attr_reader :files_id
+
+        # Time that the file was first saved.
+        attr_reader :upload_date
+
+        attr_reader :chunk_size
+
+        attr_accessor :lineno
+
+        class << self
+
+          def exist?(db, name, root_collection=DEFAULT_ROOT_COLLECTION)
+            db.collection("#{root_collection}.files").find({'filename' => name}).next_object != nil
+          end
+
+          def open(db, name, mode, options={})
+            gs = self.new(db, name, mode, options)
+            result = nil
+            begin
+              result = yield gs if block_given?
+            ensure
+              gs.close
+            end
+            result
+          end
+
+          def read(db, name, length=nil, offset=nil)
+            GridStore.open(db, name, 'r') { |gs|
+              gs.seek(offset) if offset
+              gs.read(length)
+            }
+          end
+
+          def readlines(db, name, separator=$/)
+            GridStore.open(db, name, 'r') { |gs|
+              gs.readlines(separator)
+            }
+          end
+
+          def unlink(db, *names)
+            names.each { |name|
+              gs = GridStore.new(db, name)
+              gs.send(:delete_chunks)
+              gs.collection.remove('_id' => gs.files_id)
+            }
+          end
+          alias_method :delete, :unlink
+
+        end
+
+        #---
+        # ================================================================
+        #+++
+
+        # Mode may only be 'r', 'w', or 'w+'.
+        #
+        # Options. Descriptions start with a list of the modes for which that
+        # option is legitimate.
+        #
+        # :root :: (r, w, w+) Name of root collection to use, instead of
+        #          DEFAULT_ROOT_COLLECTION.
+        #
+        # :metadata:: (w, w+) A hash containing any data you want persisted as
+        #             this file's metadata. See also metadata=
+        #
+        # :chunk_size :: (w) Sets chunk size for files opened for writing
+        #                See also chunk_size= which may only be called before
+        #                any data is written.
+        #
+        # :content_type :: (w) Default value is DEFAULT_CONTENT_TYPE. See
+        #                  also #content_type=
+        def initialize(db, name, mode='r', options={})
+          @db, @filename, @mode = db, name, mode
+          @root = options[:root] || DEFAULT_ROOT_COLLECTION
+
+          doc = collection.find({'filename' => @filename}).next_object
+          if doc
+            @files_id = doc['_id']
+            @content_type = doc['contentType']
+            @chunk_size = doc['chunkSize']
+            @upload_date = doc['uploadDate']
+            @aliases = doc['aliases']
+            @length = doc['length']
+            @metadata = doc['metadata']
+          else
+            @files_id = XGen::Mongo::Driver::ObjectID.new
+            @content_type = DEFAULT_CONTENT_TYPE
+            @chunk_size = Chunk::DEFAULT_CHUNK_SIZE
+            @length = 0
+          end
+
+          case mode
+          when 'r'
+            @curr_chunk = nth_chunk(0)
+            @position = 0
+          when 'w'
+            chunk_collection.create_index("chunk_index", ['files_id', 'n'])
+            delete_chunks
+            @curr_chunk = Chunk.new(self, 'n' => 0)
+            @content_type = options[:content_type] if options[:content_type]
+            @chunk_size = options[:chunk_size] if options[:chunk_size]
+            @metadata = options[:metadata] if options[:metadata]
+            @position = 0
+          when 'w+'
+            chunk_collection.create_index("chunk_index", ['files_id', 'n'])
+            @curr_chunk = nth_chunk(last_chunk_number) || Chunk.new(self, 'n' => 0) # might be empty
+            @curr_chunk.pos = @curr_chunk.data.length if @curr_chunk
+            @metadata = options[:metadata] if options[:metadata]
+            @position = @length
+          else
+            raise "error: illegal mode #{mode}"
+          end
+
+          @lineno = 0
+          @pushback_byte = nil
+        end
+
+        def collection
+          @db.collection("#{@root}.files")
+        end
+
+        # Returns collection used for storing chunks. Depends on value of
+        # @root.
+        def chunk_collection
+          @db.collection("#{@root}.chunks")
+        end
+
+        # Change chunk size. Can only change if the file is opened for write
+        # and no data has yet been written.
+        def chunk_size=(size)
+          unless @mode[0] == ?w && @position == 0 && @upload_date == nil
+            raise "error: can only change chunk size if open for write and no data written."
+          end
+          @chunk_size = size
+        end
+
+        #---
+        # ================ reading ================
+        #+++
+
+        def getc
+          if @pushback_byte
+            byte = @pushback_byte
+            @pushback_byte = nil
+            @position += 1
+            byte
+          elsif eof?
+            nil
+          else
+            if @curr_chunk.eof?
+              @curr_chunk = nth_chunk(@curr_chunk.chunk_number + 1)
+            end
+            @position += 1
+            @curr_chunk.getc
+          end
+        end
+
+        def gets(separator=$/)
+          str = ''
+          byte = getc
+          return nil if byte == nil # EOF
+          while byte != nil
+            s = byte.chr
+            str << s
+            break if s == separator
+            byte = getc
+          end
+          @lineno += 1
+          str
+        end
+
+        def read(len=nil, buf=nil)
+          buf ||= ''
+          byte = getc
+          while byte != nil && (len == nil || len > 0)
+            buf << byte.chr
+            len -= 1 if len
+            byte = getc if (len == nil || len > 0)
+          end
+          buf
+        end
+
+        def readchar
+          byte = getc
+          raise EOFError.new if byte == nil
+          byte
+        end
+
+        def readline(separator=$/)
+          line = gets
+          raise EOFError.new if line == nil
+          line
+        end
+
+        def readlines(separator=$/)
+          read.split(separator).collect { |line| "#{line}#{separator}" }
+        end
+
+        def each
+          line = gets
+          while line
+            yield line
+            line = gets
+          end
+        end
+        alias_method :each_line, :each
+
+        def each_byte
+          byte = getc
+          while byte
+            yield byte
+            byte = getc
+          end
+        end
+
+        def ungetc(byte)
+          @pushback_byte = byte
+          @position -= 1
+        end
+
+        #---
+        # ================ writing ================
+        #+++
+
+        def putc(byte)
+          if @curr_chunk.pos == @chunk_size
+            prev_chunk_number = @curr_chunk.chunk_number
+            @curr_chunk.save
+            @curr_chunk = Chunk.new(self, 'n' => prev_chunk_number + 1)
+          end
+          @position += 1
+          @curr_chunk.putc(byte)
+        end
+
+        def print(*objs)
+          objs = [$_] if objs == nil || objs.empty?
+          objs.each { |obj|
+            str = obj.to_s
+            str.each_byte { |byte| putc(byte) }
+          }
+          nil
+        end
+
+        def puts(*objs)
+          if objs == nil || objs.empty?
+            putc(10)
+          else
+            print(*objs.collect{ |obj|
+              str = obj.to_s
+              str << "\n" unless str =~ /\n$/
+              str
+            })
+          end
+          nil
+        end
+
+        def <<(obj)
+          write(obj.to_s)
+        end
+
+        # Writes +string+ as bytes and returns the number of bytes written.
+        def write(string)
+          raise "#@filename not opened for write" unless @mode[0] == ?w
+          count = 0
+          string.each_byte { |byte|
+            putc byte
+            count += 1
+          }
+          count
+        end
+
+        # A no-op.
+        def flush
+        end
+
+        #---
+        # ================ status ================
+        #+++
+
+        def eof
+          raise IOError.new("stream not open for reading") unless @mode[0] == ?r
+          @position >= @length
+        end
+        alias_method :eof?, :eof
+
+        #---
+        # ================ positioning ================
+        #+++
+
+        def rewind
+          if @curr_chunk.chunk_number != 0
+            if @mode[0] == ?w
+              delete_chunks
+              @curr_chunk = Chunk.new(self, 'n' => 0)
+            else
+              @curr_chunk == nth_chunk(0)
+            end
+          end
+          @curr_chunk.pos = 0
+          @lineno = 0
+          @position = 0
+        end
+
+        def seek(pos, whence=IO::SEEK_SET)
+          target_pos = case whence
+                       when IO::SEEK_CUR
+                         @position + pos
+                       when IO::SEEK_END
+                         @length - pos
+                       when IO::SEEK_SET
+                         pos
+                       end
+
+          new_chunk_number = (target_pos / @chunk_size).to_i
+          if new_chunk_number != @curr_chunk.chunk_number
+            @curr_chunk.save if @mode[0] == ?w
+            @curr_chunk = nth_chunk(new_chunk_number)
+          end
+          @position = target_pos
+          @curr_chunk.pos = @position % @chunk_size
+          0
+        end
+
+        def tell
+          @position
+        end
+
+        #---
+        # ================ closing ================
+        #+++
+
+        def close
+          if @mode[0] == ?w
+            if @curr_chunk
+              @curr_chunk.truncate
+              @curr_chunk.save if @curr_chunk.pos > 0
+            end
+            files = collection
+            if @upload_date
+              files.remove('_id' => @files_id)
+            else
+              @upload_date = Time.now
+            end
+            files.insert(to_mongo_object)
+          end
+          @db = nil
+        end
+
+        def closed?
+          @db == nil
+        end
+
+        #---
+        # ================ protected ================
+        #+++
+
+        protected
+
+        def to_mongo_object
+          h = OrderedHash.new
+          h['_id'] = @files_id
+          h['filename'] = @filename
+          h['contentType'] = @content_type
+          h['length'] = @curr_chunk ? @curr_chunk.chunk_number * @chunk_size + @curr_chunk.pos : 0
+          h['chunkSize'] = @chunk_size
+          h['uploadDate'] = @upload_date
+          h['aliases'] = @aliases
+          h['metadata'] = @metadata
+          h
+        end
+
+        def delete_chunks
+          chunk_collection.remove({'files_id' => @files_id}) if @files_id
+          @curr_chunk = nil
+        end
+
+        def nth_chunk(n)
+          mongo_chunk = chunk_collection.find({'files_id' => @files_id, 'n' => n}).next_object
+          Chunk.new(self, mongo_chunk || {})
+        end
+
+        def last_chunk_number
+          (@length / @chunk_size).to_i
+        end
+
+      end
+    end
+  end
+end

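To make the initialize options and class-level helpers above concrete, here is a hedged usage sketch; db and the file name are placeholders and the option values are arbitrary examples:

  GridStore.open(db, 'report.csv', 'w',
                 :content_type => 'text/csv',
                 :chunk_size   => 1024 * 64,
                 :metadata     => {'source' => 'nightly job'}) do |f|
    f.puts 'a,b,c'
  end

  GridStore.read(db, 'report.csv', 5)         # first five bytes
  GridStore.read(db, 'report.csv', nil, 2)    # everything from offset 2
  GridStore.readlines(db, 'report.csv')       # array of lines
  GridStore.unlink(db, 'report.csv')          # removes the file doc and its chunks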
data/lib/mongo/gridfs.rb
ADDED
@@ -0,0 +1 @@
+require 'mongo/gridfs/grid_store'

data/lib/mongo/types/binary.rb
CHANGED
@@ -14,21 +14,29 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 # ++
 
+require 'mongo/util/byte_buffer'
+
 module XGen
   module Mongo
     module Driver
 
-      # An array of binary bytes
-
-      class Binary < String; end
+      # An array of binary bytes with a Mongo subtype value.
+      class Binary < ByteBuffer
 
-
-
-
+        SUBTYPE_BYTES = 0x02
+        SUBTYPE_UUID = 0x03
+        SUBTYPE_MD5 = 0x05
+        SUBTYPE_USER_DEFINED = 0x80
 
-
-
-
-
+        # One of the SUBTYPE_* constants. Default is SUBTYPE_BYTES.
+        attr_accessor :subtype
+
+        def initialize(initial_data=[], subtype=SUBTYPE_BYTES)
+          super(initial_data)
+          @subtype = subtype
+        end
+
+      end
+    end
   end
 end

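A short usage sketch of the reworked Binary class, mirroring the new tests in tests/test_bson.rb further down:

  include XGen::Mongo::Driver

  # Default subtype (SUBTYPE_BYTES): fill the buffer byte by byte.
  bin = Binary.new
  'binstring'.each_byte { |b| bin.put(b) }

  # Or pass initial bytes and a subtype up front.
  tagged = Binary.new([1, 2, 3, 4, 5], Binary::SUBTYPE_USER_DEFINED)
  tagged.subtype    # => 0x80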
data/lib/mongo/util/bson.rb
CHANGED
@@ -26,6 +26,8 @@ require 'mongo/types/undefined'
 # A BSON seralizer/deserializer.
 class BSON
 
+  include XGen::Mongo::Driver
+
   MINKEY = -1
   EOO = 0
   NUMBER = 1
@@ -46,8 +48,6 @@ class BSON
   NUMBER_INT = 16
   MAXKEY = 127
 
-  BYTE_TYPE = 2
-
   if RUBY_VERSION >= '1.9'
     def self.to_utf8(str)
       str.encode("utf-8")
@@ -169,7 +169,7 @@
         doc[key] = nil
       when UNDEFINED
         key = deserialize_cstr(@buf)
-        doc[key] =
+        doc[key] = Undefined.new
      when REF
         key = deserialize_cstr(@buf)
         doc[key] = deserialize_dbref_data(@buf, key, parent)
@@ -242,7 +242,7 @@
     options |= Regexp::MULTILINE if options_str.include?('m')
     options |= Regexp::EXTENDED if options_str.include?('x')
     options_str.gsub!(/[imx]/, '') # Now remove the three we understand
-
+    RegexpOfHolding.new(str, options, options_str)
   end
 
   def deserialize_string_data(buf)
@@ -256,23 +256,20 @@
   end
 
   def deserialize_oid_data(buf)
-
+    ObjectID.new(buf.get(12))
   end
 
   def deserialize_dbref_data(buf, key, parent)
     ns = deserialize_string_data(buf)
     oid = deserialize_oid_data(buf)
-
+    DBRef.new(parent, key, @db, ns, oid)
   end
 
   def deserialize_binary_data(buf)
-    buf.get_int # length + 4; ignored
-    buf.get # byte type; ignored
     len = buf.get_int
-
-
-
-    str.to_mongo_binary
+    type = buf.get
+    len = buf.get_int if type == Binary::SUBTYPE_BYTES
+    Binary.new(buf.get(len), type)
   end
 
   def serialize_eoo_element(buf)
@@ -293,24 +290,19 @@
     buf.put(BINARY)
     self.class.serialize_cstr(buf, key)
 
-    bytes =
-      when ByteBuffer
-        val.to_a
-      else
-        if RUBY_VERSION >= '1.9'
-          val.bytes.to_a
-        else
-          a = []
-          val.each_byte { |byte| a << byte }
-          a
-        end
-      end
-
+    bytes = val.to_a
     num_bytes = bytes.length
-
-
-
-
+    subtype = val.respond_to?(:subtype) ? val.subtype : Binary::SUBTYPE_BYTES
+    if subtype == Binary::SUBTYPE_BYTES
+      buf.put_int(num_bytes + 4)
+      buf.put(subtype)
+      buf.put_int(num_bytes)
+      buf.put_array(bytes)
+    else
+      buf.put_int(num_bytes)
+      buf.put(subtype)
+      buf.put_array(bytes)
+    end
   end
 
   def serialize_undefined_element(buf, key)
@@ -420,7 +412,7 @@
       NUMBER_INT
     when Numeric
       NUMBER
-    when
+    when ByteBuffer
       BINARY
     when String
       # magic awful stuff - the DB requires that a where clause is sent as CODE
@@ -429,9 +421,9 @@
       ARRAY
     when Regexp
       REGEX
-    when
+    when ObjectID
       OID
-    when
+    when DBRef
       REF
     when true, false
       BOOLEAN
@@ -441,7 +433,7 @@
       OBJECT
     when Symbol
       SYMBOL
-    when
+    when Undefined
       UNDEFINED
     else
       raise "Unknown type of object: #{o.class.name}"

data/lib/mongo/util/xml_to_ruby.rb
CHANGED
@@ -45,7 +45,10 @@ class XMLToRuby
     when 'string', 'code'
       e.text.to_s
     when 'binary'
-
+      bin = Binary.new
+      decoded = Base64.decode64(e.text.to_s)
+      decoded.each_byte { |b| bin.put(b) }
+      bin
     when 'symbol'
       e.text.to_s.intern
     when 'boolean'

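The binary element layout the serializer now emits depends on the subtype; paraphrasing serialize_binary_element and deserialize_binary_data above as a sketch:

  # SUBTYPE_BYTES (0x02):  int32(len + 4) | byte(0x02)    | int32(len) | bytes
  # other subtypes:        int32(len)     | byte(subtype) | bytes
  #
  # The deserializer reads the leading int32 and the subtype byte, then
  # re-reads the inner length only when the subtype is SUBTYPE_BYTES.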
data/mongo-ruby-driver.gemspec
CHANGED
@@ -1,13 +1,13 @@
 Gem::Specification.new do |s|
   s.name = 'mongo'
-  s.version = '0.
+  s.version = '0.5.0'
   s.platform = Gem::Platform::RUBY
   s.summary = 'Simple pure-Ruby driver for the 10gen Mongo DB'
   s.description = 'A pure-Ruby driver for the 10gen Mongo DB. For more information about Mongo, see http://www.mongodb.org.'
 
   s.require_paths = ['lib']
 
-  s.files = ['bin/mongo_console', 'bin/
+  s.files = ['bin/mongo_console', 'bin/run_test_script',
              'examples/benchmarks.rb',
              'examples/blog.rb',
              'examples/index_test.rb',
@@ -17,6 +17,9 @@ Gem::Specification.new do |s|
              'lib/mongo/collection.rb',
              'lib/mongo/cursor.rb',
              'lib/mongo/db.rb',
+             'lib/mongo/gridfs/chunk.rb',
+             'lib/mongo/gridfs/grid_store.rb',
+             'lib/mongo/gridfs.rb',
              'lib/mongo/message/get_more_message.rb',
              'lib/mongo/message/insert_message.rb',
              'lib/mongo/message/kill_cursors_message.rb',
@@ -40,13 +43,22 @@ Gem::Specification.new do |s|
              'lib/mongo/util/ordered_hash.rb',
              'lib/mongo/util/xml_to_ruby.rb',
              'README.rdoc', 'Rakefile', 'mongo-ruby-driver.gemspec']
-  s.test_files = ['tests/
+  s.test_files = ['tests/mongo-qa/_common.rb',
                   'tests/mongo-qa/capped',
                   'tests/mongo-qa/circuar',
                   'tests/mongo-qa/count1',
                   'tests/mongo-qa/find',
                   'tests/mongo-qa/remove',
                   'tests/mongo-qa/test1',
                   'tests/test_admin.rb',
                   'tests/test_bson.rb',
                   'tests/test_byte_buffer.rb',
+                  'tests/test_chunk.rb',
                   'tests/test_cursor.rb',
                   'tests/test_db.rb',
                   'tests/test_db_api.rb',
                   'tests/test_db_connection.rb',
+                  'tests/test_grid_store.rb',
                   'tests/test_message.rb',
                   'tests/test_mongo.rb',
                   'tests/test_objectid.rb',

data/tests/mongo-qa/capped
ADDED
@@ -0,0 +1,17 @@
+#!/usr/bin/env ruby
+
+require File.join(File.dirname(__FILE__), '_common.rb')
+db = Mongo.new(DEFAULT_HOST, DEFAULT_PORT).db(DEFAULT_DB)
+
+db.create_collection('capped1', :capped => true, :size => 500)
+coll = db.collection('capped1')
+coll.insert('x' => 1)
+coll.insert('x' => 2)
+
+db.create_collection('capped2', :capped => true, :size => 1000, :max => 11)
+coll = db.collection('capped2')
+str = ''
+100.times {
+  coll.insert('dashes' => str)
+  str << '-'
+}

data/tests/mongo-qa/count1
ADDED
@@ -0,0 +1,10 @@
+#!/usr/bin/env ruby
+
+require File.join(File.dirname(__FILE__), '_common.rb')
+db = Mongo.new(DEFAULT_HOST, DEFAULT_PORT).db(DEFAULT_DB)
+
+puts db.collection('test1').count
+puts db.collection('test2').count
+puts db.collection('test3').count('i' => 'a')
+puts db.collection('test3').count('i' => 3)
+puts db.collection('test3').count({'i' => {'$gte' => 67}})

data/tests/mongo-qa/find
ADDED
data/tests/test_bson.rb
CHANGED
@@ -113,25 +113,40 @@ class BSONTest < Test::Unit::TestCase
   end
 
   def test_binary
-    bin =
-
+    bin = Binary.new
+    'binstring'.each_byte { |b| bin.put(b) }
 
     doc = {'bin' => bin}
     @b.serialize(doc)
     doc2 = @b.deserialize
-
+    bin2 = doc2['bin']
+    assert_kind_of Binary, bin2
+    assert_equal 'binstring', bin2.to_s
+  end
+
+  def test_binary_type
+    bin = Binary.new([1, 2, 3, 4, 5], Binary::SUBTYPE_USER_DEFINED)
+
+    doc = {'bin' => bin}
+    @b.serialize(doc)
+    doc2 = @b.deserialize
+    bin2 = doc2['bin']
+    assert_kind_of Binary, bin2
+    assert_equal [1, 2, 3, 4, 5], bin2.to_a
+    assert_equal Binary::SUBTYPE_USER_DEFINED, bin2.subtype
   end
 
   def test_binary_byte_buffer
     bb = ByteBuffer.new
-
+    5.times { |i| bb.put(i + 1) }
+
     doc = {'bin' => bb}
     @b.serialize(doc)
     doc2 = @b.deserialize
-
-
-
-    assert_equal
+    bin2 = doc2['bin']
+    assert_kind_of Binary, bin2
+    assert_equal [1, 2, 3, 4, 5], bin2.to_a
+    assert_equal Binary::SUBTYPE_BYTES, bin2.subtype
   end
 
   def test_undefined

data/tests/test_chunk.rb
ADDED
@@ -0,0 +1,87 @@
+$LOAD_PATH[0,0] = File.join(File.dirname(__FILE__), '..', 'lib')
+require 'test/unit'
+require 'mongo'
+require 'mongo/gridfs'
+
+class ChunkTest < Test::Unit::TestCase
+
+  include XGen::Mongo::Driver
+  include XGen::Mongo::GridFS
+
+  def setup
+    @host = ENV['MONGO_RUBY_DRIVER_HOST'] || 'localhost'
+    @port = ENV['MONGO_RUBY_DRIVER_PORT'] || Mongo::DEFAULT_PORT
+    @db = Mongo.new(@host, @port).db('ruby-mongo-utils-test')
+
+    @files = @db.collection('gridfs.files')
+    @chunks = @db.collection('gridfs.chunks')
+    @chunks.clear
+    @files.clear
+
+    @f = GridStore.new(@db, 'foobar', 'w')
+    @c = @f.instance_variable_get('@curr_chunk')
+  end
+
+  def teardown
+    if @db && @db.connected?
+      @chunks.clear
+      @files.clear
+      @db.close
+    end
+  end
+
+  def test_pos
+    assert_equal 0, @c.pos
+    assert @c.eof?              # since data is empty
+
+    b = ByteBuffer.new
+    3.times { |i| b.put(i) }
+    c = Chunk.new(@f, 'data' => b)
+    assert !c.eof?
+  end
+
+  def test_getc
+    b = ByteBuffer.new
+    3.times { |i| b.put(i) }
+    c = Chunk.new(@f, 'data' => b)
+
+    assert !c.eof?
+    assert_equal 0, c.getc
+    assert !c.eof?
+    assert_equal 1, c.getc
+    assert !c.eof?
+    assert_equal 2, c.getc
+    assert c.eof?
+  end
+
+  def test_putc
+    3.times { |i| @c.putc(i) }
+    @c.pos = 0
+
+    assert !@c.eof?
+    assert_equal 0, @c.getc
+    assert !@c.eof?
+    assert_equal 1, @c.getc
+    assert !@c.eof?
+    assert_equal 2, @c.getc
+    assert @c.eof?
+  end
+
+  def test_truncate
+    10.times { |i| @c.putc(i) }
+    assert_equal 10, @c.size
+    @c.pos = 3
+    @c.truncate
+    assert_equal 3, @c.size
+
+    @c.pos = 0
+    assert !@c.eof?
+    assert_equal 0, @c.getc
+    assert !@c.eof?
+    assert_equal 1, @c.getc
+    assert !@c.eof?
+    assert_equal 2, @c.getc
+    assert @c.eof?
+  end
+
+end

data/tests/test_grid_store.rb
ADDED
@@ -0,0 +1,235 @@
+$LOAD_PATH[0,0] = File.join(File.dirname(__FILE__), '..', 'lib')
+require 'test/unit'
+require 'mongo'
+require 'mongo/gridfs'
+
+class GridStoreTest < Test::Unit::TestCase
+
+  include XGen::Mongo::Driver
+  include XGen::Mongo::GridFS
+
+  def setup
+    @host = ENV['MONGO_RUBY_DRIVER_HOST'] || 'localhost'
+    @port = ENV['MONGO_RUBY_DRIVER_PORT'] || Mongo::DEFAULT_PORT
+    @db = Mongo.new(@host, @port).db('ruby-mongo-utils-test')
+
+    @files = @db.collection('gridfs.files')
+    @chunks = @db.collection('gridfs.chunks')
+    @chunks.clear
+    @files.clear
+
+    GridStore.open(@db, 'foobar', 'w') { |f| f.write("hello, world!") }
+  end
+
+  def teardown
+    if @db && @db.connected?
+      @chunks.clear
+      @files.clear
+      @db.close
+    end
+  end
+
+  def test_exist
+    assert GridStore.exist?(@db, 'foobar')
+    assert !GridStore.exist?(@db, 'does_not_exist')
+    assert !GridStore.exist?(@db, 'foobar', 'another_root')
+  end
+
+  def test_small_write
+    rows = @files.find({'filename' => 'foobar'}).to_a
+    assert_not_nil rows
+    assert_equal 1, rows.length
+    row = rows[0]
+    assert_not_nil row
+
+    file_id = row['_id']
+    assert_kind_of ObjectID, file_id
+    rows = @chunks.find({'files_id' => file_id}).to_a
+    assert_not_nil rows
+    assert_equal 1, rows.length
+  end
+
+  def test_small_file
+    rows = @files.find({'filename' => 'foobar'}).to_a
+    assert_not_nil rows
+    assert_equal 1, rows.length
+    row = rows[0]
+    assert_not_nil row
+    assert_equal "hello, world!", GridStore.read(@db, 'foobar')
+  end
+
+  def test_overwrite
+    GridStore.open(@db, 'foobar', 'w') { |f| f.write("overwrite") }
+    assert_equal "overwrite", GridStore.read(@db, 'foobar')
+  end
+
+  def test_read_length
+    assert_equal "hello", GridStore.read(@db, 'foobar', 5)
+  end
+
+  # Also tests seek
+  def test_read_with_offset
+    assert_equal "world", GridStore.read(@db, 'foobar', 5, 7)
+    assert_equal "world!", GridStore.read(@db, 'foobar', nil, 7)
+  end
+
+  def test_multi_chunk
+    @chunks.clear
+    @files.clear
+
+    size = 512
+    GridStore.open(@db, 'biggie', 'w') { |f|
+      f.chunk_size = size
+      f.write('x' * size)
+      f.write('y' * size)
+      f.write('z' * size)
+    }
+
+    assert_equal 3, @chunks.count
+    assert_equal ('x' * size) + ('y' * size) + ('z' * size), GridStore.read(@db, 'biggie')
+  end
+
+  def test_puts_and_readlines
+    GridStore.open(@db, 'multiline', 'w') { |f|
+      f.puts "line one"
+      f.puts "line two\n"
+      f.puts "line three"
+    }
+
+    lines = GridStore.readlines(@db, 'multiline')
+    assert_equal ["line one\n", "line two\n", "line three\n"], lines
+  end
+
+  def test_unlink
+    assert_equal 1, @files.count
+    assert_equal 1, @chunks.count
+    GridStore.unlink(@db, 'foobar')
+    assert_equal 0, @files.count
+    assert_equal 0, @chunks.count
+  end
+
+  def test_append
+    GridStore.open(@db, 'foobar', 'w+') { |f| f.write(" how are you?") }
+    assert_equal 1, @chunks.count
+    assert_equal "hello, world! how are you?", GridStore.read(@db, 'foobar')
+  end
+
+  def test_rewind_and_truncate_on_write
+    GridStore.open(@db, 'foobar', 'w') { |f|
+      f.write("some text is inserted here")
+      f.rewind
+      f.write("abc")
+    }
+    assert_equal "abc", GridStore.read(@db, 'foobar')
+  end
+
+  def test_tell
+    GridStore.open(@db, 'foobar', 'r') { |f|
+      f.read(5)
+      assert_equal 5, f.tell
+    }
+  end
+
+  def test_empty_block_ok
+    GridStore.open(@db, 'empty', 'w')
+  end
+
+  def test_save_empty_file
+    @chunks.clear
+    @files.clear
+    GridStore.open(@db, 'empty', 'w') {} # re-write with zero bytes
+    assert_equal 1, @files.count
+    assert_equal 0, @chunks.count
+  end
+
+  def test_empty_file_eof
+    GridStore.open(@db, 'empty', 'w')
+    GridStore.open(@db, 'empty', 'r') { |f|
+      assert f.eof?
+    }
+  end
+
+  def test_cannot_change_chunk_size_on_read
+    begin
+      GridStore.open(@db, 'foobar', 'r') { |f| f.chunk_size = 42 }
+      fail "should have seen error"
+    rescue => ex
+      assert_match /error: can only change chunk size/, ex.to_s
+    end
+  end
+
+  def test_cannot_change_chunk_size_after_data_written
+    begin
+      GridStore.open(@db, 'foobar', 'w') { |f|
+        f.write("some text")
+        f.chunk_size = 42
+      }
+      fail "should have seen error"
+    rescue => ex
+      assert_match /error: can only change chunk size/, ex.to_s
+    end
+  end
+
+  def test_change_chunk_size
+    GridStore.open(@db, 'new-file', 'w') { |f|
+      f.chunk_size = 42
+      f.write("foo")
+    }
+    GridStore.open(@db, 'new-file', 'r') { |f|
+      assert f.chunk_size == 42
+    }
+  end
+
+  def test_chunk_size_in_option
+    GridStore.open(@db, 'new-file', 'w', :chunk_size => 42) { |f| f.write("foo") }
+    GridStore.open(@db, 'new-file', 'r') { |f|
+      assert f.chunk_size == 42
+    }
+  end
+
+  def test_upload_date
+    now = Time.now
+    orig_file_upload_date = nil
+    GridStore.open(@db, 'foobar', 'r') { |f| orig_file_upload_date = f.upload_date }
+    assert_not_nil orig_file_upload_date
+    assert (orig_file_upload_date - now) < 5 # even a really slow system < 5 secs
+
+    sleep(2)
+    GridStore.open(@db, 'foobar', 'w') { |f| f.write "new data" }
+    file_upload_date = nil
+    GridStore.open(@db, 'foobar', 'r') { |f| file_upload_date = f.upload_date }
+    assert_equal orig_file_upload_date, file_upload_date
+  end
+
+  def test_content_type
+    ct = nil
+    GridStore.open(@db, 'foobar', 'r') { |f| ct = f.content_type }
+    assert_equal GridStore::DEFAULT_CONTENT_TYPE, ct
+
+    GridStore.open(@db, 'foobar', 'w+') { |f| f.content_type = 'text/html' }
+    ct2 = nil
+    GridStore.open(@db, 'foobar', 'r') { |f| ct2 = f.content_type }
+    assert_equal 'text/html', ct2
+  end
+
+  def test_content_type_option
+    GridStore.open(@db, 'new-file', 'w', :content_type => 'image/jpg') { |f| f.write('foo') }
+    ct = nil
+    GridStore.open(@db, 'new-file', 'r') { |f| ct = f.content_type }
+    assert_equal 'image/jpg', ct
+  end
+
+  def test_unknown_mode
+    GridStore.open(@db, 'foobar', 'x')
+    fail 'should have seen "illegal mode" error raised'
+  rescue => ex
+    assert_equal "error: illegal mode x", ex.to_s
+  end
+
+  def test_metadata
+    GridStore.open(@db, 'foobar', 'r') { |f| assert_nil f.metadata }
+    GridStore.open(@db, 'foobar', 'w+') { |f| f.metadata = {'a' => 1} }
+    GridStore.open(@db, 'foobar', 'r') { |f| assert_equal({'a' => 1}, f.metadata) }
+  end
+
+end

metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: mongodb-mongo
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.5.0
 platform: ruby
 authors:
 - Jim Menard
@@ -23,7 +23,7 @@ extra_rdoc_files:
 - README.rdoc
 files:
 - bin/mongo_console
-- bin/
+- bin/run_test_script
 - examples/benchmarks.rb
 - examples/blog.rb
 - examples/index_test.rb
@@ -33,6 +33,9 @@ files:
 - lib/mongo/collection.rb
 - lib/mongo/cursor.rb
 - lib/mongo/db.rb
+- lib/mongo/gridfs/chunk.rb
+- lib/mongo/gridfs/grid_store.rb
+- lib/mongo/gridfs.rb
 - lib/mongo/message/get_more_message.rb
 - lib/mongo/message/insert_message.rb
 - lib/mongo/message/kill_cursors_message.rb
@@ -87,13 +90,22 @@ signing_key:
 specification_version: 2
 summary: Simple pure-Ruby driver for the 10gen Mongo DB
 test_files:
+- tests/mongo-qa/_common.rb
+- tests/mongo-qa/capped
+- tests/mongo-qa/circuar
+- tests/mongo-qa/count1
+- tests/mongo-qa/find
+- tests/mongo-qa/remove
+- tests/mongo-qa/test1
 - tests/test_admin.rb
 - tests/test_bson.rb
 - tests/test_byte_buffer.rb
+- tests/test_chunk.rb
 - tests/test_cursor.rb
 - tests/test_db.rb
 - tests/test_db_api.rb
 - tests/test_db_connection.rb
+- tests/test_grid_store.rb
 - tests/test_message.rb
 - tests/test_mongo.rb
 - tests/test_objectid.rb

data/bin/validate
DELETED
@@ -1,51 +0,0 @@
-#!/usr/bin/env ruby
-#
-# usage: validate somefile.xson somefile.bson
-#
-# Reads somefile.xson file (XML that describes a Mongo-type document),
-# converts it into a Ruby OrderedHash, runs that through the BSON
-# serialization code, and writes the BSON bytes to somefile.bson.
-#
-# In addition, this script takes the generated BSON, reads it in then writes
-# it back out to a temp BSON file. If they are different, we report that error
-# to STDOUT.
-#
-# This script is used by the mongo-qa project
-# (http://github.com/mongodb/mongo-qa).
-
-$LOAD_PATH[0,0] = File.join(File.dirname(__FILE__), '..', 'lib')
-require 'mongo'
-require 'mongo/util/xml_to_ruby'
-
-if ARGV.length < 2
-  $stderr.puts "usage: validate somefile.xson somefile.bson"
-  exit 1
-end
-
-# Translate the .xson XML into a Ruby object, turn that object into BSON, and
-# write the BSON to the file as requested.
-obj = File.open(ARGV[0], 'rb') { |f| XMLToRuby.new.xml_to_ruby(f) }
-bson = BSON.new.serialize(obj).to_a
-File.open(ARGV[1], 'wb') { |f| bson.each { |b| f.putc(b) } }
-
-# Now the additional testing. Read the generated BSON back in, deserialize it,
-# and re-serialize the results. Compare that BSON with the BSON from the file
-# we output.
-bson = File.open(ARGV[1], 'rb') { |f| f.read }
-bson = if RUBY_VERSION >= '1.9'
-         bson.bytes.to_a
-       else
-         bson.split(//).collect { |c| c[0] }
-       end
-
-# Turn the Ruby object into BSON bytes and compare with the BSON bytes from
-# the file.
-bson_from_ruby = BSON.new.serialize(obj).to_a
-
-if bson.length != bson_from_ruby.length
-  $stderr.puts "error: round-trip BSON lengths differ when testing #{ARGV[0]}"
-  exit 1
-elsif bson != bson_from_ruby
-  $stderr.puts "error: round-trip BSON contents differ when testing #{ARGV[0]}"
-  exit 1
-end