rffdb 0.0.5

checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA1:
+   metadata.gz: bc32e7e6da39ca78200a564f07ababf89cb3749f
+   data.tar.gz: a9978b86ec538b44b238e10e32fa85bb4bc83136
+ SHA512:
+   metadata.gz: 8220e8ef033aa49a605148edf591334d6449e569ca38a3a823b2b6019759d077e390a3be3e1d6e5c718b22e79eeecf9a3c0b42443fb057c16748f40978b2fd22
+   data.tar.gz: 84ccd13638927c947da052720ffda3fcd50869a0a267308f5c9a6e466deebbb8a908929b31bba261a839f60b1bdd9048878f981b7d19e360dc0cd888b7409dfb
data/LICENSE ADDED
@@ -0,0 +1,22 @@
+ Copyright (c) 2014 Jonathan Gnagy <jonathan.gnagy@gmail.com>
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without
+ restriction, including without limitation the rights to use,
+ copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following
+ conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
data/lib/rffdb/cache_provider.rb ADDED
@@ -0,0 +1,14 @@
+ module RubyFFDB
+   # Generic Cache Provider definition. Any subclass *must* implement or inherit the methods defined here (if any).
+   class CacheProvider
+     # Used for pulling data from the cache
+     def [](key)
+       nil
+     end
+
+     # Used for storing data in the cache
+     def []=(key, value)
+       false
+     end
+   end
+ end
data/lib/rffdb/cache_providers/lru_cache.rb ADDED
@@ -0,0 +1,162 @@
+ module RubyFFDB
+   module CacheProviders
+     # A very simple Least Recently Used (LRU) cache implementation. Stores data in a Hash,
+     # uses a dedicated Array for storing and sorting keys (and implementing the LRU algorithm),
+     # and doesn't bother storing access information for cache data. It stores hit and miss counts
+     # for the entire cache (not for individual keys). It also uses three mutexes for thread-safety:
+     # a write lock, a read lock, and a metadata lock.
+     class LRUCache < CacheProvider
+       attr_reader :max_size, :keys
+
+       # @raise [Exceptions::InvalidCacheSize] if the max_size isn't an Integer
+       def initialize(max_size = 100)
+         raise Exceptions::InvalidCacheSize unless max_size.kind_of?(Integer)
+
+         @max_size = max_size
+         @hits = 0
+         @misses = 0
+         @keys = []
+         @data = {}
+         @read_mutex = Mutex.new
+         @write_mutex = Mutex.new
+         @meta_mutex = Mutex.new
+       end
+
+       # Does the cache contain the requested item?
+       # @param key [Symbol] the index of the potentially cached object
+       def has?(key)
+         @meta_mutex.synchronize { @keys.include?(key) }
+       end
+
+       alias_method :has_key?, :has?
+       alias_method :include?, :has?
+
+       # The number of items in the cache
+       # @return [Fixnum] key count
+       def size
+         @meta_mutex.synchronize { @keys.size }
+       end
+
+       # Convert the contents of the cache to a Hash
+       # @return [Hash] the cached data
+       def to_hash
+         @read_mutex.synchronize { @data.dup }
+       end
+
+       # Return a raw Array of the cache data without its keys.
+       # Not particularly useful now, but it may prove useful in the future.
+       # @return [Array] just the cached values
+       def values
+         @read_mutex.synchronize { @data.values }
+       end
+
+       # Allow iterating over the cached items, represented as key+value pairs
+       def each(&block)
+         to_hash.each(&block)
+       end
+
+       # Invalidate a cached item by its index / key. Returns `nil` if the object doesn't exist.
+       # @param key [Symbol] the cached object's index
+       def invalidate(key)
+         invalidate_key(key)
+         @write_mutex.synchronize { @data.delete(key) }
+       end
+
+       alias_method :delete, :invalidate
+
+       # Remove all items from the cache without clearing statistics
+       # @return [Boolean] was the truncate operation successful?
+       def truncate
+         @read_mutex.synchronize do
+           @write_mutex.synchronize do
+             @meta_mutex.synchronize { @keys = [] }
+             @data = {}
+           end
+           @data.empty?
+         end
+       end
+
+       # Similar to {#truncate} (in fact, it calls it) but it also clears the statistical metadata.
+       # @return [Boolean] was the flush operation successful?
+       def flush
+         if truncate
+           @meta_mutex.synchronize { @hits, @misses = 0, 0 }
+           true
+         else
+           false
+         end
+       end
+
+       # Provides a hash of the current metadata for the cache. It provides the current cache size (`:size`),
+       # the number of cache hits (`:hits`), and the number of cache misses (`:misses`).
+       # @return [Hash] cache statistics
+       def statistics
+         {size: size, hits: @meta_mutex.synchronize { @hits }, misses: @meta_mutex.synchronize { @misses }}
+       end
+
+       # Store some data (`value`) indexed by a `key`. If an object exists with the same key, and the
+       # value is different, it will be overwritten. Storing a value causes its key to be moved to the end
+       # of the keys array (meaning it is the __most recently used__ item), and this happens on #store regardless
+       # of whether or not the key previously existed. This behavior is relied upon by {#retrieve} to allow
+       # reorganization of the keys without necessarily modifying the data it indexes. Uses recursion for overwriting
+       # existing items.
+       #
+       # @param key [Symbol] the index to use for referencing this cached item
+       # @param value [Object] the data to cache
+       def store(key, value)
+         if has?(key)
+           if @read_mutex.synchronize { @data[key] == value }
+             invalidate_key(key)
+             @meta_mutex.synchronize { @keys << key }
+             value
+           else
+             invalidate(key)
+             store(key, value)
+           end
+         else
+           if size >= @max_size
+             invalidate(@keys.first) until size < @max_size
+           end
+
+           @write_mutex.synchronize do
+             @meta_mutex.synchronize { @keys << key }
+             @data[key] = value
+           end
+         end
+       end
+
+       alias_method :[]=, :store
+
+       # Retrieve an item from the cache. Returns `nil` if the item doesn't exist. Relies on {#store} returning the
+       # stored value to ensure the LRU algorithm is maintained safely.
+       # @param key [Symbol] the index to retrieve
+       def retrieve(key)
+         if has?(key)
+           @meta_mutex.synchronize { @hits += 1 }
+           # Looks dumb, as it stores the value again, but it actually only reorganizes the keys Array
+           store(key, @read_mutex.synchronize { @data[key] })
+         else
+           @meta_mutex.synchronize { @misses += 1 }
+           nil
+         end
+       end
+
+       alias_method :[], :retrieve
+
+       def marshal_dump
+         [@max_size, @hits, @misses, @keys, @data]
+       end
+
+       def marshal_load(array)
+         @max_size, @hits, @misses, @keys, @data = array
+       end
+
+       private
+
+       # Invalidate just the key of a cached item. Dangerous if used incorrectly.
+       def invalidate_key(key)
+         @meta_mutex.synchronize { @keys.delete(key) }
+       end
+     end
+   end
+ end
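
A minimal usage sketch of the LRU cache above (the keys and values are hypothetical; the counters behave as implemented in #retrieve):

    require 'rffdb'

    cache = RubyFFDB::CacheProviders::LRUCache.new(2) # hold at most two items
    cache[:a] = 1       # []= is an alias for #store
    cache[:b] = 2
    cache[:a]           # => 1; a hit, and :a becomes most recently used
    cache[:c] = 3       # evicts :b, the least recently used key
    cache.has?(:b)      # => false
    cache.statistics    # => {:size=>2, :hits=>1, :misses=>0}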
data/lib/rffdb/cache_providers/rr_cache.rb ADDED
@@ -0,0 +1,35 @@
+ module RubyFFDB
+   module CacheProviders
+     # A simple Random Replacement (RR) cache implementation. Stores data in a Hash,
+     # uses a dedicated Array for storing keys (and implementing the RR algorithm),
+     # and doesn't bother storing access information for cache data. It stores hit and miss counts
+     # for the entire cache (not for individual keys). It also uses three mutexes for thread-safety:
+     # a write lock, a read lock, and a metadata lock. The RRCache borrows nearly all its functionality
+     # from the {LRUCache}, only overwriting the storage (and therefore the revocation) method.
+     class RRCache < LRUCache
+       # Store some data (`value`) indexed by a `key`. If an object exists with the same key, and the
+       # value is different, it will be overwritten. Storing a new item when the cache is full causes
+       # a random entry to be evicted, picked by shuffling a copy of the keys Array. The keys themselves
+       # are stored in the order in which they were inserted (not shuffled).
+       #
+       # @param key [Symbol] the index to use for referencing this cached item
+       # @param value [Object] the data to cache
+       def store(key, value)
+         if has?(key)
+           super(key, value)
+         else
+           if size >= @max_size
+             invalidate(@keys.shuffle.first) until size < @max_size
+           end
+
+           @write_mutex.synchronize do
+             @meta_mutex.synchronize { @keys << key }
+             @data[key] = value
+           end
+         end
+       end
+
+       alias_method :[]=, :store
+     end
+   end
+ end
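
The same sketch with the RR cache shows the one behavioral difference, eviction order (hypothetical keys; note that lib/rffdb.rb below does not require rr_cache.rb, so it is loaded explicitly here):

    require 'rffdb'
    require 'rffdb/cache_providers/rr_cache'

    cache = RubyFFDB::CacheProviders::RRCache.new(2)
    cache[:a] = 1
    cache[:b] = 2
    cache[:c] = 3   # evicts :a or :b at random (@keys.shuffle.first)
    cache.size      # => 2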
data/lib/rffdb/document.rb ADDED
@@ -0,0 +1,225 @@
+ module RubyFFDB
+   class Document
+
+     # @raise [Exceptions::NoSuchDocument] when attempting to retrieve a non-existing document by id
+     def initialize(existing_id = false, lazy = true)
+       if existing_id
+         @document_id = existing_id
+         raise Exceptions::NoSuchDocument unless File.exists?(file_path)
+         if lazy
+           @lazy = true
+         else
+           reload(true)
+           @lazy = false
+         end
+         @saved = true
+       else
+         @document_id = storage.next_id(self.class)
+         @data = {}
+         # relative to database root
+         @saved = false
+       end
+       @read_lock = Mutex.new
+       @write_lock = Mutex.new
+     end
+
+     # Overrides the Object#id method to deliver an id derived from this document's storage engine
+     # @return [Fixnum] the object id from the storage engine
+     def id
+       @document_id
+     end
+
+     # The location of the flat-file
+     # @return [String] the path to the flat-file used to store this document (may not exist yet)
+     def file_path
+       storage.file_path(self.class, @document_id)
+     end
+
+     # Commit the document to storage
+     # @return [Boolean]
+     def commit
+       @read_lock.synchronize do
+         @write_lock.synchronize do
+           storage.store(self.class, @document_id, @data.dup) unless @saved
+           @saved = true
+         end
+       end
+     end
+
+     alias_method :save, :commit
+
+     # Has this document been committed to storage?
+     # @return [Boolean]
+     def committed?
+       return @saved
+     end
+
+     # Retrieve the stored data from disk, never using cache. Allows forcing an overwrite of uncommitted changes.
+     # @raise [Exceptions::PendingChanges] if attempting to reload with uncommitted changes (and if `force` is false)
+     def reload(force = false)
+       if committed? or force
+         @read_lock.synchronize do
+           @write_lock.synchronize do
+             @data = storage.retrieve(self.class, @document_id, false)
+           end
+         end
+       else
+         raise Exceptions::PendingChanges
+       end
+       @read_lock.synchronize do
+         @write_lock.synchronize { @saved = true }
+       end
+     end
+
+     # Overwrites the document's data, either from disk or from cache. Useful for lazy-loading and not
+     # typically used directly. Since data might have been pulled from cache, this can lead to bizarre
+     # behavior if used carelessly while other code relies on #committed? or @saved.
+     def refresh
+       @write_lock.synchronize do
+         @data = storage.retrieve(self.class, @document_id)
+         @saved = true
+       end
+     end
+
+     # Currently an alias for #new, but used as a wrapper in case more work needs to be done
+     # before pulling a document from the storage engine (such as sanitizing input, etc)
+     def self.load(id)
+       return self.new(id)
+     end
+
+     self.singleton_class.send(:alias_method, :get, :load)
+
+     # This DSL method is used to define the schema for a document. It sets up all data access for the class,
+     # and allows specifying strict checks on that schema during its use, such as validations, class types, regexp
+     # formatting, etc.
+     #
+     # @param name [Symbol] the unique name of the attribute
+     # @option options [Class] :class (Object) the expected object class for this attribute
+     # @option options [Regexp] :format a regular expression for the required format of the attribute (for any :class that supports #.to_s)
+     # @option options [Array, Symbol] :validate either a symbol or array of symbols referencing the instance method(s) to use to validate this attribute
+     def self.attribute(name, options = {})
+       @structure ||= {}
+       @structure[name.to_sym] = {}
+       # setup the schema
+       @structure[name.to_sym][:class] = options.has_key?(:class) ? options[:class] : Object
+       @structure[name.to_sym][:format] = options.has_key?(:format) ? options[:format] : nil
+       @structure[name.to_sym][:validations] = options.has_key?(:validate) ? [*options[:validate]] : []
+     end
+
+     # This DSL method is used to setup the backend {StorageEngine} class and optionally the {CacheProvider}
+     # for this Document type.
+     #
+     # @param storage_engine [Class] the {StorageEngine} child class to use
+     # @option cache_opts [Class] :cache_provider (CacheProviders::LRUCache) the {CacheProvider} child class for caching
+     # @option cache_opts [Fixnum] :cache_size the cache size, in terms of the number of objects stored
+     # @raise [Exceptions::InvalidEngine] if the specified {StorageEngine} does not exist
+     # @raise [Exceptions::InvalidCacheProvider] if a cache_provider is specified and it isn't a type of {CacheProvider}
+     def self.engine(storage_engine, cache_opts = {})
+       raise Exceptions::InvalidEngine unless storage_engine.instance_of? Class and storage_engine.ancestors.include?(StorageEngine)
+       @engine = storage_engine
+       if cache_opts.has_key?(:cache_provider)
+         # Make sure the cache provider specified is valid
+         unless cache_opts[:cache_provider].instance_of? Class and cache_opts[:cache_provider].ancestors.include?(CacheProvider)
+           raise Exceptions::InvalidCacheProvider
+         end
+         @engine.cache_provider(self, cache_opts[:cache_provider])
+       end
+       if cache_opts.has_key?(:cache_size)
+         @engine.cache_size(self, cache_opts[:cache_size])
+       end
+     end
+
+     # @return [StorageEngine] a reference to the storage engine singleton of this document class
+     def self.storage
+       @engine ||= StorageEngines::YamlEngine
+       @engine
+     end
+
+     # @return [StorageEngine] a reference to the storage engine singleton of this document class
+     def storage
+       self.class.send(:storage)
+     end
+
+     # @return [Hash] a copy of the schema information for this class
+     def self.structure
+       @structure ||= {}
+       @structure.dup
+     end
+
+     # @return [Hash] a copy of the schema information for this class
+     def structure
+       self.class.send(:structure)
+     end
+
+     # Sets the maximum number of entries the cache instance for this document will hold.
+     # Note, this clears the current contents of the cache.
+     # @param size [Fixnum] the maximum size of this class' cache instance
+     def self.cache_size(size)
+       storage.cache_size(self, size)
+     end
+
+     # Allow direct access to the cache instance of this document class
+     # @return [CacheProvider] this class' cache instance
+     def self.cache
+       storage.cache(self)
+     end
+
+     # Return all available instances of this type
+     # @return [DocumentCollection] all documents of this type
+     def self.all
+       DocumentCollection.new(storage.all(self).collect {|doc_id| load(doc_id)}, self)
+     end
+
+     # Query for Documents based on an attribute
+     # @see DocumentCollection#where
+     def self.where(attribute, value, comparison_method = "==")
+       all.where(attribute, value, comparison_method)
+     end
+
+     # Uses the defined schema to setup getter and setter methods. Runs validations,
+     # format checking, and type checking on setting methods.
+     # @raise [Exceptions::FailedValidation] if validation of an attribute fails while setting
+     # @raise [Exceptions::InvalidInput] if, while setting, an attribute fails to conform to the type or format defined in the schema
+     def method_missing(method, *args, &block)
+       setter = method.to_s.match(/.*=$/) ? true : false
+       key = setter ? method.to_s.match(/(.*)=$/)[1].to_sym : method.to_s.to_sym
+
+       if structure.has_key?(key) and setter
+         if args.last.kind_of? structure[key][:class] and (structure[key][:format].nil? or args.last.to_s.match structure[key][:format])
+           valid = true
+           structure[key][:validations].each do |validation|
+             valid = self.send(validation.to_sym, args.last)
+             raise Exceptions::FailedValidation unless valid
+           end
+           refresh if @read_lock.synchronize { @lazy } and @read_lock.synchronize { committed? } # here is where the lazy-loading happens
+           @read_lock.synchronize do
+             @write_lock.synchronize do
+               @data[key.to_s] = args.last if valid
+             end
+           end
+         else
+           raise Exceptions::InvalidInput
+         end
+         @saved = false
+       elsif structure.has_key?(key)
+         refresh if @read_lock.synchronize { @lazy } and @read_lock.synchronize { committed? } # here is where the lazy-loading happens
+         @read_lock.synchronize do
+           @data[key.to_s]
+         end
+       else
+         super
+       end
+     end
+
+     def respond_to?(method)
+       key = method.to_s.match(/.*=$/) ? method.to_s.match(/(.*)=$/)[1].to_sym : method.to_s.to_sym
+
+       if structure.has_key?(key)
+         true
+       else
+         super
+       end
+     end
+
+   end
+ end
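
Putting the DSL together, a sketch of a Document subclass (the Song class, its attributes, and the sane_rating validator are hypothetical examples; YamlEngine and LRUCache are the defaults named above):

    require 'rffdb'

    class Song < RubyFFDB::Document
      engine RubyFFDB::StorageEngines::YamlEngine,
             cache_provider: RubyFFDB::CacheProviders::LRUCache,
             cache_size: 50

      attribute :title,  class: String
      attribute :artist, class: String, format: /\w+/
      attribute :rating, class: Integer, validate: :sane_rating

      # validators receive the candidate value and return true/false
      def sane_rating(value)
        (0..10).include?(value)
      end
    end

    song = Song.new           # grabs the next id from the storage engine
    song.title  = 'Stairway'  # type/format checked via method_missing
    # song.rating = 11 would raise RubyFFDB::Exceptions::FailedValidation
    song.rating = 8
    song.commit               # writes Song/documents/1.yml (relative path)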
data/lib/rffdb/document_collection.rb ADDED
@@ -0,0 +1,72 @@
+ module RubyFFDB
+   class DocumentCollection
+     include Enumerable
+
+     # @return [Class] this is a collection of this {Document} subclass
+     attr_reader :type
+
+     # @param list [#to_a] the list of Documents to reference
+     # @param type [Class] the type of Document this collection references
+     def initialize(list, type = Document)
+       @list = list.to_a
+       @type = type
+     end
+
+     # Iterates over the list of Document instances
+     def each(&block)
+       @list.each(&block)
+     end
+
+     # Returns the number of Document instances in the collection
+     # @return [Fixnum]
+     def size
+       @list.size
+     end
+
+     # Return the first item in the collection
+     # @return [Document] the first item in the collection
+     def first
+       @list.first
+     end
+
+     # Return the last item in the collection
+     # @return [Document] the last item in the collection
+     def last
+       @list.last
+     end
+
+     # Return the collection item at the specified index
+     # @return [Document,DocumentCollection] the item at the requested index
+     def [](index)
+       if index.kind_of?(Range)
+         self.class.new(@list[index], @type)
+       else
+         @list[index]
+       end
+     end
+
+     # Allow complex sorting like an Array
+     # @return [DocumentCollection] sorted collection
+     def sort(&block)
+       self.class.new(super(&block), @type)
+     end
+
+     # Horribly inefficient way to allow querying Documents by their attributes.
+     # This method can be chained for multiple / more specific queries.
+     #
+     # @param attribute [Symbol] the attribute to query
+     # @param value [Object] the value to compare against
+     # @param comparison_method [String,Symbol] the method to use for comparison - allowed options are '==', '>', '>=', '<', '<=', and 'match'
+     # @raise [Exceptions::InvalidWhereQuery] if not the right kind of comparison
+     # @return [DocumentCollection]
+     def where(attribute, value, comparison_method = '==')
+       unless [:'==', :'>', :'>=', :'<', :'<=', :match].include?(comparison_method.to_sym)
+         raise Exceptions::InvalidWhereQuery
+       end
+       self.class.new(
+         @list.collect {|item| item if item.send(attribute).send(comparison_method.to_sym, value) }.compact,
+         @type
+       )
+     end
+   end
+ end
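
Because #where returns another DocumentCollection, queries chain naturally; reusing the hypothetical Song class from the sketch above:

    good = Song.where(:rating, 5, '>=')             # DocumentCollection
    best = good.where(:artist, /Zeppelin/, 'match') # chained refinement
    best.sort { |a, b| a.rating <=> b.rating }.last # highest-rated match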
data/lib/rffdb/exception.rb ADDED
@@ -0,0 +1,5 @@
+ module RubyFFDB
+   # Generic Exception definition. Any subclass *must* implement or inherit the methods defined here (if any).
+   class Exception < StandardError
+   end
+ end
data/lib/rffdb/exceptions/cache_exceptions.rb ADDED
@@ -0,0 +1,9 @@
+ module RubyFFDB
+   module Exceptions
+     class InvalidCacheSize < Exception
+     end
+
+     class InvalidCacheProvider < Exception
+     end
+   end
+ end
data/lib/rffdb/exceptions/document_exceptions.rb ADDED
@@ -0,0 +1,22 @@
+ module RubyFFDB
+   module Exceptions
+     class FailedValidation < Exception
+     end
+
+     class InvalidEngine < Exception
+     end
+
+     class InvalidInput < Exception
+     end
+
+     class InvalidWhereQuery < Exception
+     end
+
+     class NoSuchDocument < Exception
+     end
+
+     class PendingChanges < Exception
+     end
+   end
+ end
+
data/lib/rffdb/storage_engine.rb ADDED
@@ -0,0 +1,123 @@
+ module RubyFFDB
+   # Generic Storage Engine definition. Any subclass *must* implement or inherit the methods defined here (if any).
+   class StorageEngine
+     # Read locking by document type
+     # @param type [Document] implements the equivalent of table-level read-locking on this {Document} type
+     def self.read_lock(type, &block)
+       @read_mutexes ||= {}
+       @read_mutexes[type] ||= Mutex.new
+       @read_mutexes[type].synchronize(&block)
+     end
+
+     # Write locking by document type, with implicit read locking
+     # @param type [Document] implements the equivalent of table-level write-locking on this {Document} type
+     def self.write_lock(type, &block)
+       @write_mutexes ||= {}
+       @write_mutexes[type] ||= Mutex.new
+       @write_mutexes[type].synchronize do
+         read_lock(type, &block)
+       end
+     end
+
+     # Store data
+     # @param type [Document] type of {Document} to store
+     # @param object_id [Object] unique identifier for the data to store (usually an Integer)
+     # @param data [Object] data to be stored
+     def self.store(type, object_id, data)
+       false
+     end
+
+     # Retrieve some stored data
+     # @param type [Document] type of {Document} to retrieve
+     # @param object_id [Object] unique identifier for the stored data (usually an Integer)
+     # @param use_caching [Boolean] attempt to pull the data from cache (or not)
+     def self.retrieve(type, object_id, use_caching = true)
+       false
+     end
+
+     # Flush all changes to disk (usually done automatically)
+     def self.flush
+       false
+     end
+
+     # The full path to a stored (or would-be stored) {Document}
+     # @param type [Document] the document type
+     # @param object_id [Object] unique identifier for the document (usually an Integer)
+     def self.file_path(type, object_id)
+       false
+     end
+
+     # Return all known instances of a {Document}
+     # @param type [Document] the document type
+     # @return [Array]
+     def self.all(type)
+       []
+     end
+
+     # Determine the next unique identifier available for a {Document} type
+     # @param type [Document] the document type
+     # @return [Fixnum]
+     def self.next_id(type)
+       last_id = all(type)[-1]
+       next_key = last_id.nil? ? 1 : (last_id + 1)
+       if @highest_known_key and @highest_known_key >= next_key
+         write_lock(type) { @highest_known_key += 1 }
+       else
+         write_lock(type) { @highest_known_key = next_key }
+       end
+     end
+
+     # Set the cache provider to use for a document type.
+     # This completely flushes the existing cache.
+     # @param document_type [Document] the document type
+     # @param cache_provider_class [CacheProvider] the type of {CacheProvider} to use
+     def self.cache_provider(document_type, cache_provider_class)
+       unless cache_provider_class.instance_of? Class and cache_provider_class.ancestors.include?(CacheProvider)
+         raise Exceptions::InvalidCacheProvider
+       end
+       @caches ||= {}
+       @caches[document_type] = cache_provider_class.new
+     end
+
+     # Set the maximum size of a cache, based on {Document} type
+     # @param type [Document] the document type
+     # @param size [Fixnum] the maximum size of the cache
+     def self.cache_size(type, size)
+       @caches ||= {}
+       if @caches.has_key?(type)
+         @caches[type] = @caches[type].class.new(size)
+       else
+         @caches[type] = CacheProviders::LRUCache.new(size)
+       end
+     end
+
+     # Attempt to retrieve an item from the {Document} type's cache instance
+     # @param type [Document] the document type
+     # @param object_id [Object] unique identifier for the document (usually an Integer)
+     def self.cache_lookup(type, object_id)
+       @caches ||= {}
+       @caches[type] ||= CacheProviders::LRUCache.new
+       @caches[type][object_id.to_s]
+     end
+
+     # Store some data in the cache for the {Document} type
+     # @param type [Document] the document type
+     # @param object_id [Object] unique identifier for the document (usually an Integer)
+     # @param data [Object] data to be stored
+     # @return [Boolean]
+     def self.cache_store(type, object_id, data)
+       @caches ||= {}
+       @caches[type] ||= CacheProviders::LRUCache.new
+       @caches[type][object_id.to_s] = data
+       return true
+     end
+
+     # Allow access to the cache instance directly (kind of dangerous but helpful for troubleshooting)
+     # @param type [Document] the document type
+     # @return [CacheProvider]
+     def self.cache(type)
+       @caches ||= {}
+       @caches[type] ||= CacheProviders::LRUCache.new
+     end
+   end
+ end
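
The class above doubles as the contract for custom engines: override the storage class methods and reuse the inherited locking and caching helpers. A minimal sketch under that assumption (MarshalEngine is hypothetical and not part of this gem; a complete engine would also override .all):

    require 'fileutils'

    module RubyFFDB
      module StorageEngines
        # Hypothetical engine persisting documents with Marshal
        class MarshalEngine < StorageEngine
          def self.store(type, object_id, data)
            path = file_path(type, object_id)
            write_lock(type) do
              FileUtils.mkdir_p(File.dirname(path))
              File.open(path, 'wb') { |f| f.write(Marshal.dump(data)) }
              cache_store(type, object_id, data)
            end
            true
          end

          def self.retrieve(type, object_id, use_caching = true)
            result = cache_lookup(type, object_id) if use_caching
            result ||= read_lock(type) do
              Marshal.load(File.binread(file_path(type, object_id)))
            end
            cache_store(type, object_id, result)
            result.dup
          end

          def self.file_path(type, object_id)
            File.join(type.to_s.gsub('::', '__'), 'documents', "#{object_id}.bin")
          end
        end
      end
    end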
data/lib/rffdb/storage_engines/json_engine.rb ADDED
@@ -0,0 +1,52 @@
+ module RubyFFDB
+   module StorageEngines
+     class JsonEngine < StorageEngine
+       # TODO add support for sharding since directories will fill up quickly
+       require 'json'
+
+       def self.store(type, object_id, data)
+         path = file_path(type, object_id)
+         write_lock(type) do
+           FileUtils.mkdir_p(File.dirname(path))
+           File.open(path, "w") do |file|
+             file.puts JSON.dump(data)
+           end
+           cache_store(type, object_id, data)
+         end
+         return true
+       end
+
+       def self.retrieve(type, object_id, use_caching = true)
+         result = nil
+         begin
+           result = cache_lookup(type, object_id) if use_caching
+           unless result
+             read_lock(type) do
+               file = File.open(file_path(type, object_id), "r")
+               result = JSON.load(file)
+               file.close
+             end
+           end
+           cache_store(type, object_id, result)
+         rescue => e
+           puts e.message
+         end
+         return result.dup # Return a duplicate to support caching
+       end
+
+       # Lazily grab all document ids in use
+       def self.all(type)
+         directory_glob = read_lock(type) { Dir.glob(File.join(File.dirname(file_path(type, 0)), "*.json")) }
+         if directory_glob and !directory_glob.empty?
+           directory_glob.map {|doc| Integer(File.basename(doc, ".json"))}.sort
+         else
+           []
+         end
+       end
+
+       def self.file_path(type, object_id)
+         File.join(type.to_s.gsub('::', "__"), 'documents', object_id.to_s + ".json")
+       end
+     end
+   end
+ end
data/lib/rffdb/storage_engines/yaml_engine.rb ADDED
@@ -0,0 +1,50 @@
+ module RubyFFDB
+   module StorageEngines
+     class YamlEngine < StorageEngine
+       # TODO add support for sharding since directories will fill up quickly
+       require 'yaml'
+
+       def self.store(type, object_id, data)
+         path = file_path(type, object_id)
+         write_lock(type) do
+           FileUtils.mkdir_p(File.dirname(path))
+           File.open(path, "w") do |file|
+             file.puts YAML.dump(data)
+           end
+           cache_store(type, object_id, data)
+         end
+         return true
+       end
+
+       def self.retrieve(type, object_id, use_caching = true)
+         result = nil
+         begin
+           result = cache_lookup(type, object_id) if use_caching
+           read_lock(type) do
+             result ||= YAML.load_file(file_path(type, object_id))
+           end
+           write_lock(type) do
+             cache_store(type, object_id, result)
+           end
+         rescue => e
+           puts e.message
+         end
+         return result.dup # Return a duplicate to support caching
+       end
+
+       # Lazily grab all document ids in use
+       def self.all(type)
+         directory_glob = read_lock(type) { Dir.glob(File.join(File.dirname(file_path(type, 0)), "*.yml")) }
+         if directory_glob and !directory_glob.empty?
+           directory_glob.map {|doc| Integer(File.basename(doc, ".yml"))}.sort
+         else
+           []
+         end
+       end
+
+       def self.file_path(type, object_id)
+         File.join(type.to_s.gsub('::', "__"), 'documents', object_id.to_s + ".yml")
+       end
+     end
+   end
+ end
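
Concretely, .file_path above means each document class gets its own directory relative to the process working directory, with '::' mapped to '__' for namespaced classes (Song and Music::Song are hypothetical):

    RubyFFDB::StorageEngines::YamlEngine.file_path(Song, 42)
    # => "Song/documents/42.yml"
    RubyFFDB::StorageEngines::YamlEngine.file_path(Music::Song, 7)
    # => "Music__Song/documents/7.yml"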
data/lib/rffdb/version.rb ADDED
@@ -0,0 +1,3 @@
+ module RubyFFDB
+   VERSION = "0.0.5"
+ end
data/lib/rffdb.rb ADDED
@@ -0,0 +1,11 @@
+ require 'ostruct'
+ require 'rffdb/version'
+ require 'rffdb/exception'
+ require 'rffdb/exceptions/cache_exceptions'
+ require 'rffdb/exceptions/document_exceptions'
+ require 'rffdb/cache_provider'
+ require 'rffdb/cache_providers/lru_cache'
+ require 'rffdb/storage_engine'
+ require 'rffdb/storage_engines/yaml_engine'
+ require 'rffdb/document'
+ require 'rffdb/document_collection'
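
Note that rr_cache.rb and json_engine.rb ship in the gem (see the file list in the metadata below) but are not required here, so callers opting into those providers load them explicitly:

    require 'rffdb'
    require 'rffdb/cache_providers/rr_cache'
    require 'rffdb/storage_engines/json_engine'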
metadata ADDED
@@ -0,0 +1,58 @@
+ --- !ruby/object:Gem::Specification
+ name: rffdb
+ version: !ruby/object:Gem::Version
+   version: 0.0.5
+ platform: ruby
+ authors:
+ - Jonathan Gnagy
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2014-06-30 00:00:00.000000000 Z
+ dependencies: []
+ description: A demonstration gem
+ email: jonathan.gnagy@gmail.com
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - LICENSE
+ - lib/rffdb.rb
+ - lib/rffdb/cache_provider.rb
+ - lib/rffdb/cache_providers/lru_cache.rb
+ - lib/rffdb/cache_providers/rr_cache.rb
+ - lib/rffdb/document.rb
+ - lib/rffdb/document_collection.rb
+ - lib/rffdb/exception.rb
+ - lib/rffdb/exceptions/cache_exceptions.rb
+ - lib/rffdb/exceptions/document_exceptions.rb
+ - lib/rffdb/storage_engine.rb
+ - lib/rffdb/storage_engines/json_engine.rb
+ - lib/rffdb/storage_engines/yaml_engine.rb
+ - lib/rffdb/version.rb
+ homepage: https://rubygems.org/gems/rffdb
+ licenses:
+ - MIT
+ metadata: {}
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - "~>"
+     - !ruby/object:Gem::Version
+       version: '2.0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 2.2.0
+ signing_key:
+ specification_version: 4
+ summary: Ruby FlatFile DB
+ test_files: []
+ has_rdoc: