rffdb 0.0.6 → 0.0.8

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 1663bc00dcb1ccd67c4e62f73367682709c603b3
4
- data.tar.gz: ba8637b384bb8215977d852b5e1f0e483fd0f638
3
+ metadata.gz: 2ac408fa55b4135737e2c26d4d1ebdb208951278
4
+ data.tar.gz: 362c818f4cd7435d248f9ccbe5ce85c602f494de
5
5
  SHA512:
6
- metadata.gz: 013ed8f87707dc0e6f71909951e646e20fc682d5851c60a447bff9975bd5e8620a7bc0b86a8d6a70b28150dd5a06935c26d119e3891af4959413cd370b12cccd
7
- data.tar.gz: 98985078b1ed11cf51a28d11f1355b5ac056f8c82336f8af7229a935fd726cbe188c4d96b0ee6582b446b7ad419fcb033cda43c7fd03300f9d312c10025359b6
6
+ metadata.gz: c0fe51088853821a6a109b91386fc68f3ebae4e596304b14151179293d1976ebb5949d8f7d9e649d67784d327325c5d7d42f95e05e93b891afb0136d05fc06bf
7
+ data.tar.gz: b8412b293f6b541c728f1ce6bdf479a32a1482e15a154d5d40d09a489cade1340bd81150d702a7d7c2509e36e0c26686ecf75b5f3394c620a60af72d618d7e8a
@@ -1,14 +1,15 @@
1
1
  module RubyFFDB
2
- # Generic Cache Provider definition. Any subclass *must* implement or inherit the methods defined here (if any).
2
+ # Generic Cache Provider definition. Any subclass *must* implement or inherit
3
+ # the methods defined here (if any).
3
4
  class CacheProvider
4
5
  # Used for pulling data from the cache
5
- def [](key)
6
+ def [](_key)
6
7
  nil
7
8
  end
8
-
9
+
9
10
  # Used for storing data in the cache
10
- def []=(key, value)
11
+ def []=(_key, _value)
11
12
  false
12
13
  end
13
14
  end
14
- end
15
+ end
@@ -1,16 +1,17 @@
1
1
  module RubyFFDB
2
2
  module CacheProviders
3
- # A very simple Least Recently Used (LRU) cache implementation. Stores data in a Hash,
4
- # uses a dedicated Array for storing and sorting keys (and implementing the LRU algorithm),
5
- # and doesn't bother storing access information for cache data. It stores hit and miss counts
6
- # for the entire cache (not for individual keys). It also uses three mutexes for thread-safety:
7
- # a write lock, a read lock, and a metadata lock.
3
+ # A very simple Least Recently Used (LRU) cache implementation. Stores data
4
+ # in a Hash, uses a dedicated Array for storing and sorting keys (and
5
+ # implementing the LRU algorithm), and doesn't bother storing access
6
+ # information for cache data. It stores hit and miss counts for the
7
+ # entire cache (not for individual keys). It also uses three mutexes for
8
+ # thread-safety: a write lock, a read lock, and a metadata lock.
8
9
  class LRUCache < CacheProvider
9
10
  attr_reader :max_size, :keys
10
11
 
11
12
  # @raise [Exceptions::InvalidCacheSize] if the max_size isn't an Integer
12
13
  def initialize(max_size = 100)
13
- raise Exceptions::InvalidCacheSize unless max_size.kind_of?(Integer)
14
+ fail Exceptions::InvalidCacheSize unless max_size.is_a?(Integer)
14
15
 
15
16
  @max_size = max_size
16
17
  @hits = 0
@@ -55,7 +56,8 @@ module RubyFFDB
55
56
  to_hash.each(&block)
56
57
  end
57
58
 
58
- # Invalidate a cached item by its index / key. Returns `nil` if the object doesn't exist.
59
+ # Invalidate a cached item by its index / key. Returns `nil` if the object
60
+ # doesn't exist.
59
61
  # @param key [Symbol] the cached object's index
60
62
  def invalidate(key)
61
63
  invalidate_key(key)
@@ -76,7 +78,8 @@ module RubyFFDB
76
78
  end
77
79
  end
78
80
 
79
- # Similar to {#truncate} (in fact, it calls it) but it also clears the statistical metadata.
81
+ # Similar to {#truncate} (in fact, it calls it) but it also clears the
82
+ # statistical metadata.
80
83
  # @return [Boolean] was the flush operation successful?
81
84
  def flush
82
85
  if truncate
@@ -87,19 +90,26 @@ module RubyFFDB
87
90
  end
88
91
  end
89
92
 
90
- # Provides a hash of the current metadata for the cache. It provides the current cache size (`:size`),
91
- # the number of cache hits (`:hits`), and the number of cache misses (`:misses`).
93
+ # Provides a hash of the current metadata for the cache. It provides the
94
+ # current cache size (`:size`), the number of cache hits (`:hits`), and
95
+ # the number of cache misses (`:misses`).
92
96
  # @return [Hash] cache statistics
93
97
  def statistics
94
- {size: size, hits: @meta_mutex.synchronize { @hits }, misses: @meta_mutex.synchronize { @misses }}
98
+ {
99
+ size: size,
100
+ hits: @meta_mutex.synchronize { @hits },
101
+ misses: @meta_mutex.synchronize { @misses }
102
+ }
95
103
  end
96
104
 
97
- # Store some data (`value`) indexed by a `key`. If an object exists with the same key, and the
98
- # value is different, it will be overwritten. Storing a value causes its key to be moved to the end
99
- # of the keys array (meaning it is the __most recently used__ item), and this happens on #store regardless
100
- # of whether or not the key previously existed. This behavior is relied upon by {#retrieve} to allow
101
- # reorganization of the keys without necessarily modifying the data it indexes. Uses recursion for overwriting
102
- # existing items.
105
+ # Store some data (`value`) indexed by a `key`. If an object exists with
106
+ # the same key, and the value is different, it will be overwritten.
107
+ # Storing a value causes its key to be moved to the end of the keys array
108
+ # (meaning it is the __most recently used__ item), and this happens on
109
+ # #store regardless of whether or not the key previously existed.
110
+ # This behavior is relied upon by {#retrieve} to allow reorganization of
111
+ # the keys without necessarily modifying the data it indexes.
112
+ # Uses recursion for overwriting existing items.
103
113
  #
104
114
  # @param key [Symbol] the index to use for referencing this cached item
105
115
  # @param value [Object] the data to cache
@@ -114,9 +124,7 @@ module RubyFFDB
114
124
  store(key, value)
115
125
  end
116
126
  else
117
- if size >= @max_size
118
- invalidate(@keys.first) until size < @max_size
119
- end
127
+ invalidate(@keys.first) until size < @max_size if size >= @max_size
120
128
 
121
129
  @write_mutex.synchronize do
122
130
  @meta_mutex.synchronize { @keys << key }
@@ -127,13 +135,14 @@ module RubyFFDB
127
135
 
128
136
  alias_method :[]=, :store
129
137
 
130
- # Retrieve an item from the cache. Returns `nil` if the item doesn't exist. Relies on {#store} returning the
131
- # stored value to ensure the LRU algorithm is maintained safely.
138
+ # Retrieve an item from the cache. Returns `nil` if the item does not
139
+ # exist. Relies on {#store} returning the stored value to ensure the LRU
140
+ # algorithm is maintained safely.
132
141
  # @param key [Symbol] the index to retrieve
133
142
  def retrieve(key)
134
143
  if has?(key)
135
144
  @meta_mutex.synchronize { @hits += 1 }
136
- # Looks dumb, as it stores the value again, but it actually only reorganizes the keys Array
145
+ # Looks dumb, but it actually only reorganizes the keys Array
137
146
  store(key, @read_mutex.synchronize { @data[key] })
138
147
  else
139
148
  @meta_mutex.synchronize { @misses += 1 }
@@ -159,4 +168,4 @@ module RubyFFDB
159
168
  end
160
169
  end
161
170
  end
162
- end
171
+ end
@@ -1,15 +1,18 @@
1
1
  module RubyFFDB
2
2
  module CacheProviders
3
- # A simple Random Replacement (RR) cache implementation. Stores data in a Hash,
4
- # uses a dedicated Array for storing keys (and implementing the RR algorithm),
5
- # and doesn't bother storing access information for cache data. It stores hit and miss counts
6
- # for the entire cache (not for individual keys). It also uses three mutexes for thread-safety:
7
- # a write lock, a read lock, and a metadata lock. The RRCache borrows nearly all its functionality
8
- # from the {LRUCache}, only overwriting the storage (and therefore the revocation) method.
3
+ # A simple Random Replacement (RR) cache implementation. Stores data in a
4
+ # Hash, uses a dedicated Array for storing keys (and implementing the RR
5
+ # algorithm), and doesn't bother storing access information for cache data.
6
+ # It stores hit and miss counts for the entire cache (not for individual
7
+ # keys). It also uses three mutexes for thread-safety: a write lock, a read
8
+ # lock, and a metadata lock. The RRCache borrows nearly all its
9
+ # functionality from the {LRUCache}, only overwriting the storage (and
10
+ # therefore the revocation) method.
9
11
  class RRCache < LRUCache
10
- # Store some data (`value`) indexed by a `key`. If an object exists with the same key, and the
11
- # value is different, it will be overwritten. Storing a new item when the cache is full causes
12
- # the keys Array a random entry to be evicted via a shuffling of the keys. Keys are stored in
12
+ # Store some data (`value`) indexed by a `key`. If an object exists with
13
+ # the same key, and the value is different, it will be overwritten.
14
+ # Storing a new item when the cache is full causes a random entry in the
15
+ # keys Array to be evicted via a shuffling of the keys. Keys are stored in
13
16
  # the order in which they were inserted (not shuffled).
14
17
  #
15
18
  # @param key [Symbol] the index to use for referencing this cached item
@@ -32,4 +35,4 @@ module RubyFFDB
32
35
  alias_method :[]=, :store
33
36
  end
34
37
  end
35
- end
38
+ end
@@ -1,4 +1,8 @@
1
1
  module RubyFFDB
2
2
  # Data directory for DB storage
3
- DB_DATA = ENV["RFFDB_DB_DATA"] ? File.expand_path(ENV["RFFDB_DB_DATA"]) : File.expand_path(File.join("~", ".rffdb", "data"))
4
- end
3
+ DB_DATA = if ENV['RFFDB_DB_DATA']
4
+ File.expand_path(ENV['RFFDB_DB_DATA'])
5
+ else
6
+ File.expand_path(File.join('~', '.rffdb', 'data'))
7
+ end
8
+ end
@@ -1,11 +1,13 @@
1
1
  module RubyFFDB
2
2
  class Document
3
+ include Comparable
4
+ attr_reader :id
3
5
 
4
- # @raise [Exceptions::NoSuchDocument] when attempting to retrieve a non-existing document by id
6
+ # @raise [Exceptions::NoSuchDocument] when retrieving a non-existing document
5
7
  def initialize(existing_id = false, lazy = true)
6
8
  if existing_id
7
- @document_id = existing_id
8
- raise Exceptions::NoSuchDocument unless File.exists?(file_path)
9
+ @id = existing_id
10
+ fail Exceptions::NoSuchDocument unless File.exist?(file_path)
9
11
  if lazy
10
12
  @lazy = true
11
13
  else
@@ -14,7 +16,7 @@ module RubyFFDB
14
16
  end
15
17
  @saved = true
16
18
  else
17
- @document_id = storage.next_id(self.class)
19
+ @id = storage.next_id(self.class)
18
20
  @data = {}
19
21
  # relative to database root
20
22
  @saved = false
@@ -23,16 +25,10 @@ module RubyFFDB
23
25
  @write_lock = Mutex.new
24
26
  end
25
27
 
26
- # Overrides the Object#id method to deliver an id derived from this document's storage engine
27
- # @return [Fixnum] the object id from the storage engine
28
- def id
29
- @document_id
30
- end
31
-
32
28
  # The location of the flat-file
33
- # @return [String] the path to the flat-file used to store this document (may not exist yet)
29
+ # @return [String] flat-file used to store this document (may not exist yet)
34
30
  def file_path
35
- storage.file_path(self.class, @document_id)
31
+ storage.file_path(self.class, @id)
36
32
  end
37
33
 
38
34
  # Commit the document to storage
@@ -40,7 +36,7 @@ module RubyFFDB
40
36
  def commit
41
37
  @read_lock.synchronize do
42
38
  @write_lock.synchronize do
43
- storage.store(self.class, @document_id, @data.dup) unless @saved
39
+ storage.store(self.class, @id, @data.dup) unless @saved
44
40
  @saved = true
45
41
  end
46
42
  end
@@ -51,91 +47,118 @@ module RubyFFDB
51
47
  # Has this document been committed to storage?
52
48
  # @return [Boolean]
53
49
  def committed?
54
- return @saved
50
+ @saved
55
51
  end
56
52
 
57
- # Retrieve the stored data from disk, never using cache. Allows forcing to overwrite uncommitted changes.
58
- # @raise [Exceptions::PendingChanges] if attempting to reload with uncommitted changes (and if `force` is false)
53
+ # Retrieve the stored data from disk, never using cache. Allows forcing to
54
+ # overwrite uncommitted changes.
55
+ # @raise [Exceptions::PendingChanges] if attempting to reload with
56
+ # uncommitted changes (and if `force` is false)
59
57
  def reload(force = false)
60
- if committed? or force
58
+ if committed? || force
61
59
  @read_lock.synchronize do
62
60
  @write_lock.synchronize do
63
- @data = storage.retrieve(self.class, @document_id, false)
61
+ @data = storage.retrieve(self.class, @id, false)
64
62
  end
65
63
  end
66
64
  else
67
- raise Exceptions::PendingChanges
65
+ fail Exceptions::PendingChanges
68
66
  end
69
67
  @read_lock.synchronize do
70
68
  @write_lock.synchronize { @saved = true }
71
69
  end
72
70
  end
73
71
 
74
- # Overwrites the document's data, either from disk or from cache. Useful for lazy-loading and not
75
- # typically used directly. Since data might have been pulled from cache, this can lead to bizarre
76
- # things if not used carefully and things rely on #committed? or @saved.
72
+ # Overwrites the document's data, either from disk or from cache. Useful for
73
+ # lazy-loading and not typically used directly. Since data might have been
74
+ # pulled from cache, this can lead to bizarre things if not used carefully
75
+ # and things rely on #committed? or @saved.
77
76
  def refresh
78
77
  @write_lock.synchronize do
79
- @data = storage.retrieve(self.class, @document_id)
78
+ @data = storage.retrieve(self.class, @id)
80
79
  @saved = true
81
80
  end
82
81
  end
83
82
 
84
- # Currently an alias for #new, but used as a wrapper in case more work needs to be done
85
- # before pulling a document from the storage engine (such as sanitizing input, etc)
83
+ # Currently an alias for #new, but used as a wrapper in case more work needs
84
+ # to be done before pulling a document from the storage engine (such as
85
+ # sanitizing input, etc)
86
86
  def self.load(id)
87
- return self.new(id)
87
+ new(id)
88
88
  end
89
89
 
90
- self.singleton_class.send(:alias_method, :get, :load)
90
+ singleton_class.send(:alias_method, :get, :load)
91
91
 
92
- # This DSL method is used to define the schema for a document. It sets up all data access for the class,
93
- # and allows specifying strict checks on that schema during its use, such as validations, class types, regexp
92
+ # This DSL method is used to define the schema for a document. It sets up
93
+ # all data access for the class, and allows specifying strict checks
94
+ # on that schema during its use, such as validations, class types, regexp
94
95
  # formatting, etc.
95
96
  #
96
97
  # @param name [Symbol] the unique name of the attribute
97
- # @option options [Class] :class (Object) the expected object class for this attribute
98
- # @option options [Regexp] :format a regular expression for the required format of the attribute (for any :class that supports #.to_s)
99
- # @option options [Array, Symbol] :validate either a symbol or array of symbols referencing the instance method(s) to use to validate this attribute
98
+ # @option options [Class] :class (Object) the expected object class for
99
+ # this attribute
100
+ # @option options [Regexp] :format a regular expression for the required
101
+ # format of the attribute (for any :class that supports #.to_s)
102
+ # @option options [Array, Symbol] :validate either a symbol or array of
103
+ # symbols referencing the instance method(s) to use to validate this
104
+ # attribute
105
+ # @option options [Boolean] :unique should this attribute be unique?
100
106
  def self.attribute(name, options = {})
101
107
  @structure ||= {}
102
108
  @structure[name.to_sym] = {}
103
109
  # setup the schema
104
- @structure[name.to_sym][:class] = options.has_key?(:class) ? options[:class] : Object
105
- @structure[name.to_sym][:format] = options.has_key?(:format) ? options[:format] : nil
106
- @structure[name.to_sym][:validations] = options.has_key?(:validate) ? [*options[:validate]] : []
110
+ @structure[name.to_sym][:class] =
111
+ options.key?(:class) ? options[:class] : Object
112
+ @structure[name.to_sym][:format] =
113
+ options.key?(:format) ? options[:format] : nil
114
+ @structure[name.to_sym][:validations] =
115
+ options.key?(:validate) ? [*options[:validate]] : []
116
+ @structure[name.to_sym][:unique] =
117
+ options.key?(:unique) == true ? true : false
107
118
  end
108
119
 
109
- # This DSL method is used to setup the backend {StorageEngine} class and optionally the {CacheProvider}
110
- # for this Document type.
120
+ # This DSL method is used to setup the backend {StorageEngine} class and
121
+ # optionally the {CacheProvider} for this Document type.
111
122
  #
112
123
  # @param storage_engine [Class] the {StorageEngine} child class to use
113
- # @option cache_opts [Class] :cache_provider (CacheProviders::LRUCache) the {CacheProvider} child class for caching
114
- # @option cache_opts [Fixnum] :cache_size the cache size, in terms of the number of objects stored
115
- # @raise [Exceptions::InvalidEngine] if the specified {StorageEngine} does not exist
116
- # @raise [Exceptions::InvalidCacheProvider] if a cache_provider is specified and it isn't a type of {CacheProvider}
124
+ # @option cache_opts [Class] :cache_provider (CacheProviders::LRUCache) the
125
+ # {CacheProvider} child class for caching
126
+ # @option cache_opts [Fixnum] :cache_size the cache size, in terms of the
127
+ # number of objects stored
128
+ # @raise [Exceptions::InvalidEngine] if the specified {StorageEngine} does
129
+ # not exist
130
+ # @raise [Exceptions::InvalidCacheProvider] if a cache_provider is specified
131
+ # and it isn't a type of {CacheProvider}
117
132
  def self.engine(storage_engine, cache_opts = {})
118
- raise Exceptions::InvalidEngine unless storage_engine.instance_of? Class and storage_engine.ancestors.include?(StorageEngine)
133
+ unless storage_engine.instance_of?(Class) &&
134
+ storage_engine.ancestors.include?(StorageEngine)
135
+ fail Exceptions::InvalidEngine
136
+ end
119
137
  @engine = storage_engine
120
- if cache_opts.has_key?(:cache_provider)
138
+ if cache_opts.key?(:cache_provider)
121
139
  # Make sure the cache provider specified is valid
122
- unless cache_opts[:cache_provider].instance_of? Class and cache_opts[:cache_provider].ancestors.include?(CacheProvider)
123
- raise Exceptions::InvalidCacheProvider
140
+ unless cache_opts[:cache_provider].instance_of?(Class) &&
141
+ cache_opts[:cache_provider].ancestors.include?(CacheProvider)
142
+ fail Exceptions::InvalidCacheProvider
124
143
  end
144
+
125
145
  @engine.cache_provider(self, cache_opts[:cache_provider])
126
146
  end
127
- if cache_opts.has_key?(:cache_size)
128
- @engine.cache_size(self, cache_opts[:cache_size])
129
- end
147
+
148
+ @engine.cache_size(
149
+ self, cache_opts[:cache_size]
150
+ ) if cache_opts.key?(:cache_size)
130
151
  end
131
152
 
132
- # @return [StorageEngine] a reference to the storage engine singleton of this document class
153
+ # @return [StorageEngine] a reference to the storage engine singleton of
154
+ # this document class
133
155
  def self.storage
134
156
  @engine ||= StorageEngines::YamlEngine
135
157
  @engine
136
158
  end
137
159
 
138
- # @return [StorageEngine] a reference to the storage engine singleton of this document class
160
+ # @return [StorageEngine] a reference to the storage engine singleton of
161
+ # this document class
139
162
  def storage
140
163
  self.class.send(:storage)
141
164
  end
@@ -151,8 +174,8 @@ module RubyFFDB
151
174
  self.class.send(:structure)
152
175
  end
153
176
 
154
- # Sets the maximum number of entries the cache instance for this document will hold.
155
- # Note, this clears the current contents of the cache.
177
+ # Sets the maximum number of entries the cache instance for this document
178
+ # will hold. Note: this clears the current contents of the cache.
156
179
  # @param size [Fixnum] the maximum size of this class' cache instance
157
180
  def self.cache_size(size)
158
181
  storage.cache_size(self, size)
@@ -167,42 +190,64 @@ module RubyFFDB
167
190
  # Return all available instances of this type
168
191
  # @return [DocumentCollection] all documents of this type
169
192
  def self.all
170
- DocumentCollection.new(storage.all(self).collect {|doc_id| load(doc_id)}, self)
193
+ DocumentCollection.new(
194
+ storage.all(self).collect { |doc_id| load(doc_id) },
195
+ self
196
+ )
171
197
  end
172
198
 
173
199
  # Query for Documents based on an attribute
174
200
  # @see DocumentCollection#where
175
- def self.where(attribute, value, comparison_method = "==")
201
+ def self.where(attribute, value, comparison_method = '==')
176
202
  all.where(attribute, value, comparison_method)
177
203
  end
178
204
 
179
- # Uses the defined schema to setup getter and setter methods. Runs validations,
180
- # format checking, and type checking on setting methods.
181
- # @raise [Exceptions::FailedValidation] if validation of an attribute fails while setting
182
- # @raise [Exceptions::InvalidInput] if, while setting, an attribute fails to conform to the type or format defined in the schema
205
+ # Compare two documents
206
+ def <=>(other)
207
+ id <=> other.id
208
+ end
209
+
210
+ # Uses the defined schema to setup getter and setter methods. Runs
211
+ # validations, format checking, and type checking on setting methods.
212
+ # @todo refactor and comment better
213
+ # @raise [Exceptions::FailedValidation] if validation of an attribute fails
214
+ # while setting
215
+ # @raise [Exceptions::InvalidInput] if, while setting, an attribute fails to
216
+ # conform to the type or format defined in the schema
183
217
  def method_missing(method, *args, &block)
184
- setter = method.to_s.match(/.*=$/) ? true : false
185
- key = setter ? method.to_s.match(/(.*)=$/)[1].to_sym : method.to_s.to_sym
186
-
187
- if structure.has_key?(key) and setter
188
- if args.last.kind_of? structure[key][:class] and (structure[key][:format].nil? or args.last.to_s.match structure[key][:format])
218
+ setter = method.to_s.match(/(.*)=$/) ? true : false
219
+ key = setter ? $1.to_sym : method.to_s.to_sym
220
+
221
+ if structure.key?(key) && setter
222
+ if args.last.is_a?(structure[key][:class]) &&
223
+ (
224
+ structure[key][:format].nil? ||
225
+ args.last.to_s.match(structure[key][:format])
226
+ )
189
227
  valid = true
228
+ if structure[key][:unique] == true
229
+ fail Exceptions::NotUnique unless test_uniqueness(key, args.last)
230
+ end
190
231
  structure[key][:validations].each do |validation|
191
- valid = self.send(validation.to_sym, args.last)
192
- raise Exceptions::FailedValidation unless valid
232
+ valid = send(validation.to_sym, args.last)
233
+ fail Exceptions::FailedValidation unless valid
193
234
  end
194
- refresh if @read_lock.synchronize { @lazy } and @read_lock.synchronize { committed? } # here is where the lazy-loading happens
235
+ # here is where the lazy-loading happens
236
+ refresh if @read_lock.synchronize { @lazy } &&
237
+ @read_lock.synchronize { committed? }
195
238
  @read_lock.synchronize do
196
239
  @write_lock.synchronize do
197
240
  @data[key.to_s] = args.last if valid
198
241
  end
199
242
  end
200
243
  else
201
- raise Exceptions::InvalidInput
244
+ fail Exceptions::InvalidInput
202
245
  end
203
246
  @saved = false
204
- elsif structure.has_key?(key)
205
- refresh if @read_lock.synchronize { @lazy } and @read_lock.synchronize { committed? } # here is where the lazy-loading happens
247
+ elsif structure.key?(key)
248
+ # here is where the lazy-loading happens
249
+ refresh if @read_lock.synchronize { @lazy } &&
250
+ @read_lock.synchronize { committed? }
206
251
  @read_lock.synchronize do
207
252
  @data[key.to_s]
208
253
  end
@@ -212,14 +257,30 @@ module RubyFFDB
212
257
  end
213
258
 
214
259
  def respond_to?(method)
215
- key = method.to_s.match(/.*=$/) ? method.to_s.match(/(.*)=$/)[1].to_sym : method.to_s.to_sym
216
-
217
- if structure.has_key?(key)
260
+ key = method.to_s.match(/(.*)=$/) ? $1.to_sym : method.to_s.to_sym
261
+
262
+ if structure.key?(key)
218
263
  true
219
264
  else
220
265
  super
221
266
  end
222
267
  end
223
268
 
269
+ private
270
+
271
+ # check if a value is unique
272
+ # @return [Boolean] is the value for this column unique?
273
+ def test_uniqueness(column, value)
274
+ if committed?
275
+ (self.class.where(column.to_sym, value) - self).empty?
276
+ else
277
+ list = self.class.where(column.to_sym, value)
278
+ if list.size == 1
279
+ list.first.id == id
280
+ else
281
+ true
282
+ end
283
+ end
284
+ end
224
285
  end
225
- end
286
+ end
@@ -1,6 +1,7 @@
1
1
  module RubyFFDB
2
2
  class DocumentCollection
3
3
  include Enumerable
4
+ include Comparable
4
5
 
5
6
  # @return [Class] this is a collection of this {Document} subclass
6
7
  attr_reader :type
@@ -16,7 +17,7 @@ module RubyFFDB
16
17
  def each(&block)
17
18
  @list.each(&block)
18
19
  end
19
-
20
+
20
21
  # Returns the number of Document instances in the collection
21
22
  # @return [Fixnum]
22
23
  def size
@@ -38,13 +39,78 @@ module RubyFFDB
38
39
  # Return the collection item at the specified index
39
40
  # @return [Document,DocumentCollection] the item at the requested index
40
41
  def [](index)
41
- if index.kind_of?(Range)
42
+ if index.is_a?(Range)
42
43
  self.class.new(@list[index], @type)
43
44
  else
44
45
  @list[index]
45
46
  end
46
47
  end
47
48
 
49
+ # Return a collection after subtracting from the original
50
+ # @return [DocumentCollection]
51
+ def -(other)
52
+ new_list = @list.dup
53
+ if other.respond_to?(:to_a)
54
+ other.to_a.each do |item|
55
+ new_list.delete_if { |document| document.id == item.id }
56
+ end
57
+ elsif other.is_a?(@type)
58
+ new_list.delete_if { |document| document.id == other.id }
59
+ else
60
+ fail Exceptions::InvalidInput
61
+ end
62
+ self.class.new(new_list, @type)
63
+ end
64
+
65
+ # Return a collection after adding to the original
66
+ # Warning: this may cause duplicates or mixed type joins! For safety,
67
+ # use #merge
68
+ # @return [DocumentCollection]
69
+ def +(other)
70
+ if other.respond_to?(:to_a)
71
+ self.class.new(@list + other.to_a, @type)
72
+ elsif other.is_a?(@type)
73
+ self.class.new(@list + [other], @type)
74
+ else
75
+ fail Exceptions::InvalidInput
76
+ end
77
+ end
78
+
79
+ # Merge two collections
80
+ # @return [DocumentCollection]
81
+ def merge(other)
82
+ if other.is_a?(self.class) && other.type == @type
83
+ new_list = []
84
+
85
+ new_keys = collect(&:id)
86
+ new_keys += other.collect(&:id)
87
+
88
+ new_keys.sort.uniq.each do |doc_id|
89
+ new_list << self.class.get(doc_id)
90
+ end
91
+
92
+ self.class.new(new_list, @type)
93
+ else
94
+ fail Exceptions::InvalidInput
95
+ end
96
+ end
97
+
98
+ # Allow comparison of collection
99
+ # @return [Boolean] do the collections contain the same document ids?
100
+ def ==(other)
101
+ if other.is_a? self.class
102
+ collect(&:id).sort == other.collect(&:id).sort
103
+ else
104
+ false
105
+ end
106
+ end
107
+
108
+ # Does the collection contain anything?
109
+ # @return [Boolean]
110
+ def empty?
111
+ @list.empty?
112
+ end
113
+
48
114
  # Allow complex sorting like an Array
49
115
  # @return [DocumentCollection] sorted collection
50
116
  def sort(&block)
@@ -56,17 +122,21 @@ module RubyFFDB
56
122
  #
57
123
  # @param attribute [Symbol] the attribute to query
58
124
  # @param value [Object] the value to compare against
59
- # @param comparison_method [String,Symbol] the method to use for comparison - allowed options are "'==', '>', '>=', '<', '<=', and 'match'"
125
+ # @param comparison_method [String,Symbol] the method to use for comparison
126
+ # - allowed options are "'==', '>', '>=', '<', '<=', and 'match'"
60
127
  # @raise [Exceptions::InvalidWhereQuery] if not the right kind of comparison
61
128
  # @return [DocumentCollection]
62
129
  def where(attribute, value, comparison_method = '==')
63
- unless [:'==', :'>', :'>=', :'<', :'<=', :match].include?(comparison_method.to_sym)
64
- raise Exceptions::InvalidWhereQuery
130
+ valid_comparison_methods = [:'==', :'>', :'>=', :'<', :'<=', :match]
131
+ unless valid_comparison_methods.include?(comparison_method.to_sym)
132
+ fail Exceptions::InvalidWhereQuery
65
133
  end
66
134
  self.class.new(
67
- @list.collect {|item| item if item.send(attribute).send(comparison_method.to_sym, value) }.compact,
135
+ @list.collect do |item|
136
+ item if item.send(attribute).send(comparison_method.to_sym, value)
137
+ end.compact,
68
138
  @type
69
139
  )
70
140
  end
71
141
  end
72
- end
142
+ end
@@ -1,5 +1,6 @@
1
1
  module RubyFFDB
2
- # Generic Exception definition. Any subclass *must* implement or inherit the methods defined here (if any).
2
+ # Generic Exception definition. Any subclass *must* implement or inherit the
3
+ # methods defined here (if any).
3
4
  class Exception < StandardError
4
5
  end
5
- end
6
+ end
@@ -6,4 +6,4 @@ module RubyFFDB
6
6
  class InvalidCacheProvider < Exception
7
7
  end
8
8
  end
9
- end
9
+ end
@@ -15,8 +15,10 @@ module RubyFFDB
15
15
  class NoSuchDocument < Exception
16
16
  end
17
17
 
18
+ class NotUnique < Exception
19
+ end
20
+
18
21
  class PendingChanges < Exception
19
22
  end
20
23
  end
21
24
  end
22
-
@@ -1,8 +1,10 @@
1
1
  module RubyFFDB
2
- # Generic Storage Engine definition. Any subclass *must* implement or inherit the methods defined here (if any).
2
+ # Generic Storage Engine definition. Any subclass *must* implement or inherit
3
+ # the methods defined here (if any).
3
4
  class StorageEngine
4
5
  # Read locking by document type
5
- # @param type [Document] implements the equivalent of table-level read-locking on this {Document} type
6
+ # @param type [Document] implements the equivalent of table-level
7
+ # read-locking on this {Document} type
6
8
  def self.read_lock(type, &block)
7
9
  @read_mutexes ||= {}
8
10
  @read_mutexes[type] ||= Mutex.new
@@ -10,7 +12,8 @@ module RubyFFDB
10
12
  end
11
13
 
12
14
  # Write locking by document type, with implicit read locking
13
- # @param type [Document] implements the equivalent of table-level write-locking on this {Document} type
15
+ # @param type [Document] implements the equivalent of table-level
16
+ # write-locking on this {Document} type
14
17
  def self.write_lock(type, &block)
15
18
  @write_mutexes ||= {}
16
19
  @write_mutexes[type] ||= Mutex.new
@@ -20,37 +23,42 @@ module RubyFFDB
20
23
  end
21
24
 
22
25
  # Store data
26
+ # This method should be overridden in subclasses.
23
27
  # @param type [Document] type of {Document} to store
24
- # @param object_id [Object] unique identifier for the data to store (usually an Integer)
28
+ # @param object_id [Object] unique identifier for the data to store
25
29
  # @param data [Object] data to be stored
26
- def self.store(type, object_id, data)
30
+ def self.store(_type, _object_id, _data)
27
31
  false
28
32
  end
29
33
 
30
34
  # Retrieve some stored data
35
+ # This method should be overridden in subclasses.
31
36
  # @param type [Document] type of {Document} to retrieve
32
- # @param object_id [Object] unique identifier for the stored data (usually an Integer)
37
+ # @param object_id [Object] unique identifier for the stored data
33
38
  # @param use_caching [Boolean] attempt to pull the data from cache (or not)
34
- def self.retrieve(type, object_id, use_caching = true)
39
+ def self.retrieve(_type, _object_id, _use_caching = true)
35
40
  false
36
41
  end
37
42
 
38
43
  # Flush all changes to disk (usually done automatically)
44
+ # This method should be overridden in subclasses.
39
45
  def self.flush
40
46
  false
41
47
  end
42
48
 
43
49
  # The full path to a stored (or would-be stored) {Document}
50
+ # This method should be overridden in subclasses.
44
51
  # @param type [Document] the document type
45
- # @param object_id [Object] unique identifier for the document (usually an Integer)
46
- def self.file_path(type, object_id)
52
+ # @param object_id [Object] unique identifier for the document
53
+ def self.file_path(_type, _object_id)
47
54
  false
48
55
  end
49
56
 
50
57
  # Return all known instances of a {Document}
58
+ # This method should be overridden in subclasses.
51
59
  # @param type [Document] the document type
52
60
  # @return [Array]
53
- def self.all(type)
61
+ def self.all(_type)
54
62
  []
55
63
  end
56
64
 
@@ -60,7 +68,7 @@ module RubyFFDB
60
68
  def self.next_id(type)
61
69
  last_id = all(type)[-1]
62
70
  next_key = last_id.nil? ? 1 : (last_id + 1)
63
- if @highest_known_key and @highest_known_key >= next_key
71
+ if @highest_known_key && @highest_known_key >= next_key
64
72
  write_lock(type) { @highest_known_key += 1 }
65
73
  else
66
74
  write_lock(type) { @highest_known_key = next_key }
@@ -70,10 +78,12 @@ module RubyFFDB
70
78
  # Set the cache provider to use for a document type
71
79
  # This completely flushes all cache.
72
80
  # @param document_type [Document] the document type
73
- # @param cache_provider_class [CacheProvider] the type of {CacheProvider} to use
81
+ # @param cache_provider_class [CacheProvider] the type {CacheProvider}
82
+ # subclass for caching
74
83
  def self.cache_provider(document_type, cache_provider_class)
75
- unless cache_provider_class.instance_of? Class and cache_provider_class.ancestors.include?(CacheProvider)
76
- raise Exceptions::InvalidCacheProvider
84
+ unless cache_provider_class.instance_of?(Class) &&
85
+ cache_provider_class.ancestors.include?(CacheProvider)
86
+ fail Exceptions::InvalidCacheProvider
77
87
  end
78
88
  @caches ||= {}
79
89
  @caches[document_type] = cache_provider_class.new
@@ -84,7 +94,7 @@ module RubyFFDB
84
94
  # @param size [Fixnum] the maximum size of the cache
85
95
  def self.cache_size(type, size)
86
96
  @caches ||= {}
87
- if @caches.has_key?(type)
97
+ if @caches.key?(type)
88
98
  @caches[type] = @caches[type].class.new(size)
89
99
  else
90
100
  @caches[type] = CacheProviders::LRUCache.new(size)
@@ -93,7 +103,7 @@ module RubyFFDB
93
103
 
94
104
  # Attempt to retrieve an item from the {Document} type's cache instance
95
105
  # @param type [Document] the document type
96
- # @param object_id [Object] unique identifier for the document (usually an Integer)
106
+ # @param object_id [Object] unique identifier for the document
97
107
  def self.cache_lookup(type, object_id)
98
108
  @caches ||= {}
99
109
  @caches[type] ||= CacheProviders::LRUCache.new
@@ -102,17 +112,18 @@ module RubyFFDB
102
112
 
103
113
  # Store some data in the cache for the {Document} type
104
114
  # @param type [Document] the document type
105
- # @param object_id [Object] unique identifier for the document (usually an Integer)
115
+ # @param object_id [Object] unique identifier for the document
106
116
  # @param data [Object] data to be stored
107
117
  # @return [Boolean]
108
118
  def self.cache_store(type, object_id, data)
109
119
  @caches ||= {}
110
120
  @caches[type] ||= CacheProviders::LRUCache.new
111
121
  @caches[type][object_id.to_s] = data
112
- return true
122
+ true
113
123
  end
114
124
 
115
- # Allow access to the cache instance directly (kind of dangerous but helpful for troubleshooting)
125
+ # Allow access to the cache instance directly (kind of dangerous but helpful
126
+ # for troubleshooting)
116
127
  # @param type [Document] the document type
117
128
  # @return [CacheProvider]
118
129
  def self.cache(type)
@@ -120,4 +131,4 @@ module RubyFFDB
120
131
  @caches[type] ||= CacheProviders::LRUCache.new
121
132
  end
122
133
  end
123
- end
134
+ end
@@ -1,19 +1,19 @@
1
1
  module RubyFFDB
2
2
  module StorageEngines
3
3
  class JsonEngine < StorageEngine
4
- # TODO add support for sharding since directories will fill up quickly
4
+ # TODO: add support for sharding since directories will fill up quickly
5
5
  require 'json'
6
6
 
7
7
  def self.store(type, object_id, data)
8
8
  path = file_path(type, object_id)
9
9
  write_lock(type) do
10
10
  FileUtils.mkdir_p(File.dirname(path))
11
- File.open(path, "w") do |file|
11
+ File.open(path, 'w') do |file|
12
12
  file.puts JSON.dump(data)
13
13
  end
14
14
  cache_store(type, object_id, data)
15
15
  end
16
- return true
16
+ true
17
17
  end
18
18
 
19
19
  def self.retrieve(type, object_id, use_caching = true)
@@ -22,7 +22,7 @@ module RubyFFDB
22
22
  result = cache_lookup(type, object_id) if use_caching
23
23
  unless result
24
24
  read_lock(type) do
25
- file = File.open(file_path(type, object_id), "r")
25
+ file = File.open(file_path(type, object_id), 'r')
26
26
  result = JSON.load(file)
27
27
  file.close
28
28
  end
@@ -31,22 +31,29 @@ module RubyFFDB
31
31
  rescue => e
32
32
  puts e.message
33
33
  end
34
- return result.dup # Return a duplicate to support caching
34
+ result.dup # Return a duplicate to support caching
35
35
  end
36
36
 
37
37
  # Lazily grab all document ids in use
38
38
  def self.all(type)
39
- directory_glob = read_lock(type) { Dir.glob(File.join(File.dirname(file_path(type, 0)), "*.json")) }
39
+ directory_glob = read_lock(type) do
40
+ Dir.glob(File.join(File.dirname(file_path(type, 0)), '*.json'))
41
+ end
40
42
  if directory_glob and !directory_glob.empty?
41
- directory_glob.map {|doc| Integer(File.basename(doc, ".json"))}.sort
43
+ directory_glob.map { |doc| Integer(File.basename(doc, '.json')) }.sort
42
44
  else
43
45
  []
44
46
  end
45
47
  end
46
48
 
47
49
  def self.file_path(type, object_id)
48
- File.join(DB_DATA, type.to_s.gsub('::', "__"), 'documents', object_id.to_s + ".json")
50
+ File.join(
51
+ DB_DATA,
52
+ type.to_s.gsub('::', '__'),
53
+ 'documents',
54
+ object_id.to_s + '.json'
55
+ )
49
56
  end
50
57
  end
51
58
  end
52
- end
59
+ end
@@ -1,19 +1,19 @@
1
1
  module RubyFFDB
2
2
  module StorageEngines
3
3
  class YamlEngine < StorageEngine
4
- # TODO add support for sharding since directories will fill up quickly
4
+ # TODO: add support for sharding since directories will fill up quickly
5
5
  require 'yaml'
6
6
 
7
7
  def self.store(type, object_id, data)
8
8
  path = file_path(type, object_id)
9
9
  write_lock(type) do
10
10
  FileUtils.mkdir_p(File.dirname(path))
11
- File.open(path, "w") do |file|
11
+ File.open(path, 'w') do |file|
12
12
  file.puts YAML.dump(data)
13
13
  end
14
14
  cache_store(type, object_id, data)
15
15
  end
16
- return true
16
+ true
17
17
  end
18
18
 
19
19
  def self.retrieve(type, object_id, use_caching = true)
@@ -29,22 +29,29 @@ module RubyFFDB
29
29
  rescue => e
30
30
  puts e.message
31
31
  end
32
- return result.dup # Return a duplicate to support caching
32
+ result.dup # Return a duplicate to support caching
33
33
  end
34
34
 
35
35
  # Lazily grab all document ids in use
36
36
  def self.all(type)
37
- directory_glob = read_lock(type) { Dir.glob(File.join(File.dirname(file_path(type, 0)), "*.yml")) }
38
- if directory_glob and !directory_glob.empty?
39
- directory_glob.map {|doc| Integer(File.basename(doc, ".yml"))}.sort
37
+ directory_glob = read_lock(type) do
38
+ Dir.glob(File.join(File.dirname(file_path(type, 0)), '*.yml'))
39
+ end
40
+ if directory_glob && !directory_glob.empty?
41
+ directory_glob.map { |doc| Integer(File.basename(doc, '.yml')) }.sort
40
42
  else
41
43
  []
42
44
  end
43
45
  end
44
46
 
45
47
  def self.file_path(type, object_id)
46
- File.join(DB_DATA, type.to_s.gsub('::', "__"), 'documents', object_id.to_s + ".yml")
48
+ File.join(
49
+ DB_DATA,
50
+ type.to_s.gsub('::', '__'),
51
+ 'documents',
52
+ object_id.to_s + '.yml'
53
+ )
47
54
  end
48
55
  end
49
56
  end
50
- end
57
+ end
data/lib/rffdb/version.rb CHANGED
@@ -1,3 +1,3 @@
1
1
  module RubyFFDB
2
- VERSION = "0.0.6"
3
- end
2
+ VERSION = [0, 0, 8].join('.')
3
+ end
data/lib/rffdb.rb CHANGED
@@ -9,4 +9,4 @@ require 'rffdb/cache_providers/lru_cache'
9
9
  require 'rffdb/storage_engine'
10
10
  require 'rffdb/storage_engines/yaml_engine'
11
11
  require 'rffdb/document'
12
- require 'rffdb/document_collection'
12
+ require 'rffdb/document_collection'
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: rffdb
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.0.6
4
+ version: 0.0.8
5
5
  platform: ruby
6
6
  authors:
7
7
  - Jonathan Gnagy
@@ -9,7 +9,21 @@ autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
11
  date: 2014-06-30 00:00:00.000000000 Z
12
- dependencies: []
12
+ dependencies:
13
+ - !ruby/object:Gem::Dependency
14
+ name: rspec
15
+ requirement: !ruby/object:Gem::Requirement
16
+ requirements:
17
+ - - "~>"
18
+ - !ruby/object:Gem::Version
19
+ version: '3.1'
20
+ type: :development
21
+ prerelease: false
22
+ version_requirements: !ruby/object:Gem::Requirement
23
+ requirements:
24
+ - - "~>"
25
+ - !ruby/object:Gem::Version
26
+ version: '3.1'
13
27
  description: A demonstration gem
14
28
  email: jonathan.gnagy@gmail.com
15
29
  executables: []