hashery 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/HISTORY +15 -0
- data/LICENSE +23 -0
- data/README.rdoc +47 -0
- data/ROADMAP.rdoc +13 -0
- data/lib/hashery.rb +13 -0
- data/lib/hashery/castinghash.rb +171 -0
- data/lib/hashery/dictionary.rb +430 -0
- data/lib/hashery/lruhash.rb +274 -0
- data/lib/hashery/memoizer.rb +64 -0
- data/lib/hashery/opencascade.rb +82 -0
- data/lib/hashery/openhash.rb +65 -0
- data/lib/hashery/openobject.rb +279 -0
- data/lib/hashery/orderedhash.rb +417 -0
- data/lib/hashery/ostructable.rb +209 -0
- data/lib/hashery/queryhash.rb +35 -0
- data/lib/hashery/stash.rb +181 -0
- data/lib/hashery/statichash.rb +48 -0
- data/meta/authors +3 -0
- data/meta/created +1 -0
- data/meta/description +5 -0
- data/meta/homepage +1 -0
- data/meta/license +1 -0
- data/meta/name +1 -0
- data/meta/release +1 -0
- data/meta/repository +1 -0
- data/meta/suite +1 -0
- data/meta/summary +1 -0
- data/meta/version +1 -0
- data/test/case_dictionary.rb +142 -0
- data/test/case_opencascade.rb +68 -0
- data/test/case_openhash.rb +18 -0
- data/test/case_openobject.rb +130 -0
- data/test/case_stash.rb +131 -0
- metadata +102 -0

data/lib/hashery/lruhash.rb
@@ -0,0 +1,274 @@
+# LRU based Hash
+
+require 'enumerator'
+
+# Hash with LRU expiry policy. There are at most max_size elements in a
+# LRUHash. When adding more elements old elements are removed according
+# to LRU policy.
+#
+# by Robert Klemme
+
+class LRUHash
+
+  include Enumerable
+
+  attr_reader :max_size
+
+  attr_accessor :default
+  attr_accessor :default_proc
+  attr_accessor :release_proc
+
+  def initialize(max_size, default_value = nil, &block)
+    @max_size = normalize_max(max_size)
+    @default = default_value
+    @default_proc = block
+
+    @h = {}
+    @head = Node.new
+    @tail = front(Node.new)
+  end
+
+  def each_pair
+    if block_given?
+      each_node do |n|
+        yield [n.key, n.value]
+      end
+    else
+      enum_for :each_pair
+    end
+  end
+
+  alias each each_pair
+
+  def each_key
+    if block_given?
+      each_node do |n|
+        yield n.key
+      end
+    else
+      enum_for :each_key
+    end
+  end
+
+  def each_value
+    if block_given?
+      each_node do |n|
+        yield n.value
+      end
+    else
+      enum_for :each_value
+    end
+  end
+
+  def size
+    @h.size
+  end
+
+  def empty?
+    @head.succ.equal? @tail
+  end
+
+  def fetch(key, &b)
+    n = @h[key]
+
+    if n
+      front(n).value
+    else
+      (b || FETCH)[key]
+    end
+  end
+
+  def [](key)
+    fetch(key) do |k|
+      @default_proc ? @default_proc[self, k] : default
+    end
+  end
+
+  def keys
+    @h.keys
+  end
+
+  def values
+    @h.map {|k,n| n.value}
+  end
+
+  def has_key?(key)
+    @h.has_key? key
+  end
+
+  alias key? has_key?
+  alias member? has_key?
+  alias include? has_key?
+
+  def has_value?(value)
+    each_pair do |k, v|
+      return true if value.eql? v
+    end
+
+    false
+  end
+
+  alias value? has_value?
+
+  def values_at(*key_list)
+    key_list.map {|k| self[k]}
+  end
+
+  def assoc(key)
+    n = @h[key]
+
+    if n
+      front(n)
+      [n.key, n.value]
+    end
+  end
+
+  def rassoc(value)
+    each_node do |n|
+      if value.eql? n.value
+        front(n)
+        return [n.key, n.value]
+      end
+    end
+    nil
+  end
+
+  def key(value)
+    pair = rassoc(value) and pair.first
+  end
+
+  def store(key, value)
+    # same optimization as in Hash
+    key = key.dup.freeze if String === key && !key.frozen?
+
+    n = @h[key]
+
+    unless n
+      if size == max_size
+        # reuse node to optimize memory usage
+        n = delete_oldest
+        n.key = key
+        n.value = value
+      else
+        n = Node.new key, value
+      end
+
+      @h[key] = n
+    end
+
+    front(n).value = value
+  end
+
+  alias []= store
+
+  def delete(key)
+    n = @h[key] and remove_node(n).value
+  end
+
+  def delete_if
+    each_node do |n|
+      remove_node n if yield n.key, n.value
+    end
+  end
+
+  def max_size=(limit)
+    limit = normalize_max(limit)
+
+    while size > limit
+      delete_oldest
+    end
+
+    @max_size = limit
+  end
+
+  def clear
+    until empty?
+      delete_oldest
+    end
+
+    self
+  end
+
+  def to_s
+    s = nil
+    each_pair {|k, v| (s ? (s << ', ') : s = '{') << k.to_s << '=>' << v.to_s}
+    s ? (s << '}') : '{}'
+  end
+
+  alias inspect to_s
+
+  private
+
+  # iterate nodes
+  def each_node
+    n = @head.succ
+
+    until n.equal? @tail
+      succ = n.succ
+      yield n
+      n = succ
+    end
+
+    self
+  end
+
+  # move node to front
+  def front(node)
+    node.insert_after(@head)
+  end
+
+  # remove the node and invoke release_proc
+  # if set
+  def remove_node(node)
+    n = @h.delete(node.key)
+    n.unlink
+    release_proc and release_proc[n.key, n.value]
+    n
+  end
+
+  # remove the oldest node returning the node
+  def delete_oldest
+    n = @tail.pred
+    raise "Cannot delete from empty hash" if @head.equal? n
+    remove_node n
+  end
+
+  # Normalize the argument in order to be usable as max_size
+  # criterion is that n.to_i must be an Integer and it must
+  # be larger than zero.
+  def normalize_max(n)
+    n = n.to_i
+    raise ArgumentError, 'Invalid max_size: %p' % n unless Integer === n && n > 0
+    n
+  end
+
+  #
+  FETCH = Proc.new {|k| raise KeyError, 'key not found'}
+
+  # A single node in the doubly linked LRU list of nodes
+  Node = Struct.new :key, :value, :pred, :succ do
+    def unlink
+      pred.succ = succ if pred
+      succ.pred = pred if succ
+      self.succ = self.pred = nil
+      self
+    end
+
+    def insert_after(node)
+      raise 'Cannot insert after self' if equal? node
+      return self if node.succ.equal? self
+
+      unlink
+
+      self.succ = node.succ
+      self.pred = node
+
+      node.succ.pred = self if node.succ
+      node.succ = self
+
+      self
+    end
+  end
+
+end
+
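
A minimal usage sketch of the LRUHash class added above (not part of the package files; it assumes the gem's lib/ directory is on the load path so that `require 'hashery/lruhash'` resolves):

    require 'hashery/lruhash'

    # Keep at most 2 entries; the block supplies defaults for missing keys.
    cache = LRUHash.new(2) { |h, k| "default for #{k}" }

    # Optional callback, invoked whenever an entry is evicted or deleted.
    cache.release_proc = proc { |key, value| puts "evicted #{key}=#{value}" }

    cache[:a] = 1
    cache[:b] = 2
    cache[:a]           # touching :a makes :b the least recently used entry
    cache[:c] = 3       # exceeds max_size, so :b is evicted ("evicted b=2")

    cache.keys          #=> [:a, :c]
    cache[:missing]     #=> "default for missing" (computed, not stored)

Lowering the limit via `max_size=` or calling `clear` goes through the same `delete_oldest`/`remove_node` path, so `release_proc` fires for those removals as well.
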
data/lib/hashery/memoizer.rb
@@ -0,0 +1,64 @@
+# Memoizer
+#
+# Copyright (c) 2006 Erik Veenstra
+#
+# See http://javathink.blogspot.com/2008/09/what-is-memoizer-and-why-should-you.html
+
+# Memoizer wraps objects to provide cached method calls.
+#
+#   class X
+#     def initialize ; @tick = 0 ; end
+#     def tick; @tick + 1; end
+#     def memo; @memo ||= Memoizer.new(self) ; end
+#   end
+#
+#   x = X.new
+#   x.tick #=> 1
+#   x.memo.tick #=> 2
+#   x.tick #=> 3
+#   x.memo.tick #=> 2
+#   x.tick #=> 4
+#   x.memo.tick #=> 2
+#
+# You can also use to cache collections of objects to gain code speed ups.
+#
+#   points = points.collect{|point| Memoizer.cache(point)}
+#
+# After our algorithm has finished using points, we want to get rid of
+# these Memoizer objects. That's easy:
+#
+#   points = points.collect{|point| point.__self__ }
+#
+# Or if you prefer (it is ever so slightly safer):
+#
+#   points = points.collect{|point| Memoizer.uncache(point)}
+#
+class Memoizer
+
+  #private :class, :clone, :display, :type, :method, :to_a, :to_s
+  private *instance_methods(true).select{ |m| m.to_s !~ /^__/ }
+
+  def initialize(object)
+    @self = object
+    @cache = {}
+  end
+
+  def __self__ ; @self ; end
+
+  # Not thread-safe! Speed is important in caches... ;]
+  def method_missing(method_name, *args, &block)
+    @cache[[method_name, args, block]] ||= @self.__send__(method_name, *args, &block)
+  end
+
+  #def self; @self; end
+
+  def self.cache(object)
+    new(object)
+  end
+
+  def self.uncache(cached_object)
+    cached_object.instance_variable_get('@self')
+  end
+
+end
+
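
Memoizer caches results per `[method_name, args, block]` via `method_missing`. A small illustrative sketch of how it might be used (the `Report` class and its methods are invented for the example, not part of the gem; the require path assumes the gem's lib/ layout):

    require 'hashery/memoizer'

    class Report
      def initialize(rows) ; @rows = rows ; end
      # Stand-in for an expensive computation.
      def total ; sleep 1 ; @rows.inject(0) { |sum, n| sum + n } ; end
    end

    report = Memoizer.cache(Report.new([1, 2, 3]))   # Memoizer.cache is just new

    report.total     #=> 6, computed on the first call (slow)
    report.total     #=> 6, answered from the per-method/args cache (no sleep)

    report.__self__  #=> the wrapped Report instance

As the comment in the source warns, the cache is not thread-safe.
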
data/lib/hashery/opencascade.rb
@@ -0,0 +1,82 @@
+#require 'facets/boolean' # bool
+#require 'facets/nullclass'
+require 'hashery/openobject'
+
+# = OpenCascade
+#
+# OpenCascade is subclass of OpenObject. It differs in a few
+# significant ways.
+#
+# The main reason this class is labeled "cascade", every internal
+# Hash is transformed into an OpenCascade dynamically upon access.
+# This makes it easy to create "cascading" references.
+#
+#   h = { :x => { :y => { :z => 1 } } }
+#   c = OpenCascade[h]
+#   c.x.y.z #=> 1
+#
+# As soon as you access a node it automatically becomes an OpenCascade.
+#
+#   c = OpenCascade.new #=> #<OpenCascade:0x7fac3680ccf0 {}>
+#   c.r #=> #<OpenCascade:0x7fac368084c0 {}>
+#   c.a.b #=> #<OpenCascade:0x7fac3680a4f0 {}>
+#
+# But if you set a node, then that will be it's value.
+#
+#   c.a.b = 4 #=> 4
+#
+# To query a node without causing the auto-creation of an OpenCasade
+# object, use the ?-mark.
+#
+#   c.a.z? #=> nil
+#
+# Finally, you can set a node and get the reciever back using
+# the !-mark.
+#
+#   c = OpenCascade.new #=> #<OpenCascade:0x7fac3680ccf0 {}>
+#   c.x!(4).y!(3) #=> #<OpenCascade:0x7fac3680ccf0 {:x=>4, :y=>3}>
+#
+#--
+# Last, when an entry is not found, 'null' is returned rather then 'nil'.
+# This allows for run-on entries withuot error. Eg.
+#
+#   o = OpenCascade.new
+#   o.a.b.c #=> null
+#
+# Unfortuately this requires an explict test for null? in 'if' conditions.
+#
+#   if o.a.b.c.null?  # true if null
+#   if o.a.b.c.nil?   # true if nil or null
+#   if o.a.b.c.not?   # true if nil or null or false
+#
+# So be sure to take that into account.
+#++
+
+class OpenCascade < OpenObject
+
+  def method_missing(sym, *args, &blk)
+    type = sym.to_s[-1,1]
+    name = sym.to_s.gsub(/[=!?]$/, '').to_sym
+    case type
+    when '='
+      self[name] = args.first
+    when '!'
+      @hash.__send__(key, *args, &blk)
+    when '?'
+      self[name]
+    else
+      if key?(name)
+        val = self[name]
+        if Hash === val
+          self[name] = OpenCascade.new(val) #self.class.new(val)
+        else
+          self[name]
+        end
+      else
+        self[name] = OpenCascade.new #self.class.new
+      end
+    end
+  end
+
+end
+
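
A short sketch of the cascading access described in the class comment above. The `config` data is invented for the example; the `OpenCascade[...]` constructor and the `?`-mark behavior follow the examples in the comment and rely on OpenObject from data/lib/hashery/openobject.rb (not shown in this hunk):

    require 'hashery/opencascade'

    h = { :server => { :host => 'localhost', :port => 8080 } }
    config = OpenCascade[h]

    config.server.host    #=> "localhost"  (the inner Hash becomes an OpenCascade)
    config.server.port    #=> 8080

    config.logging.level = :warn   # missing nodes are auto-created on read
    config.logging.level  #=> :warn

    config.database?      #=> nil  (query without auto-creating a node)
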
data/lib/hashery/openhash.rb
@@ -0,0 +1,65 @@
+# = OpenHash
+#
+# OpenHash is very similar to Ruby's own OpenStruct, but it offers some
+# useful advantages in that it is a true Hash object.
+#
+# Because OpenHash is a subclass of Hash, it can do everything a Hash
+# can *unless* a Hash method has been explicity exempted for use
+# an an open read/writer via the #omit! method.
+
+class OpenHash < Hash
+
+  # New OpenHash.
+  def initialize(data={})
+    super()
+    merge!(data)
+  end
+
+  #
+  def respond_to?(name)
+    key?(name.to_sym) || super(name)
+  end
+
+  #
+  def to_h
+    dup
+  end
+
+  #
+  def to_hash
+    dup
+  end
+
+  #
+  def inspect
+    super
+  end
+
+  # Omit specific Hash methods from slot protection.
+  def omit!(*methods)
+    methods.reject!{ |x| x.to_s =~ /^__/ }
+    (class << self; self; end).class_eval{ private *methods }
+  end
+
+  # Route get and set calls.
+  def method_missing(s,*a, &b)
+    type = s.to_s[-1,1]
+    name = s.to_s.sub(/[!?=]$/, '')
+    key = name.to_sym
+    case type
+    when '='
+      self[key] = a[0]
+    #when '!'
+    #  self[s] = OpenHash.new
+    when '?'
+      key?(key)
+    else
+      if key?(key)
+        self[key]
+      else
+        super(s,*a,&b)
+      end
+    end
+  end
+
+end
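
A brief sketch of the open-slot routing implemented by `method_missing` and `omit!` above (the keys used are illustrative; the require path assumes the gem's lib/ layout):

    require 'hashery/openhash'

    h = OpenHash.new(:a => 1)

    h.a          #=> 1      (reads for missing methods map to keys)
    h.b = 2                 # writers store under the symbolized name
    h[:b]        #=> 2
    h.b?         #=> true   (?-mark tests key presence)
    h.c?         #=> false

    h.keys       #=> [:a, :b]   # still a real Hash

    # Hash's own methods win until they are omitted from slot protection.
    h[:count] = 99
    h.count      #=> 3      (Hash#count: entries :a, :b, :count)
    h.omit!(:count)
    h.count      #=> 99     (now routed through method_missing to the key)

The `omit!` trick works because making an inherited Hash method private on the singleton class sends explicit-receiver calls to `method_missing`, which then treats the name as an open slot.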