opod 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/COPYING ADDED
@@ -0,0 +1,36 @@
+ Opod
+
+ Copyright (c) 2007, George Moschovitis & Thomas Sawyer
+
+ All rights reserved.
+
+ This software is distributed under the terms of the BSD license.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+   copyright notice, this list of conditions and the following
+   disclaimer in the documentation and/or other materials provided
+   with the distribution.
+
+ * Neither the name of the copyright holders nor the names of
+   contributors may be used to endorse or promote products derived
+   from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
data/README ADDED
@@ -0,0 +1,29 @@
+ = Opod
+
+ Opod is an adaptive cache system for Ruby. It can persist data to
+ a variety of storage backends: in memory, to file, or via database.
+
+
+ == Installation
+
+ Installing via RubyGems:
+
+   gem install opod
+
+ Manual install:
+
+   $ unzip opod-0.0.1.zip
+   $ cd opod-0.0.1
+   $ sudo task/setup
+
+ For Windows users the last line will be 'ruby task/setup'.
+
+
+ == Copying
+
+ Copyright (c) 2005-2007 Thomas Sawyer, George Moschovitis
+
+ All rights reserved.
+
+ BSD License
+
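The README stops at installation, so for orientation here is a minimal usage sketch of the common adapter interface, using the in-memory backend (this example is illustrative and assumes the gem and its facets dependency are installed; the key and value are made up):

    require 'opod/memory'

    cache = Opod::MemoryCache.new
    cache[:greeting] = "hello"     # same as cache.set(:greeting, "hello")
    puts cache[:greeting]          # => "hello"; cache.get(:greeting) also works
    cache.delete(:greeting)

The other adapters bundled in lib/opod (FileCache, MemCached, DrbCache, OgCache, PStoreCache) expose the same [] / []= style accessors, so one backend can be swapped for another with little code change.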
data/lib/opod/cache.rb ADDED
@@ -0,0 +1,24 @@
+ # TITLE:
+ #   Cache
+ #
+ # SUMMARY:
+ #   Base classes for all cache adapters.
+ #   However, they are not used yet.
+
+ module Opod
+
+   # A general cache key.
+
+   class CacheKey
+   end
+
+   # A general cache mechanism.
+   #
+   # This cache system was originally developed for Nitro. It is
+   # used to cache fragments, og objects (entities), sessions,
+   # application scoped variables and more.
+
+   class Cache
+   end
+
+ end
data/lib/opod/drb.rb ADDED
@@ -0,0 +1,51 @@
+ require "drb"
+ require "facets/settings"
+ require "opod/memory"
+
+ module Opod
+
+   # A cache backed by a DRb server.
+   #
+   # === Example
+   #
+   # This cache needs a corresponding DRb server. Here is how you
+   # can set up the standard Nitro DRb server to keep a DrbCache:
+   #
+   #   require 'glue/cache/memory'
+   #
+   #   class MyDrbServer < Nitro::DrbServer
+   #     def setup_drb_objects
+   #       ..
+   #       @my_cache = SyncHash.new
+   #       DRb.start_service("druby://#{my_drb_address}:#{my_drb_port}", @my_cache)
+   #       ..
+   #     end
+   #   end
+   #
+   #   MyDrbServer.start
+
+   class DrbCache < MemoryCache
+
+     # Initialize the cache.
+     #
+     # === Options
+     #
+     # :address = The address of the DRb cache object.
+     # :port = The port of the DRb cache object.
+
+     # The address of the Session cache / store (if distributed).
+
+     setting :address, :default => '127.0.0.1', :doc => 'The address of the Session cache'
+
+     # The port of the Session DRb cache / store (if distributed).
+
+     setting :port, :default => 9069, :doc => 'The port of the Session cache'
+
+
+     def initialize(address = DrbCache.address, port = DrbCache.port)
+       @hash = DRbObject.new(nil, "druby://#{address}:#{port}")
+     end
+
+   end
+
+ end
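For the client side, which the comment above does not show, a rough sketch looks like this (it assumes a DRb service such as the MyDrbServer example is already running at the configured address and port; the fragment key is illustrative):

    require 'opod/drb'

    DRb.start_service                              # local DRb endpoint for the client
    cache = Opod::DrbCache.new('127.0.0.1', 9069)  # defaults from the :address/:port settings
    cache[:fragment] = "<h1>cached</h1>"
    puts cache[:fragment]

Because DrbCache simply points MemoryCache's internal hash at a DRbObject, the inherited accessors (get/set, read/write, delete, keys) are proxied to the remote hash.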
data/lib/opod/file.rb ADDED
@@ -0,0 +1,79 @@
+ require "uri"
+ require "fileutils"
+ require "tmpdir"
+
+ module Opod
+
+   class FileCache
+
+     setting :basedir, :default => "#{Dir.tmpdir}/nitro_file_cache", :doc => "The directory to store files"
+
+     def initialize(name = "cache", keepalive = nil)
+       @path = File.join(FileCache.basedir, name)
+       @keepalive = keepalive
+
+       FileUtils.mkdir_p(@path, :mode => 0700)
+     end
+
+     def []=(k, v)
+       fn = File.join(@path, escape_filename(k.to_s))
+       encode_file(fn, v)
+     end
+     alias_method :set, :[]=
+
+     def [](k)
+       fn = File.join(@path, escape_filename(k.to_s))
+       return nil unless File.exists?(fn)
+       decode_file(fn)
+     end
+     alias_method :get, :[]
+
+     def delete(k)
+       f = File.join(@path, escape_filename(k.to_s))
+       File.delete(f) if File.exists?(f)
+     end
+
+     def gc!
+       return unless @keepalive
+
+       now = Time.now
+       all.each do |fn|
+         expire_time = File.stat(fn).atime + @keepalive
+         File.delete(fn) if now > expire_time
+       end
+     end
+
+     def all
+       Dir.glob(File.join(@path, '*'))
+     end
+
+     private
+
+     def decode_file(fn)
+       val = nil
+       File.open(fn, "rb") do |f|
+         f.flock(File::LOCK_EX)
+         val = Marshal.load(f.read)
+         f.flock(File::LOCK_UN)
+       end
+       return val
+     end
+
+     def encode_file(fn, value)
+       File.open(fn, "wb") do |f|
+         f.flock(File::LOCK_EX)
+         f.chmod(0600)
+         f.write(Marshal.dump(value))
+         f.flock(File::LOCK_UN)
+       end
+     end
+
+     # Needed for FAT filesystems.
+     def escape_filename(fn)
+       URI.escape(fn, /["\/:;|=,\[\]]/)
+     end
+
+   end
+
+ end
+
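A short sketch of how FileCache might be used (the cache name, key, and keepalive value are illustrative; facets/settings is required up front because file.rb calls the class-level `setting` macro without requiring it itself):

    require 'facets/settings'   # provides the `setting` macro used by the adapter
    require 'opod/file'

    cache = Opod::FileCache.new('sessions', 3600)   # keep entries for ~1 hour of idle time
    cache['user:42'] = { :name => 'alice' }         # value is marshaled to a file under FileCache.basedir/sessions
    p cache['user:42']                              # => {:name=>"alice"}
    cache.gc!                                       # deletes files whose atime is older than keepalive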
data/lib/opod/memcached.rb ADDED
@@ -0,0 +1,69 @@
+ # Specifications:
+ # http://cvs.danga.com/browse.cgi/wcmtools/memcached/doc/protocol.txt?rev=HEAD&content-type=text/plain
+ #
+ # A very simple (= very fast) client for memcached.
+ #
+ # I found the Ruby-MemCache library a little bit buggy and complicated, so I made my own before
+ # fixing it ;)
+ #
+ # TODO: socket disconnection handling
+ # TODO: error handling
+ # TODO: multiple server connections
+
+ require "socket"
+
+ module Opod
+
+   class MemCached
+
+     setting :address, :default => "localhost", :doc => "Server address"
+     setting :port, :default => 11211, :doc => "Server port"
+
+     def initialize(name = "cache", keepalive = nil)
+       @sock = TCPSocket.new(MemCached.address, MemCached.port)
+       @name = name
+       @keepalive = keepalive
+     end
+
+     def []=(k, v)
+       if @keepalive
+         exptime = (Time.now + @keepalive).to_i
+       else
+         exptime = 0
+       end
+
+       data = Marshal.dump(v)
+       @sock.print("set #{@name}:#{k} 0 #{exptime} #{data.size}\r\n#{data}\r\n")
+       response = @sock.gets # "STORED\r\n"
+       v
+     end
+     alias_method :set, :[]=
+
+     def [](k)
+       @sock.print("get #{@name}:#{k}\r\n")
+       resp = @sock.gets
+       if resp == "END\r\n"
+         return nil
+       end
+
+       # dummy, key, flags, size
+       size = resp.split(/ /).last.to_i
+       raw_data = @sock.read(size)
+       @sock.gets # \r\n
+       @sock.gets # END\r\n
+       Marshal.load(raw_data)
+     end
+     alias_method :get, :[]
+
+     def delete(k)
+       @sock.print("delete #{@name}:#{k}\r\n")
+       @sock.gets # "DELETED\r\n"
+     end
+
+     def gc!
+       # Garbage collection is handled by the memcached server.
+     end
+
+   end
+
+ end
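A sketch of the MemCached adapter in use (it assumes a memcached daemon is listening on the default localhost:11211; the cache name and keepalive are illustrative, and facets/settings is required explicitly for the `setting` macro):

    require 'facets/settings'
    require 'opod/memcached'

    cache = Opod::MemCached.new('fragments', 600)   # entries expire after ~10 minutes
    cache['front_page'] = "<html>...</html>"        # value is marshaled and sent with the set command
    puts cache['front_page']
    cache.delete('front_page')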
data/lib/opod/memory.rb ADDED
@@ -0,0 +1,83 @@
+ require 'facets/synchash'
+
+ module Opod
+
+   # A cache backed in memory.
+   #--
+   # This implementation is also the base for the DRb cache.
+   #++
+
+   class MemoryCache
+     attr :hash
+
+     def initialize(options = {})
+       if options[:sync]
+         @hash = SyncHash.new
+       else
+         @hash = {}
+       end
+     end
+
+     # Was originally in Cache.
+
+     def update(hash)
+       hash.each { |key, value| self[key] = value }
+     end
+
+     # Get an object from the cache.
+
+     def get(key, options = nil)
+       @hash[key]
+     end
+     alias_method :read, :get
+     alias_method :[], :get
+
+     # Put an object in the cache.
+
+     def set(key, value = nil, options = nil)
+       @hash[key] = value
+     end
+     alias_method :put, :set
+     alias_method :write, :set
+     alias_method :[]=, :set
+
+     # Delete an object from the cache.
+
+     def delete(key, options = nil)
+       @hash.delete(key)
+     end
+     alias_method :remove, :delete
+
+     def delete_if(&block)
+       @hash.delete_if(&block)
+     end
+
+     # Perform session garbage collection. Typically this method
+     # is called from a cron-like mechanism.
+
+     def gc!
+       delete_if { |key, s| s.expired? }
+     end
+
+     # Return the mapping.
+
+     def mapping
+       @hash
+     end
+
+     # Return all keys in the cache.
+
+     def keys
+       @hash.keys
+     end
+
+     # Return all objects in the cache.
+
+     def all
+       @hash.values
+     end
+     alias_method :values, :all
+
+   end
+
+ end
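A brief sketch of MemoryCache with the :sync option, which swaps the plain Hash for a thread-safe SyncHash from facets, plus the bulk update helper (keys and values are illustrative):

    require 'opod/memory'

    cache = Opod::MemoryCache.new(:sync => true)
    cache.update(:a => 1, :b => 2)   # bulk insert; calls []= for each pair
    p cache.keys                     # => [:a, :b] (order may vary)
    p cache.all                      # => [1, 2]  (order may vary)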
data/lib/opod/og.rb ADDED
@@ -0,0 +1,61 @@
+ require 'og'
+ require 'base64'
+
+ module Opod
+
+   class OgCached
+     include Og::EntityMixin
+
+     property :unique_id, String, :sql => 'PRIMARY KEY'
+     property :expires, Time
+     property :cache_name, String
+     property :content, String
+
+     set_primary_key :unique_id, String
+   end
+
+   class OgCache
+
+     def initialize(cache_name, keepalive = nil)
+       @cache_name = cache_name
+       @keepalive = keepalive
+     end
+
+     def []=(k, v)
+       unless s = OgCached.find_by_unique_id_and_cache_name(k.to_s, @cache_name)
+         s = OgCached.new
+         s.cache_name = @cache_name
+         s.expires = Time.now + @keepalive if @keepalive
+         s.unique_id = k.to_s
+       end
+       #s.content = v.to_yaml
+       s.content = encode(v)
+       s.insert
+     end
+
+     def [](k)
+       s = OgCached.find_by_unique_id_and_cache_name(k.to_s, @cache_name)
+       decode(s.content) if s
+     end
+
+     def gc!
+       OgCached.find(:condition => ["expires < ? AND cache_name = ?", Time.now, @cache_name]).each { |s| s.delete }
+     end
+
+     def all
+       OgCached.find_by_cache_name(@cache_name)
+     end
+
+     private
+
+     def encode(c)
+       Base64.encode64(Marshal.dump(c))
+     end
+
+     def decode(c)
+       Marshal::load(Base64.decode64(c))
+       #s.content = YAML::load(s.content)
+     end
+
+   end
+ end
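OgCache depends on Og being set up with a database connection before it is used. A heavily hedged sketch (the Og.setup options shown are only an example and vary by Og version; the cache name, key, and value are illustrative):

    require 'opod/og'

    # Og must be configured first; with an SQLite store this is roughly:
    #   Og.setup(:store => :sqlite, :name => 'cache_db')

    cache = Opod::OgCache.new('sessions', 3600)
    cache['abc123'] = { :user_id => 7 }   # marshaled, Base64-encoded, stored via the OgCached entity
    p cache['abc123']
    cache.gc!                             # deletes rows whose expires timestamp has passed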
data/lib/opod/pstore.rb ADDED
@@ -0,0 +1,187 @@
+ require 'fileutils'
+ require 'pstore'
+ require 'tmpdir'
+
+ module Opod
+
+   class PStoreCache
+
+     setting :basedir, :default => File.join(Dir.tmpdir, 'nitro_file_cache'),
+             :doc => 'Base directory for cache files'
+
+     setting :max_tries, :default => 5, :doc => 'Maximum number of tries for cache operations'
+
+     setting :tokens, :default => '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
+             :doc => 'Tokens used when generating cache file names'
+
+     def initialize(keepalive = nil)
+       @keepalive = keepalive
+       initialize_store
+     end
+
+     # Return the object stored under <i>key</i> from the cache.
+     # If the object isn't present in the cache <b>nil</b> is returned.
+
+     def [](key)
+       current_try = 0
+       begin
+         current_try += 1
+         @store.transaction(true) do
+           (entry = @store.fetch(key, nil)) ? entry[1] : nil
+         end
+       rescue PStore::Error
+         # Unable to return the requested object from the cache.
+         # Create a new store and retry.
+         initialize_store
+         unless current_try > PStoreCache.max_tries
+           retry
+         else
+           raise Exception.new("Unable to read from cache file #{@store.path}!")
+         end
+       end
+     end
+
+     # Store <i>obj</i> under <i>key</i> in the cache.
+
+     def []=(key, obj)
+       current_try = 0
+       begin
+         current_try += 1
+         @store.transaction(false) do
+           if @keepalive
+             @store[key] = [Time.now + @keepalive, obj]
+           else
+             @store[key] = [nil, obj]
+           end
+         end
+       rescue PStore::Error
+         # Unable to store the object.
+         # Create a new store and retry.
+         initialize_store
+         unless current_try > PStoreCache.max_tries
+           retry
+         else
+           raise Exception.new("Unable to write to cache file #{@store.path}!")
+         end
+       end
+     end
+
+     # Return all objects stored in the cache.
+
+     def all
+       current_try = 0
+       begin
+         current_try += 1
+         @store.transaction(true) do
+           @store.roots.inject([]) do |result, current|
+             result << @store[current][1]
+           end
+         end
+       rescue PStore::Error
+         # Unable to return stored objects from the cache.
+         # Create a new store and retry.
+         initialize_store
+         unless current_try > PStoreCache.max_tries
+           retry
+         else
+           raise Exception.new("Unable to read from cache file #{@store.path}!")
+         end
+       end
+     end
+
+     # Delete the object stored under <i>key</i> in the cache.
+
+     def delete(key)
+       begin
+         @store.transaction(false) do
+           @store.delete(key)
+         end
+       rescue PStore::Error
+         # Unable to delete the object from the cache.
+         # Create a new store.
+         initialize_store
+       end
+     end
+
+     # Remove all expired objects from the cache.
+
+     def gc!
+       return unless @keepalive
+       begin
+         now = Time.now
+         @store.transaction(false) do
+           @store.roots.each do |r|
+             @store.delete(r) if now > @store[r][0]
+           end
+         end
+       rescue PStore::Error
+         # Unable to delete expired objects from the cache.
+         # Create a new store.
+         initialize_store
+       end
+     end
+
+     alias get []
+
+     alias put []=
+
+     alias read []
+
+     alias values all
+
+     alias write []=
+
+     private
+
+     # Generate a random filename using <i>tokens</i> and <i>length</i> as constraints.
+
+     def generate_random_filename(tokens = PStoreCache.tokens, length = 16)
+       filename = ''
+       1.upto(length) do
+         filename << tokens[rand(tokens.length)]
+       end
+       filename
+     end
+
+     # Initialize the store with a random filename.
+     # If no suitable filename can be found, raise an exception.
+
+     def initialize_store
+       # Create the cache directory (if needed).
+       unless File.exists?(PStoreCache.basedir) && File.directory?(PStoreCache.basedir)
+         begin
+           FileUtils.mkdir_p(PStoreCache.basedir)
+         rescue Exception
+           raise Exception.new("Unable to create cache directory #{PStoreCache.basedir}!")
+         end
+       end
+
+       # Check whether the current process has sufficient privileges for reading/writing files in the cache directory.
+
+       unless File.readable?(PStoreCache.basedir) && File.writable?(PStoreCache.basedir)
+         raise Exception.new("Insufficient privileges for cache directory #{PStoreCache.basedir}!")
+       end
+
+       # Generate a new random filename for the cache file.
+
+       current_try = 0
+
+       begin
+         current_try += 1
+         filename = generate_random_filename
+       end while File.exists?(File.join(PStoreCache.basedir, filename)) && current_try < PStoreCache.max_tries
+
+       if current_try <= PStoreCache.max_tries
+         begin
+           @store = PStore.new(File.join(PStoreCache.basedir, filename))
+         rescue Exception
+           raise Exception.new('Unable to create cache file!')
+         end
+       else
+         raise Exception.new('Unable to create cache file!')
+       end
+     end
+
+   end
+
+ end
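A usage sketch for PStoreCache. Each instance creates its own randomly named PStore file under PStoreCache.basedir, so separately constructed caches do not share data (the keepalive and keys below are illustrative; facets/settings supplies the `setting` macro used by the class):

    require 'facets/settings'
    require 'opod/pstore'

    cache = Opod::PStoreCache.new(900)   # entries expire 15 minutes after being written
    cache['token'] = 'xyz'
    p cache['token']                     # => "xyz"
    cache.gc!                            # removes entries whose expiry time has passed
    p cache.values                       # remaining cached objects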
data/meta/MANIFEST ADDED
@@ -0,0 +1,21 @@
+ # -x doc -x dev -x pkg
+ COPYING
+ README
+ lib
+ lib/opod
+ lib/opod/cache.rb
+ lib/opod/drb.rb
+ lib/opod/file.rb
+ lib/opod/memcached.rb
+ lib/opod/memory.rb
+ lib/opod/og.rb
+ lib/opod/pstore.rb
+ log
+ meta
+ meta/MANIFEST
+ meta/opod-0.0.1.roll
+ task
+ task/config.yaml
+ task/publish
+ task/rdoc
+ test
data/meta/opod-0.0.1.roll ADDED
@@ -0,0 +1,21 @@
+ ---
+ title    : Opod
+ author   : George Moschovitis
+ contact  : Thomas Sawyer
+ email    : transfire@gmail.com
+ homepage : "http://opod.rubyforge.org"
+ status   : beta
+
+ summary: Object Pods are places to keep digital stuff.
+
+ description: >
+   Object storage through various adapters.
+
+ dependency:
+   - [ facets, ">= 2.1.0" ]
+
+ formats: [ gem, zip ]
+
+ lib_path:
+   - lib/opod
+
data/task/config.yaml ADDED
@@ -0,0 +1,9 @@
+ rdoc:
+   op: doc/rdoc
+   title: OPOD
+
+ publish:
+   source: doc/web
+   project: opod
+   username: transami
+
data/task/publish ADDED
@@ -0,0 +1,44 @@
+ #!/usr/bin/env ratch
+
+ # publish website to rubyforge
+
+ # This task publishes the source dir (default doc/)
+ # to a rubyforge website.
+
+ config = configuration['publish']
+
+ project  = config['project']
+ subdir   = config['subdir']
+ source   = config['source']
+ username = ENV['RUBYFORGE_USERNAME'] || config['username']
+ protect  = %w{usage statcvs statsvn robot.txt wiki}
+ exclude  = %w{.svn}
+
+ abort("no project name") unless project
+ abort("no username") unless username
+ abort("no source dir") unless source
+
+ if subdir
+   destination = File.join(project, subdir)
+ else
+   destination = project
+ end
+
+ dir = source.chomp('/') + '/'
+ url = "#{username}@rubyforge.org:/var/www/gforge-projects/#{destination}"
+
+ # maybe -p ?
+ op = ['-rLvz', '--delete']
+ if File.file?(File.join(source, '.rsync-filter'))
+   op << "--filter='dir-merge #{source}/.rsync-filter'"
+ else
+   op.concat exclude.map { |e| "--filter='- #{e}'" }
+   op.concat protect.map { |e| "--filter='P #{e}'" }
+ end
+
+ args = op + [dir, url]
+
+ #cd source do
+   rsync *args
+ #end
+
data/task/rdoc ADDED
@@ -0,0 +1,40 @@
+ #!/usr/bin/env ratch
+
+ # generate rdocs
+
+ # Generate RDoc documentation. Settings are
+ # the same as the rdoc command's options.
+
+ main :rdoc do
+   # Load rdoc configuration.
+
+   config = configuration['rdoc']
+
+   config = {
+     'template' => 'html',
+     'op' => 'doc/rdoc',
+     'merge' => true,
+     'inline-source' => true,
+     'exclude' => %w{InstalledFiles Manifest Project dev util},
+     'include' => %w{[A-Z]* lib}
+   }.update(config)
+
+   output = config['op']
+
+   # Check for the 'doc' directory.
+   # (Helps to ensure we're in the right place.)
+
+   dir!(File.dirname(output))
+
+   # Prepare command arguments.
+
+   vector = config.command_vector('include')
+
+   # Remove old rdocs, if any.
+
+   rm_r(output) if File.exist?(output)
+
+   # Document.
+
+   rdoc(*vector)
+ end
metadata ADDED
@@ -0,0 +1,81 @@
+ --- !ruby/object:Gem::Specification
+ rubygems_version: 0.9.4.6
+ specification_version: 2
+ name: opod
+ version: !ruby/object:Gem::Version
+   version: 0.0.1
+ date: 2007-11-14 00:00:00 -05:00
+ summary: Object Pods are places to keep digital stuff.
+ require_paths:
+ - lib
+ email: transfire@gmail.com
+ homepage: http://opod.rubyforge.org
+ rubyforge_project:
+ description: Object storage through various adapters.
+ autorequire:
+ default_executable:
+ bindir: bin
+ has_rdoc:
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: "0"
+   version:
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: "0"
+   version:
+ platform: ruby
+ signing_key:
+ cert_chain: []
+
+ post_install_message:
+ extensions_fallback:
+ authors:
+ - George Moschovitis
+ files:
+ - COPYING
+ - README
+ - lib
+ - lib/opod
+ - lib/opod/cache.rb
+ - lib/opod/drb.rb
+ - lib/opod/file.rb
+ - lib/opod/memcached.rb
+ - lib/opod/memory.rb
+ - lib/opod/og.rb
+ - lib/opod/pstore.rb
+ - log
+ - meta
+ - meta/MANIFEST
+ - meta/opod-0.0.1.roll
+ - task
+ - task/config.yaml
+ - task/publish
+ - task/rdoc
+ - test
+ test_files: []
+
+ rdoc_options: []
+
+ extra_rdoc_files: []
+
+ executables: []
+
+ extensions: []
+
+ requirements: []
+
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: facets
+   version_requirement:
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: 2.1.0
+     version: