bone 0.2.6 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,195 @@
+ # BS REDIS 2.0 CONFIG (dev) -- 2010-11-17
+
+ # NOTE: NOTE: auto-generated by delano on tucker at 2010-11-29 11:49:17 -0500
+
+ dir /tmp
+
+ pidfile boned-redis.pid
+ logfile boned-redis.log
+ dbfilename boned-redis.rdb
+
+ port 8045
+ bind 127.0.0.1
+ daemonize yes
+
+ timeout 300
+
+ #loglevel debug
+ #loglevel verbose
+ loglevel warning
+
+ databases 16
+
+ save 900 100
+ save 300 5000
+
+
+ rdbcompression yes
+
+ # requirepass foobared
+ # maxclients 0
+
+ appendonly no
+ appendfilename redis.aof
+
+ # TODO: Consider having separate configs when the usecase for Redis
+ # changes. For example, one for production, another for batch processing.
+ #
+ # Nothing is changed from here on out:
+
+ ################################## INCLUDES ###################################
+
+ # Include one or more other config files here. This is useful if you
+ # have a standard template that goes to all redis server but also need
+ # to customize a few per-server settings. Include files can include
+ # other files, so use this wisely.
+ #
+ # include /path/to/local.conf
+ # include /path/to/other.conf
+
+ ################################# REPLICATION #################################
+
+ # Master-Slave replication. Use slaveof to make a Redis instance a copy of
+ # another Redis server. Note that the configuration is local to the slave
+ # so for example it is possible to configure the slave to save the DB with a
+ # different interval, or to listen to another port, and so on.
+ #
+ # slaveof <masterip> <masterport>
+
+ # If the master is password protected (using the "requirepass" configuration
+ # directive below) it is possible to tell the slave to authenticate before
+ # starting the replication synchronization process, otherwise the master will
+ # refuse the slave request.
+ #
+ # masterauth <master-password>
+
+
+ # The fsync() call tells the Operating System to actually write data on disk
+ # instead to wait for more data in the output buffer. Some OS will really flush
+ # data on disk, some other OS will just try to do it ASAP.
+ #
+ # Redis supports three different modes:
+ #
+ # no: don't fsync, just let the OS flush the data when it wants. Faster.
+ # always: fsync after every write to the append only log . Slow, Safest.
+ # everysec: fsync only if one second passed since the last fsync. Compromise.
+ #
+ # The default is "everysec" that's usually the right compromise between
+ # speed and data safety. It's up to you to understand if you can relax this to
+ # "no" that will will let the operating system flush the output buffer when
+ # it wants, for better performances (but if you can live with the idea of
+ # some data loss consider the default persistence mode that's snapshotting),
+ # or on the contrary, use "always" that's very slow but a bit safer than
+ # everysec.
+ #
+ # If unsure, use "everysec".
+
+ # appendfsync always
+ appendfsync everysec
+ # appendfsync no
+
+ ################################ VIRTUAL MEMORY ###############################
+
+ # Virtual Memory allows Redis to work with datasets bigger than the actual
+ # amount of RAM needed to hold the whole dataset in memory.
+ # In order to do so very used keys are taken in memory while the other keys
+ # are swapped into a swap file, similarly to what operating systems do
+ # with memory pages.
+ #
+ # To enable VM just set 'vm-enabled' to yes, and set the following three
+ # VM parameters accordingly to your needs.
+
+ vm-enabled no
+ # vm-enabled yes
+
+ # This is the path of the Redis swap file. As you can guess, swap files
+ # can't be shared by different Redis instances, so make sure to use a swap
+ # file for every redis process you are running. Redis will complain if the
+ # swap file is already in use.
+ #
+ # The best kind of storage for the Redis swap file (that's accessed at random)
+ # is a Solid State Disk (SSD).
+ #
+ # *** WARNING *** if you are using a shared hosting the default of putting
+ # the swap file under /tmp is not secure. Create a dir with access granted
+ # only to Redis user and configure Redis to create the swap file there.
+ vm-swap-file /tmp/redis.swap
+
+ # vm-max-memory configures the VM to use at max the specified amount of
+ # RAM. Everything that deos not fit will be swapped on disk *if* possible, that
+ # is, if there is still enough contiguous space in the swap file.
+ #
+ # With vm-max-memory 0 the system will swap everything it can. Not a good
+ # default, just specify the max amount of RAM you can in bytes, but it's
+ # better to leave some margin. For instance specify an amount of RAM
+ # that's more or less between 60 and 80% of your free RAM.
+ vm-max-memory 0
+
+ # Redis swap files is split into pages. An object can be saved using multiple
+ # contiguous pages, but pages can't be shared between different objects.
+ # So if your page is too big, small objects swapped out on disk will waste
+ # a lot of space. If you page is too small, there is less space in the swap
+ # file (assuming you configured the same number of total swap file pages).
+ #
+ # If you use a lot of small objects, use a page size of 64 or 32 bytes.
+ # If you use a lot of big objects, use a bigger page size.
+ # If unsure, use the default :)
+ vm-page-size 32
+
+ # Number of total memory pages in the swap file.
+ # Given that the page table (a bitmap of free/used pages) is taken in memory,
+ # every 8 pages on disk will consume 1 byte of RAM.
+ #
+ # The total swap size is vm-page-size * vm-pages
+ #
+ # With the default of 32-bytes memory pages and 134217728 pages Redis will
+ # use a 4 GB swap file, that will use 16 MB of RAM for the page table.
+ #
+ # It's better to use the smallest acceptable value for your application,
+ # but the default is large in order to work in most conditions.
+ vm-pages 134217728
+
+ # Max number of VM I/O threads running at the same time.
+ # This threads are used to read/write data from/to swap file, since they
+ # also encode and decode objects from disk to memory or the reverse, a bigger
+ # number of threads can help with big objects even if they can't help with
+ # I/O itself as the physical device may not be able to couple with many
+ # reads/writes operations at the same time.
+ #
+ # The special value of 0 turn off threaded I/O and enables the blocking
+ # Virtual Memory implementation.
+ vm-max-threads 4
+
+ ############################### ADVANCED CONFIG ###############################
+
+ # Glue small output buffers together in order to send small replies in a
+ # single TCP packet. Uses a bit more CPU but most of the times it is a win
+ # in terms of number of queries per second. Use 'yes' if unsure.
+ glueoutputbuf yes
+
+ # Hashes are encoded in a special way (much more memory efficient) when they
+ # have at max a given numer of elements, and the biggest element does not
+ # exceed a given threshold. You can configure this limits with the following
+ # configuration directives.
+ hash-max-zipmap-entries 64
+ hash-max-zipmap-value 512
+
+ # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+ # order to help rehashing the main Redis hash table (the one mapping top-level
+ # keys to values). The hash table implementation redis uses (see dict.c)
+ # performs a lazy rehashing: the more operation you run into an hash table
+ # that is rhashing, the more rehashing "steps" are performed, so if the
+ # server is idle the rehashing is never complete and some more memory is used
+ # by the hash table.
+ #
+ # The default is to use this millisecond 10 times every second in order to
+ # active rehashing the main dictionaries, freeing memory when possible.
+ #
+ # If unsure:
+ # use "activerehashing no" if you have hard latency requirements and it is
+ # not a good thing in your environment that Redis can reply form time to time
+ # to queries with 2 milliseconds delay.
+ #
+ # use "activerehashing yes" if you don't have such hard requirements but
+ # want to free memory asap when possible.
+ activerehashing yes
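The VIRTUAL MEMORY comments in the config above give the sizing rule for the swap file: total swap size is vm-page-size * vm-pages, and the in-memory page table costs one byte per 8 pages. A quick check of the values used in this config, as a Ruby sketch (illustration only, not part of the package):

    vm_page_size = 32            # bytes per page, per the directive above
    vm_pages     = 134_217_728   # total pages, per the directive above
    swap_bytes   = vm_page_size * vm_pages   # 4_294_967_296 bytes: a 4 GB swap file
    table_bytes  = vm_pages / 8              # 16_777_216 bytes: ~16 MB of RAM for the page table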
@@ -1,180 +1,171 @@
- require 'uri'
- require 'net/http'
- require 'net/https'

  unless defined?(BONE_HOME)
  BONE_HOME = File.expand_path(File.join(File.dirname(__FILE__), '..') )
  end

- module Bone
- extend self
- VERSION = "0.2.6"
- APIVERSION = 'v1'.freeze
-
+ local_libs = %w{familia}
+ local_libs.each { |dir|
+ a = File.join(BONE_HOME, '..', '..', 'opensource', dir, 'lib')
+ $:.unshift a
+ }
+
+ require 'familia'
+ require 'base64'
+ require 'openssl'
+ require 'time'
+
+ class Bone
+ module VERSION
+ def self.to_s
+ load_config
+ [@version[:MAJOR], @version[:MINOR], @version[:PATCH]].join('.')
+ end
+ alias_method :inspect, :to_s
+ def self.load_config
+ require 'yaml'
+ @version ||= YAML.load_file(File.join(BONE_HOME, '..', 'VERSION.yml'))
+ end
+ end
+ end
+
+
+ class Bone
+ unless defined?(Bone::APIVERSION)
+ APIVERSION = 'v2'.freeze
+ SECRETCHAR = [('a'..'z'),('A'..'Z'),(0..9)].map(&:to_a).flatten.freeze
+ end
+ @source = URI.parse(ENV['BONE_SOURCE'] || 'redis://127.0.0.1:6379/')
+ @apis = {}
+ @digest_type = OpenSSL::Digest::SHA256
  class Problem < RuntimeError; end
- class BadBone < Problem; end
-
- @digest_type = nil # set at the end
- @debug = false
+ class NoToken < Problem; end
  class << self
- attr_accessor :digest_type
- def enable_debug() @debug = true end
- def disable_debug() @debug = false end
- def debug?() @debug == true end
- def ld(*msg)
- return unless Bone.debug?
- prefix = "D(#{Thread.current.object_id}): "
- STDERR.puts "#{prefix}" << msg.join("#{$/}#{prefix}")
+ attr_accessor :debug
+ attr_reader :apis, :api, :source, :digest_type
+ attr_writer :token, :secret
+
+ def source= v
+ @source = URI.parse v
+ select_api
  end
- end
-
- SOURCE = (ENV['BONE_SOURCE'] || "http://localhost:6043").freeze
- TOKEN = ENV['BONE_TOKEN'].freeze
-
- # Get a key from the boned server. Same as `get!`
- # but does not raise an exception for an unknown key.
- def get(key, opts={})
- get! key, opts
- rescue Bone::Problem
- nil
- end
-
- # Get a key from the boned server. Raises an exception
- # for an unknown key.
- def get!(key, opts={})
- token = opts[:token] || ENV['BONE_TOKEN'] || TOKEN
- request(:get, token, key) # returns the response body
- end
-
- def set(key, value, opts={})
- set! key, value, opts
- rescue Bone::Problem
- nil
- end
-
- def set!(key, value, opts={})
- token = opts[:token] || ENV['BONE_TOKEN'] || TOKEN
- opts[:value] = value
- request(:set, token, key, opts)
- key # return the key b/c it could be a binary file
- end
-
- def del(key, opts={})
- token = opts[:token] || ENV['BONE_TOKEN'] || TOKEN
- request(:del, token, key, opts) # returns the response body
- end
-
- def [](keyname)
- get(keyname)
- end
-
- def []=(keyname, value)
- set(keyname, value)
- end
-
- def keys(keyname=nil, opts={})
- token = opts[:token] || ENV['BONE_TOKEN'] || TOKEN
- k = request(:keys, token, keyname, opts)
- k.split($/)
- end
-
- # <tt>require</tt> a library from the vendor directory.
- # The vendor directory should be organized such
- # that +name+ and +version+ can be used to create
- # the path to the library.
- #
- # e.g.
- #
- # vendor/httpclient-2.1.5.2/httpclient
- #
- def require_vendor(name, version)
- path = File.join(BONE_HOME, 'vendor', "#{name}-#{version}", 'lib')
- $:.unshift path
- Bone.ld "REQUIRE VENDOR: ", path
- require name
- end
-
- def valid_token?(val)
- is_sha256? val
- end
-
- def is_sha1?(val)
- val.to_s.match /\A[0-9a-f]{40}\z/
- end
-
- def is_sha256?(val)
- val.to_s.match /\A[0-9a-f]{64}\z/
- end
-
- def digest(val)
- @digest_type.hexdigest val
- end
-
- def generate_token
- srand
- digest [`hostname`, `w`, Time.now, rand].join(':')
- end
- alias_method :token, :generate_token
-
- private
-
- def request(action, token, key, params={})
- params[:token] = token
- path = "/bone/#{APIVERSION}/#{action}/#{key}"
- source = SOURCE
- unless source.match(/\Ahttp/)
- source = ['http:', SOURCE].join('//')
- end
- uri = URI.parse source
- host, port = uri.host, uri.port
- port ||= (uri.scheme == 'https' ? 443 : 6043)
+ alias_method :src=, :source=
+ alias_method :src, :source

- Bone.ld "SOURCE: #{uri.to_s}"
- Bone.ld "URI: #{path}"
- Bone.ld "PARAMS: " << params.inspect
+ # e.g.
+ #
+ # Bone.cred = 'token:secret'
+ #
+ def credentials= token
+ @token, @secret = *token.split(':')
+ end
+ alias_method :cred=, :credentials=

- case action
- when :del
- headers = { 'X-BONE_TOKEN' => token }
- req = Net::HTTP::Delete.new(path, headers)
- when :set
- query = {}
- params.each_pair {|n,v| query[n.to_s] = v }
- req = Net::HTTP::Post.new(path)
- req.set_form_data query
- when :get, :keys
- args = []
- params.each_pair {|n,v| args << "#{n}=#{URI.encode(v.to_s)}" }
- query = [path, args.join('&')].join('?')
- Bone.ld "GET: #{query}"
- req = Net::HTTP::Get.new(query)
- else
- raise Bone::Problem, "Unknown action: #{action}"
- end
- http = Net::HTTP.new(host, port)
- http.use_ssl = true if uri.scheme == 'https'
- http.verify_mode = OpenSSL::SSL::VERIFY_PEER
- res = http.request(req)
- case res
- when Net::HTTPSuccess, Net::HTTPRedirection
- res.body
- else
- raise Bone::Problem, "#{res.body} (#{res.code} #{res.message})"
+ def token
+ @token || ENV['BONE_TOKEN']
  end
- rescue Errno::ECONNREFUSED => ex
- raise Bone::Problem, "No boned"
- end
-
- def determine_digest_type
- if RUBY_PLATFORM == "java"
- require 'openssl'
- Bone.digest_type = OpenSSL::Digest::SHA256
+
+ def secret
+ @secret || ENV['BONE_SECRET']
+ end
+
+ def info *msg
+ STDERR.puts *msg
+ end
+
+ def ld *msg
+ info *msg if debug
+ end
+
+ # Stolen from Rack::Utils which stole it from Camping.
+ def uri_escape s
+ s.to_s.gsub(/([^ a-zA-Z0-9_.-]+)/n) {
+ '%'+$1.unpack('H2'*bytesize($1)).join('%').upcase
+ }.tr(' ', '+')
+ end
+
+ # Stolen from Rack::Utils which stole it from Camping.
+ def uri_unescape s
+ s.tr('+', ' ').gsub(/((?:%[0-9a-fA-F]{2})+)/n){
+ [$1.delete('%')].pack('H*')
+ }
+ end
+
+ # Return the bytesize of String; uses String#size under Ruby 1.8 and
+ # String#bytesize under 1.9.
+ if ''.respond_to?(:bytesize)
+ def bytesize s
+ s.bytesize
+ end
  else
- require 'digest'
- Bone.digest_type = Digest::SHA256
+ def bytesize s
+ s.size
+ end
+ end
+
+ def is_sha1? val
+ val.to_s.match /\A[0-9a-f]{40}\z/
+ end
+
+ def is_sha256? val
+ val.to_s.match /\A[0-9a-f]{64}\z/
+ end
+
+ def digest val, type=nil
+ type ||= @digest_type
+ type.hexdigest val
+ end
+
+ def random_token
+ p1 = (0...21).map{ SECRETCHAR[rand(SECRETCHAR.length)] }.join
+ p2 = Bone.api.token_suffix
+ p3 = (0...2).map{ SECRETCHAR[rand(SECRETCHAR.length)] }.join
+ [p1,p2,p3].join.upcase
+ end
+
+ def random_secret
+ src = [SECRETCHAR, %w'* ^ $ ! / . - _ + %'].flatten
+ p1 = (0...2).map{ SECRETCHAR[rand(SECRETCHAR.length)] }.join
+ p2 = (0...60).map{ src[rand(src.length)] }.join
+ p3 = (0...2).map{ SECRETCHAR[rand(SECRETCHAR.length)] }.join
+ [p1,p2,p3].join
+ end
+
+ def select_api
+ begin
+ @api = Bone.apis[Bone.source.scheme.to_sym]
+ raise RuntimeError, "Bad source: #{Bone.source}" if api.nil?
+ @api.connect
+ rescue => ex
+ Bone.info "#{ex.class}: #{ex.message}", ex.backtrace
+ exit
+ end
+ end
+
+ def register_api scheme, klass
+ Bone.apis[scheme.to_sym] = klass
+ end
+
+ # <tt>require</tt> a library from the vendor directory.
+ # The vendor directory should be organized such
+ # that +name+ and +version+ can be used to create
+ # the path to the library.
+ #
+ # e.g.
+ #
+ # vendor/httpclient-2.1.5.2/httpclient
+ #
+ def require_vendor name, version
+ path = File.join(BONE_HOME, 'vendor', "#{name}-#{version}", 'lib')
+ $:.unshift path
+ Bone.ld "REQUIRE VENDOR: ", path
+ require name
  end
  end

- @digest_type = determine_digest_type
+ require 'bone/api'
+ include Bone::API::InstanceMethods
+ extend Bone::API::ClassMethods
+ select_api
  end

+
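Taken together, the library changes above drop the v1 HTTP client (Net::HTTP requests to a boned server) in favor of pluggable backends selected by the scheme of BONE_SOURCE, with redis://127.0.0.1:6379/ as the default, and split credentials into a token and a secret read from BONE_TOKEN / BONE_SECRET or set directly. A minimal usage sketch against the new class methods, assuming the gem is loaded as 'bone' and that bone/api registers a backend for the redis scheme (illustration only, not part of the package):

    require 'bone'

    Bone.source = 'redis://127.0.0.1:6379/'   # select_api picks the backend by URI scheme
    Bone.credentials = 'token:secret'         # or set ENV['BONE_TOKEN'] and ENV['BONE_SECRET']
    Bone.debug = true                         # ld() now logs through info() to STDERR
    puts Bone::VERSION.to_s                   # reads MAJOR/MINOR/PATCH from VERSION.yml, e.g. "0.3.0"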