boned 0.2.6 → 0.3.0

@@ -1,68 +1,51 @@
- # Redis configuration file for Boned
+ # BS REDIS 2.0 CONFIG (dev) -- 2010-11-17
 
- # Changes:
- # * port
- # * daemonize
- # * dbfilename
+ # NOTE: auto-generated by delano on tucker at 2010-11-29 11:49:17 -0500
 
- # By default Redis does not run as a daemon. Use 'yes' if you need it.
- # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+ dir /tmp
+
+ pidfile boned-redis.pid
+ logfile boned-redis.log
+ dbfilename boned-redis.rdb
+
+ port 8045
+ bind 127.0.0.1
  daemonize yes
 
- # The filename where to dump the DB
- dbfilename /tmp/boned-redis.rdb
+ timeout 300
 
- # When run as a daemon, Redis write a pid file in /var/run/redis.pid by default.
- # You can specify a custom pid file location here.
- pidfile /var/run/redis.pid
+ #loglevel debug
+ #loglevel verbose
+ loglevel warning
 
- # Accept connections on the specified port, default is 6379
- port 8045
+ databases 16
 
- # If you want you can bind a single interface, if the bind option is not
- # specified all the interfaces will listen for connections.
- #
- # bind 127.0.0.1
+ save 900 100
+ save 300 5000
 
- # Close the connection after a client is idle for N seconds (0 to disable)
- timeout 300
 
- # Save the DB on disk:
- #
- # save <seconds> <changes>
- #
- # Will save the DB if both the given number of seconds and the given
- # number of write operations against the DB occurred.
- #
- # In the example below the behaviour will be to save:
- # after 900 sec (15 min) if at least 1 key changed
- # after 300 sec (5 min) if at least 10 keys changed
- # after 60 sec if at least 10000 keys changed
- save 600 1
- save 300 10
- save 60 10000
+ rdbcompression yes
 
+ # requirepass foobared
+ # maxclients 0
 
- # For default save/load DB in/from the working directory
- # Note that you must specify a directory not a file name.
- dir ./
+ appendonly no
+ appendfilename redis.aof
 
- # Set server verbosity to 'debug'
- # it can be one of:
- # debug (a lot of information, useful for development/testing)
- # notice (moderately verbose, what you want in production probably)
- # warning (only very important / critical messages are logged)
- loglevel debug
+ # TODO: Consider having separate configs when the use case for Redis
+ # changes. For example, one for production, another for batch processing.
+ #
+ # Nothing is changed from here on out:
 
- # Specify the log file name. Also 'stdout' can be used to force
- # the demon to log on the standard output. Note that if you use standard
- # output for logging but daemonize, logs will be sent to /dev/null
- logfile stdout
+ ################################## INCLUDES ###################################
 
- # Set the number of databases. The default database is DB 0, you can select
- # a different one on a per-connection basis using SELECT <dbid> where
- # dbid is a number between 0 and 'databases'-1
- databases 16
+ # Include one or more other config files here. This is useful if you
+ # have a standard template that goes to all redis servers but also need
+ # to customize a few per-server settings. Include files can include
+ # other files, so use this wisely.
+ #
+ # include /path/to/local.conf
+ # include /path/to/other.conf
 
  ################################# REPLICATION #################################
 
@@ -70,67 +53,16 @@ databases 16
  # another Redis server. Note that the configuration is local to the slave
  # so for example it is possible to configure the slave to save the DB with a
  # different interval, or to listen to another port, and so on.
-
- # slaveof <masterip> <masterport>
-
- ################################## SECURITY ###################################
-
- # Require clients to issue AUTH <PASSWORD> before processing any other
- # commands. This might be useful in environments in which you do not trust
- # others with access to the host running redis-server.
  #
- # This should stay commented out for backward compatibility and because most
- # people do not need auth (e.g. they run their own servers).
-
- # requirepass
-
- ################################### LIMITS ####################################
-
- # Set the max number of connected clients at the same time. By default there
- # is no limit, and it's up to the number of file descriptors the Redis process
- # is able to open. The special value '0' means no limts.
- # Once the limit is reached Redis will close all the new connections sending
- # an error 'max number of clients reached'.
-
- # maxclients 128
-
- # Don't use more memory than the specified amount of bytes.
- # When the memory limit is reached Redis will try to remove keys with an
- # EXPIRE set. It will try to start freeing keys that are going to expire
- # in little time and preserve keys with a longer time to live.
- # Redis will also try to remove objects from free lists if possible.
- #
- # If all this fails, Redis will start to reply with errors to commands
- # that will use more memory, like SET, LPUSH, and so on, and will continue
- # to reply to most read-only commands like GET.
- #
- # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
- # 'state' server or cache, not as a real DB. When Redis is used as a real
- # database the memory usage will grow over the weeks, it will be obvious if
- # it is going to use too much memory in the long run, and you'll have the time
- # to upgrade. With maxmemory after the limit is reached you'll start to get
- # errors for write operations, and this may even lead to DB inconsistency.
-
- # maxmemory <bytes>
-
- ############################## APPEND ONLY MODE ###############################
+ # slaveof <masterip> <masterport>
 
- # By default Redis asynchronously dumps the dataset on disk. If you can live
- # with the idea that the latest records will be lost if something like a crash
- # happens this is the preferred way to run Redis. If instead you care a lot
- # about your data and don't want to that a single record can get lost you should
- # enable the append only mode: when this mode is enabled Redis will append
- # every write operation received in the file appendonly.log. This file will
- # be read on startup in order to rebuild the full dataset in memory.
- #
- # Note that you can have both the async dumps and the append only file if you
- # like (you have to comment the "save" statements above to disable the dumps).
- # Still if append only mode is enabled Redis will load the data from the
- # log file at startup ignoring the dump.rdb file.
+ # If the master is password protected (using the "requirepass" configuration
+ # directive below) it is possible to tell the slave to authenticate before
+ # starting the replication synchronization process, otherwise the master will
+ # refuse the slave request.
  #
- # The name of the append only file is "appendonly.log"
+ # masterauth <master-password>
 
- appendonly no
 
  # The fsync() call tells the Operating System to actually write data on disk
  # instead to wait for more data in the output buffer. Some OS will really flush
@@ -142,16 +74,92 @@ appendonly no
  # always: fsync after every write to the append only log . Slow, Safest.
  # everysec: fsync only if one second passed since the last fsync. Compromise.
  #
- # The default is "always" that's the safer of the options. It's up to you to
- # understand if you can relax this to "everysec" that will fsync every second
- # or to "no" that will let the operating system flush the output buffer when
- # it want, for better performances (but if you can live with the idea of
- # some data loss consider the default persistence mode that's snapshotting).
+ # The default is "everysec" that's usually the right compromise between
+ # speed and data safety. It's up to you to understand if you can relax this to
+ # "no" that will let the operating system flush the output buffer when
+ # it wants, for better performance (but if you can live with the idea of
+ # some data loss consider the default persistence mode that's snapshotting),
+ # or on the contrary, use "always" that's very slow but a bit safer than
+ # everysec.
+ #
+ # If unsure, use "everysec".
 
- appendfsync always
- # appendfsync everysec
+ # appendfsync always
+ appendfsync everysec
  # appendfsync no
 
+ ################################ VIRTUAL MEMORY ###############################
+
+ # Virtual Memory allows Redis to work with datasets bigger than the actual
+ # amount of RAM needed to hold the whole dataset in memory.
+ # In order to do so frequently used keys are taken in memory while the other keys
+ # are swapped into a swap file, similarly to what operating systems do
+ # with memory pages.
+ #
+ # To enable VM just set 'vm-enabled' to yes, and set the following three
+ # VM parameters according to your needs.
+
+ vm-enabled no
+ # vm-enabled yes
+
+ # This is the path of the Redis swap file. As you can guess, swap files
+ # can't be shared by different Redis instances, so make sure to use a swap
+ # file for every redis process you are running. Redis will complain if the
+ # swap file is already in use.
+ #
+ # The best kind of storage for the Redis swap file (that's accessed at random)
+ # is a Solid State Disk (SSD).
+ #
+ # *** WARNING *** if you are using a shared hosting the default of putting
+ # the swap file under /tmp is not secure. Create a dir with access granted
+ # only to the Redis user and configure Redis to create the swap file there.
+ vm-swap-file /tmp/redis.swap
+
+ # vm-max-memory configures the VM to use at max the specified amount of
+ # RAM. Everything that does not fit will be swapped on disk *if* possible, that
+ # is, if there is still enough contiguous space in the swap file.
+ #
+ # With vm-max-memory 0 the system will swap everything it can. Not a good
+ # default, just specify the max amount of RAM you can in bytes, but it's
+ # better to leave some margin. For instance specify an amount of RAM
+ # that's more or less between 60 and 80% of your free RAM.
+ vm-max-memory 0
+
+ # The Redis swap file is split into pages. An object can be saved using multiple
+ # contiguous pages, but pages can't be shared between different objects.
+ # So if your page is too big, small objects swapped out on disk will waste
+ # a lot of space. If your page is too small, there is less space in the swap
+ # file (assuming you configured the same number of total swap file pages).
+ #
+ # If you use a lot of small objects, use a page size of 64 or 32 bytes.
+ # If you use a lot of big objects, use a bigger page size.
+ # If unsure, use the default :)
+ vm-page-size 32
+
+ # Number of total memory pages in the swap file.
+ # Given that the page table (a bitmap of free/used pages) is taken in memory,
+ # every 8 pages on disk will consume 1 byte of RAM.
+ #
+ # The total swap size is vm-page-size * vm-pages
+ #
+ # With the default of 32-bytes memory pages and 134217728 pages Redis will
+ # use a 4 GB swap file, that will use 16 MB of RAM for the page table.
+ #
+ # It's better to use the smallest acceptable value for your application,
+ # but the default is large in order to work in most conditions.
+ vm-pages 134217728
+
+ # Max number of VM I/O threads running at the same time.
+ # These threads are used to read/write data from/to the swap file; since they
+ # also encode and decode objects from disk to memory or the reverse, a bigger
+ # number of threads can help with big objects even if they can't help with
+ # I/O itself as the physical device may not be able to cope with many
+ # read/write operations at the same time.
+ #
+ # The special value of 0 turns off threaded I/O and enables the blocking
+ # Virtual Memory implementation.
+ vm-max-threads 4
+
  ############################### ADVANCED CONFIG ###############################
 
  # Glue small output buffers together in order to send small replies in a
@@ -159,19 +167,29 @@ appendfsync always
  # in terms of number of queries per second. Use 'yes' if unsure.
  glueoutputbuf yes
 
- # Use object sharing. Can save a lot of memory if you have many common
- # string in your dataset, but performs lookups against the shared objects
- # pool so it uses more CPU and can be a bit slower. Usually it's a good
- # idea.
- #
- # When object sharing is enabled (shareobjects yes) you can use
- # shareobjectspoolsize to control the size of the pool used in order to try
- # object sharing. A bigger pool size will lead to better sharing capabilities.
- # In general you want this value to be at least the double of the number of
- # very common strings you have in your dataset.
- #
- # WARNING: object sharing is experimental, don't enable this feature
- # in production before of Redis 1.0-stable. Still please try this feature in
- # your development environment so that we can test it better.
- shareobjects no
- shareobjectspoolsize 1024
+ # Hashes are encoded in a special way (much more memory efficient) when they
+ # have at max a given number of elements, and the biggest element does not
+ # exceed a given threshold. You can configure these limits with the following
+ # configuration directives.
+ hash-max-zipmap-entries 64
+ hash-max-zipmap-value 512
+
+ # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+ # order to help rehashing the main Redis hash table (the one mapping top-level
+ # keys to values). The hash table implementation redis uses (see dict.c)
+ # performs a lazy rehashing: the more operations you run against a hash table
+ # that is rehashing, the more rehashing "steps" are performed, so if the
+ # server is idle the rehashing is never complete and some more memory is used
+ # by the hash table.
+ #
+ # The default is to use this millisecond 10 times every second in order to
+ # actively rehash the main dictionaries, freeing memory when possible.
+ #
+ # If unsure:
+ # use "activerehashing no" if you have hard latency requirements and it is
+ # not a good thing in your environment that Redis can reply from time to time
+ # to queries with a 2 millisecond delay.
+ #
+ # use "activerehashing yes" if you don't have such hard requirements but
+ # want to free memory asap when possible.
+ activerehashing yes
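
As a quick sanity check that a running instance actually picked up these settings, something like the following works from a Ruby console (a minimal sketch assuming the redis-rb gem, which is not otherwise part of this change; the exact return shape of CONFIG GET varies between gem versions):

    require 'redis'  # redis-rb, assumed installed

    # Host, port and expected values come from the config above.
    redis = Redis.new(:host => '127.0.0.1', :port => 8045)

    p redis.ping                         # "PONG" when the server is up
    p redis.config(:get, 'appendfsync')  # expect "everysec"
    p redis.config(:get, 'save')         # expect "900 100 300 5000"
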
@@ -0,0 +1,163 @@
+ require 'sinatra/reloader' # consider big_band
+
+ class Boned::APIBase < Sinatra::Base
+
+   set :public => 'public/'
+   set :views => 'views/'
+   set :static => true
+
+   configure :dev do
+     Bone.info "Environment: #{ENV['RACK_ENV']}"
+     register Sinatra::Reloader
+     dont_reload "lib/**/*.rb"
+     also_reload "lib/boned.rb"
+     before do
+       #Bone.debug = true
+       Bone.info $/, $/, "--> #{request_method} #{current_uri_path}"
+       content_type 'text/plain'
+       Boned.allow_register = true
+     end
+   end
+
+   configure :prod do
+     Bone.debug = false
+     before do
+       content_type 'application/json'
+       Boned.allow_register = false
+     end
+   end
+
+   helpers do
+     def carefully(ret='', &blk)
+       begin
+         ret = blk.call
+       rescue => ex
+         generic_error ex.class, ex
+       end
+       ret
+     end
+
+     def generic_error(event=nil, ex=nil)
+       Bone.info "#{event} #{request_token}:#{request_signature}" unless event.nil?
+       unless ex.nil?
+         Bone.info ex.message
+         Bone.ld ex.backtrace
+       end
+       return error(404, "Bad bone rising")
+     end
+
+     def error_message msg
+       Bone.info "[400] #{msg}"
+       return error(400, msg)
+     end
+
+     def request_token
+       env['HTTP_X_BONE_TOKEN'] || params[:token]
+     end
+     def request_signature
+       @request_signature ||= params[:sig] || env['HTTP_X_BONE_SIGNATURE']
+       @request_signature
+     end
+     def request_secret
+       # no leading/trailing whitespace
+       @request_secret ||= (body_content || env['HTTP_X_BONE_SECRET']).strip
+       @request_secret
+     end
+     def body_content
+       @body_content ||= request.body.read
+       @body_content
+     end
+
+     def uri(*path)
+       [root_path, path].flatten.join('/')
+     end
+     def root_path
+       env['SCRIPT_NAME']
+     end
+     def current_uri_path
+       env['REQUEST_URI']
+     end
+     def request_method
+       env['REQUEST_METHOD'].to_s.downcase # important to be downcase for signature check
+     end
+     def current_host
+       env['HTTP_HOST'].to_s.downcase
+     end
+     def secure?
+       (env['HTTP_X_SCHEME'] == "https") # X-Scheme is set by nginx
+     end
+
+     def local?
+       LOCAL_HOSTS.member?(env['SERVER_NAME']) && (client_ipaddress == '127.0.0.1')
+     end
+
+     def assert_token
+       assert_exists request_token, "No token"
+     end
+
+     def assert_secret
+       assert_exists request_secret, "No secret"
+     end
+
+     def check_token
+       generic_error "[unknown-token]" if !Bone.token? request_token
+       true
+     end
+
+     def check_signature
+       assert_exists request_signature, "No signature"
+       unless params[:sigversion] == Bone::API::HTTP::SIGVERSION
+         error_message "API must be version: #{Bone::API::HTTP::SIGVERSION}"
+       end
+       # We need to re-parse the query string b/c Sinatra or Rack is
+       # including the value of the POST body as a key with no value.
+       qs = Bone::API::HTTP.parse_query request.query_string
+       qs.delete 'sig' # Yo dawg, I put a signature in your signature
+       stamp, now = (qs['stamp'] || 0).to_i, Bone::API::HTTP.canonical_time
+       generic_error "[sig-expired] #{stamp}" if (now - stamp) > 30.seconds
+       tobj = Bone::API::Redis::Token.new request_token
+       secret = tobj.secret.value
+       path = current_uri_path.split('?').first
+       sig = Bone::API::HTTP.generate_signature secret, current_host, request_method, path, qs, body_content
+       generic_error "[sig-mismatch] #{sig}" if sig != request_signature
+       Bone.new request_token
+     end
+
+
+     # +names+ One or more required parameter names (Symbols)
+     def assert_params(*names)
+       names.each do |n|
+         return error_message("Missing param: %s" % n) if params[n].to_s.empty?
+       end
+       true
+     end
+     alias_method :assert_param, :assert_params
+
+     def assert_exists(val, msg)
+       return error_message msg if val.to_s.empty?
+       true
+     end
+
+     def assert_true(val, msg)
+       return error_message msg if val != true
+       true
+     end
+
+     def assert_sha1(val)
+       return error_message("#{val} is not a sha1 digest") unless is_sha1?(val)
+     end
+
+     def assert_sha256(val)
+       return error_message("#{val} is not a sha256 digest") unless is_sha256?(val)
+     end
+
+     def is_sha1?(val)
+       val.to_s.match(/\A[0-9a-f]{40}\z/)
+     end
+     def is_sha256?(val)
+       val.to_s.match(/\A[0-9a-f]{64}\z/)
+     end
+
+   end
+ end
+
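
A client of the check_signature helper above has to reproduce exactly what the server recomputes: the Host header, the lowercased request method, the path without the query string, the query pairs minus sig, and the raw body, with a stamp within roughly 30 seconds of server time. A rough client-side sketch, reusing the same Bone::API::HTTP helpers the server calls (the host and credentials here are hypothetical, and the argument semantics are simply mirrored from the server-side call):

    # Hypothetical client-side signing for GET /:token/keys
    token, secret = 'mytoken', 'mysecret'
    path = "/#{token}/keys"
    qs = {
      'token'      => token,
      'sigversion' => Bone::API::HTTP::SIGVERSION,
      'stamp'      => Bone::API::HTTP.canonical_time.to_s  # rejected if older than ~30s
    }
    # Same argument order as the server: secret, host, method, path, query, body.
    sig = Bone::API::HTTP.generate_signature secret, 'localhost:9292', 'get', path, qs, ''
    qs['sig'] = sig  # send as a query param (or via the X-BONE-SIGNATURE header)
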
data/lib/boned/api.rb CHANGED
@@ -1,94 +1,101 @@
-
  require 'boned'
+ require 'boned/api/base'
 
+ class Boned::API < Boned::APIBase
 
- class Boned::API < Sinatra::Base
+   # TODO: Remove these.
+   ##get '/all' do
+   ##  keys = Bone::API::Redis::Token.redis.keys '*'
+   ##  keys.join $/
+   ##end
+   ##get "/:token/secret/?" do
+   ##  carefully do
+   ##    assert_token && check_token
+   ##    bone = check_signature
+   ##    bone.secret
+   ##  end
+   ##end
 
-   set :public => 'public/'
-   set :views => 'views/'
-   set :static => true
+   #get "/:token/:bucket/keys/?" do
+   #  Bone.info
+   #  "poop"
+   #end
+   #
+   #get "/:token/:bucket/key/:key/?" do
+   #  Bone.info
+   #  "poop"
+   #end
 
-   configure :development do
-     before do
-       Boned.enable_debug
-       Boned.ld ' --> ' << env['REQUEST_URI']
-       content_type 'text/plain'
+   get "/:token/key/:key/?" do
+     carefully do
+       assert_token && check_token
+       bone = check_signature
+       bone.key?(params[:key]) ? bone[params[:key]] : generic_error
      end
    end
 
-   configure :production do
-     Boned.disable_debug
-     before do
-       content_type 'application/json'
+   get "/:token/keys/?" do
+     carefully do
+       assert_token && check_token
+       bone = check_signature
+       list = bone.keys || []
+       list.join $/
      end
    end
 
-   helpers do
-     def carefully(ret='', &blk)
-       begin
-         ret = blk.call
-       rescue Boned::BadBone => ex
-         return error(404, ex.message)
-       rescue => ex
-         Boned.ld "#{current_token}:#{params[:key]}", ex.message
-         Boned.ld ex.backtrace
-         return error(400, "Bad bone rising")
-       end
-       ret
-     end
-
-     def current_token() @env['HTTP_X_BONE_TOKEN'] || params[:token] end
-     def current_sig() @env['HTTP_X_BONE_SIGNATURE'] || params[:sig] end
-
-     def uri(*path)
-       [root_path, path].flatten.join('/')
+   get "/:token/?" do
+     carefully do
+       assert_token && check_token
+       bone = check_signature
+       # list of buckets, currently hardcoded to global
+       bone.token?(request_token) ? 'global' : generic_error
      end
-     def root_path
-       env['SCRIPT_NAME']
-     end
-
-     # +names+ One or more a required parameter names (Symbol)
-     def assert_params(*names)
-       names.each do |n|
-         if params[n].nil? || params[n].empty?
-           return error(400, "Missing param: %s" % n)
-         end
-       end
-     end
-     alias_method :assert_param, :assert_params
-
-     def assert_exists(val, msg)
-       return error(400, msg) if val.nil? ||
-         (val.respond_to?(:empty?) && val.empty?)
-     end
-
-     def assert_true(val, msg)
-       return error(400, msg) if val == true
-     end
-
-     def assert_sha1(val)
-       return error(400, "#{val} is not a sha1 digest") unless is_sha1?(val)
+   end
+
+   post "/:token/key/:key/?" do
+     carefully do
+       assert_token && check_token
+       bone = check_signature
+       value = body_content # don't modify content in any way
+       bone.set params[:key], value
      end
-
-     def assert_sha256(val)
-       return error(400, "#{val} is not a sha256 digest") unless is_sha256?(val)
+   end
+
+   delete "/destroy/:token/?" do
+     carefully do
+       assert_token && check_token
+       bone = check_signature
+       Bone.destroy request_token
      end
-
-     def is_sha1?(val)
-       val.match(/\A[0-9a-f]{40}\z/)
+   end
+
+   post "/generate/?" do
+     carefully do
+       token, secret = *Bone.generate
+       token.nil? ? generic_error : [token, secret].join($/)
      end
-     def is_sha256?(val)
-       val.match(/\A[0-9a-f]{64}\z/)
+   end
+
+   post "/register/:token/?" do
+     carefully do
+       generic_error '[register-disabled]' unless Boned.allow_register
+       assert_secret
+       generic_error "[rereg-attempt]" if Bone.token? request_token
+       token = Bone.register request_token, request_secret
+       token.nil? ? generic_error("[register-failed]") : token
      end
-
+   end
+
+   helpers do
+     #Bone.debug = true
    end
  end
 
- class Boned::API::Stub < Boned::API
+
+ class Boned::API::Stub < Boned::APIBase
    get '/' do
      content_type 'text/plain'
      "Do you want to get bones?"
    end
  end
 
- require 'boned/api/service'
data/lib/boned/app.rb ADDED
File without changes
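
End to end, the simplest exercise of the new routes is generating a token pair against a dev instance, where the generate route requires neither token nor signature and responses are plain text joined by $/ (a sketch; the host and port are a hypothetical rackup default):

    require 'net/http'

    # POST /generate/ returns the new token and secret, one per line.
    res = Net::HTTP.post_form URI('http://localhost:9292/generate/'), {}
    token, secret = res.body.split($/)
    puts "token:  #{token}"
    puts "secret: #{secret}"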