continuity 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,78 @@
+ module Continuity
+   class Scheduler
+     def self.new_using_redis(redis, frequency = 10)
+       new(RedisBackend.new(redis, frequency))
+     end
+
+     def initialize(backend, frequency = 10)
+       @frequency = frequency
+       @backend = backend
+       @next_schedule = 0
+       @on_schedule_cbs = []
+
+       @jobs = {}
+     end
+
+     def every(period, &blk)
+       @jobs[PeriodicEntry.new(period)] = blk
+     end
+
+     def cron(cron_line, &blk)
+       @jobs[CronEntry.new(cron_line)] = blk
+     end
+
+     def on_schedule(&block)
+       @on_schedule_cbs << block
+     end
+
+     def run(check_frequency = 5)
+       @scheduling_thread = Thread.new {
+         loop do
+           begin
+             maybe_schedule
+             sleep check_frequency
+           rescue Object
+             $stderr.print "--Error in Continuity Scheduler--\n"
+             $stderr.print $!.backtrace.join("\n")
+           end
+         end
+       }
+     end
+
+     def join
+       @scheduling_thread.join
+     end
+
+     def maybe_schedule(now = Time.now.to_i)
+       return false unless @next_schedule <= now
+
+       range_scheduled = false
+       scheduled_up_to = @backend.lock_for_scheduling(now) do |previous_time|
+         range_scheduled = (previous_time+1)..now
+         do_jobs(range_scheduled)
+         trigger_cbs(range_scheduled)
+         yield range_scheduled if block_given?
+       end
+
+       @next_schedule = scheduled_up_to + @frequency
+
+       return range_scheduled
+     end
+
+     def do_jobs(time_range)
+       time_range.each do |t|
+         time = Time.at(t)
+         @jobs.each do |cron_entry, blk|
+           if cron_entry.at?(time)
+             blk[time]
+           end
+         end
+       end
+     end
+     private
+
+     def trigger_cbs(range)
+       @on_schedule_cbs.each { |cb| cb.call(range) }
+     end
+   end
+ end
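
The Scheduler above is the gem's public surface: every and cron register jobs keyed by PeriodicEntry and CronEntry, on_schedule registers callbacks that receive each scheduled time range, and run polls maybe_schedule in a background thread while the backend's lock_for_scheduling keeps a range from being scheduled more than once. A minimal usage sketch, assuming the redis gem and a local Redis server are available; the period value and cron line are illustrative, since PeriodicEntry and CronEntry are not part of this diff:

    require 'redis'
    require 'continuity'

    scheduler = Continuity::Scheduler.new_using_redis(Redis.new)

    # Job blocks are called with the Time being scheduled (blk[time] in do_jobs).
    scheduler.every(30)         { |time| puts "periodic job at #{time}" }  # period format is a guess
    scheduler.cron("0 * * * *") { |time| puts "hourly job at #{time}" }
    scheduler.on_schedule       { |range| puts "covered #{range}" }

    scheduler.run(5)   # check for work every 5 seconds in a background thread
    scheduler.join     # block the current thread on the scheduling loop
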
@@ -0,0 +1,34 @@
+ require 'rubygems'
+ require 'bundler'
+ require 'simplecov'
+ SimpleCov.start
+ begin
+   Bundler.setup(:default, :development)
+ rescue Bundler::BundlerError => e
+   $stderr.puts e.message
+   $stderr.puts "Run `bundle install` to install missing gems"
+   exit e.status_code
+ end
+ require 'minitest'
+ require 'minitest/unit'
+ require 'minitest/pride'
+
+ $LOAD_PATH.unshift(File.dirname(__FILE__))
+ $LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
+ require 'continuity'
+
+ class MiniTest::Unit::TestCase
+ end
+
+ def redis_clean
+   redis = Redis.new(:thread_safe => true, :port => 16379)
+   begin
+     redis.flushall
+   rescue Errno::ECONNREFUSED
+     puts '***** Tests need an instance of redis running at 16379. `redis-server test/redis.conf` *****'
+     exit
+   end
+   redis
+ end
+
+ MiniTest.autorun
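
The helper wires up SimpleCov, Bundler, and minitest, and redis_clean flushes a dedicated Redis instance on port 16379 (started with `redis-server test/redis.conf`) so each test begins from an empty database. A hedged sketch of how a test case might lean on it; the test class and assertion below are illustrative, not taken from the gem's suite, and whether the backend yields the scheduling block on the first pass depends on RedisBackend, which is not shown in this diff:

    class SchedulerSmokeTest < MiniTest::Unit::TestCase
      def setup
        @redis     = redis_clean   # flush the test instance on port 16379
        @scheduler = Continuity::Scheduler.new_using_redis(@redis, 10)
      end

      def test_on_schedule_callback_fires
        covered = nil
        @scheduler.on_schedule { |range| covered = range }
        @scheduler.maybe_schedule(Time.now.to_i)
        refute_nil covered
      end
    end
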
@@ -0,0 +1,421 @@
+ # Redis configuration file example
+
+ # Note on units: when memory size is needed, it is possible to specify
+ # it in the usual form of 1k 5GB 4M and so forth:
+ #
+ # 1k => 1000 bytes
+ # 1kb => 1024 bytes
+ # 1m => 1000000 bytes
+ # 1mb => 1024*1024 bytes
+ # 1g => 1000000000 bytes
+ # 1gb => 1024*1024*1024 bytes
+ #
+ # units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+ # By default Redis does not run as a daemon. Use 'yes' if you need it.
+ # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+ daemonize no
+
+ # When running daemonized, Redis writes a pid file in /var/run/redis.pid by
+ # default. You can specify a custom pid file location here.
+ pidfile /var/run/redis.pid
+
+ # Accept connections on the specified port, default is 6379.
+ port 16379
+
+ # If you want you can bind a single interface, if the bind option is not
+ # specified all the interfaces will listen for incoming connections.
+ #
+ # bind 127.0.0.1
+
+ # Specify the path for the unix socket that will be used to listen for
+ # incoming connections. There is no default, so Redis will not listen
+ # on a unix socket when not specified.
+ #
+ # unixsocket /tmp/redis.sock
+
+ # Close the connection after a client is idle for N seconds (0 to disable)
+ timeout 300
+
+ # Set server verbosity to 'debug'
+ # it can be one of:
+ # debug (a lot of information, useful for development/testing)
+ # verbose (many rarely useful messages, but not a mess like the debug level)
+ # notice (moderately verbose, what you want in production probably)
+ # warning (only very important / critical messages are logged)
+ loglevel verbose
+
+ # Specify the log file name. Also 'stdout' can be used to force
+ # Redis to log on the standard output. Note that if you use standard
+ # output for logging but daemonize, logs will be sent to /dev/null
+ logfile stdout
+
+ # To enable logging to the system logger, just set 'syslog-enabled' to yes,
+ # and optionally update the other syslog parameters to suit your needs.
+ # syslog-enabled no
+
+ # Specify the syslog identity.
+ # syslog-ident redis
+
+ # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+ # syslog-facility local0
+
+ # Set the number of databases. The default database is DB 0, you can select
+ # a different one on a per-connection basis using SELECT <dbid> where
+ # dbid is a number between 0 and 'databases'-1
+ databases 16
+
+ ################################ SNAPSHOTTING #################################
+ #
+ # Save the DB on disk:
+ #
+ # save <seconds> <changes>
+ #
+ # Will save the DB if both the given number of seconds and the given
+ # number of write operations against the DB occurred.
+ #
+ # In the example below the behaviour will be to save:
+ # after 900 sec (15 min) if at least 1 key changed
+ # after 300 sec (5 min) if at least 10 keys changed
+ # after 60 sec if at least 10000 keys changed
+ #
+ # Note: you can disable saving entirely by commenting out all the "save" lines.
+
+ save 900 1
+ save 300 10
+ save 60 10000
+
+ # Compress string objects using LZF when dumping .rdb databases?
+ # By default that's set to 'yes' as it's almost always a win.
+ # If you want to save some CPU in the saving child set it to 'no' but
+ # the dataset will likely be bigger if you have compressible values or keys.
+ rdbcompression yes
+
+ # The filename where to dump the DB
+ dbfilename dump.rdb
+
+ # The working directory.
+ #
+ # The DB will be written inside this directory, with the filename specified
+ # above using the 'dbfilename' configuration directive.
+ #
+ # Also the Append Only File will be created inside this directory.
+ #
+ # Note that you must specify a directory here, not a file name.
+ dir ./
+
+ ################################# REPLICATION #################################
+
+ # Master-Slave replication. Use slaveof to make a Redis instance a copy of
+ # another Redis server. Note that the configuration is local to the slave
+ # so for example it is possible to configure the slave to save the DB with a
+ # different interval, or to listen to another port, and so on.
+ #
+ # slaveof <masterip> <masterport>
+
+ # If the master is password protected (using the "requirepass" configuration
+ # directive below) it is possible to tell the slave to authenticate before
+ # starting the replication synchronization process, otherwise the master will
+ # refuse the slave request.
+ #
+ # masterauth <master-password>
+
+ # When a slave loses the connection with the master, or when the replication
+ # is still in progress, the slave can act in two different ways:
+ #
+ # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
+ # still reply to client requests, possibly with out-of-date data, or the
+ # data set may just be empty if this is the first synchronization.
+ #
+ # 2) if slave-serve-stale-data is set to 'no' the slave will reply with
+ # an error "SYNC with master in progress" to all kinds of commands
+ # except INFO and SLAVEOF.
+ #
+ slave-serve-stale-data yes
+
+ ################################## SECURITY ###################################
+
+ # Require clients to issue AUTH <PASSWORD> before processing any other
+ # commands. This might be useful in environments in which you do not trust
+ # others with access to the host running redis-server.
+ #
+ # This should stay commented out for backward compatibility and because most
+ # people do not need auth (e.g. they run their own servers).
+ #
+ # Warning: since Redis is pretty fast an outside user can try up to
+ # 150k passwords per second against a good box. This means that you should
+ # use a very strong password otherwise it will be very easy to break.
+ #
+ # requirepass foobared
+
+ # Command renaming.
+ #
+ # It is possible to change the name of dangerous commands in a shared
+ # environment. For instance the CONFIG command may be renamed into something
+ # hard to guess so that it will still be available for internal-use
+ # tools but not available for general clients.
+ #
+ # Example:
+ #
+ # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+ #
+ # It is also possible to completely kill a command by renaming it into
+ # an empty string:
+ #
+ # rename-command CONFIG ""
+
+ ################################### LIMITS ####################################
+
+ # Set the max number of connected clients at the same time. By default there
+ # is no limit, and it's up to the number of file descriptors the Redis process
+ # is able to open. The special value '0' means no limits.
+ # Once the limit is reached Redis will close all the new connections sending
+ # an error 'max number of clients reached'.
+ #
+ # maxclients 128
+
+ # Don't use more memory than the specified amount of bytes.
+ # When the memory limit is reached Redis will try to remove keys with an
+ # EXPIRE set. It will try to start freeing keys that are going to expire
+ # in little time and preserve keys with a longer time to live.
+ # Redis will also try to remove objects from free lists if possible.
+ #
+ # If all this fails, Redis will start to reply with errors to commands
+ # that will use more memory, like SET, LPUSH, and so on, and will continue
+ # to reply to most read-only commands like GET.
+ #
+ # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
+ # 'state' server or cache, not as a real DB. When Redis is used as a real
+ # database the memory usage will grow over the weeks, it will be obvious if
+ # it is going to use too much memory in the long run, and you'll have the time
+ # to upgrade. With maxmemory after the limit is reached you'll start to get
+ # errors for write operations, and this may even lead to DB inconsistency.
+ #
+ # maxmemory <bytes>
+
+ # MAXMEMORY POLICY: how will Redis select what to remove when maxmemory
+ # is reached? You can select among the following behaviors:
+ #
+ # volatile-lru -> remove the key with an expire set using an LRU algorithm
+ # allkeys-lru -> remove any key according to the LRU algorithm
+ # volatile-random -> remove a random key with an expire set
+ # allkeys-random -> remove a random key, any key
+ # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
+ # noeviction -> don't expire at all, just return an error on write operations
+ #
+ # Note: with all of these policies, Redis will return an error on write
+ # operations when there are no suitable keys for eviction.
+ #
+ # At the time of writing these commands are: set setnx setex append
+ # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+ # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+ # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+ # getset mset msetnx exec sort
+ #
+ # The default is:
+ #
+ # maxmemory-policy volatile-lru
+
+ # LRU and minimal TTL algorithms are not precise algorithms but approximated
+ # algorithms (in order to save memory), so you can also select the sample
+ # size to check. For instance by default Redis will check three keys and
+ # pick the one that was used less recently, you can change the sample size
+ # using the following configuration directive.
+ #
+ # maxmemory-samples 3
+
+ ############################## APPEND ONLY MODE ###############################
+
+ # By default Redis asynchronously dumps the dataset on disk. If you can live
+ # with the idea that the latest records will be lost if something like a crash
+ # happens this is the preferred way to run Redis. If instead you care a lot
+ # about your data and don't want a single record to be lost you should
+ # enable the append only mode: when this mode is enabled Redis will append
+ # every write operation received in the file appendonly.aof. This file will
+ # be read on startup in order to rebuild the full dataset in memory.
+ #
+ # Note that you can have both the async dumps and the append only file if you
+ # like (you have to comment the "save" statements above to disable the dumps).
+ # Still if append only mode is enabled Redis will load the data from the
+ # log file at startup ignoring the dump.rdb file.
+ #
+ # IMPORTANT: Check BGREWRITEAOF to see how to rewrite the append
+ # log file in background when it gets too big.
+
+ appendonly no
+
+ # The name of the append only file (default: "appendonly.aof")
+ # appendfilename appendonly.aof
+
+ # The fsync() call tells the Operating System to actually write data on disk
+ # instead of waiting for more data in the output buffer. Some OS will really flush
+ # data on disk, some other OS will just try to do it ASAP.
+ #
+ # Redis supports three different modes:
+ #
+ # no: don't fsync, just let the OS flush the data when it wants. Faster.
+ # always: fsync after every write to the append only log. Slow, Safest.
+ # everysec: fsync only if one second passed since the last fsync. Compromise.
+ #
+ # The default is "everysec", which is usually the right compromise between
+ # speed and data safety. It's up to you to understand if you can relax this to
+ # "no", which will let the operating system flush the output buffer when
+ # it wants, for better performance (but if you can live with the idea of
+ # some data loss consider the default persistence mode that's snapshotting),
+ # or on the contrary, use "always", which is very slow but a bit safer than
+ # everysec.
+ #
+ # If unsure, use "everysec".
+
+ # appendfsync always
+ appendfsync everysec
+ # appendfsync no
+
+ # When the AOF fsync policy is set to always or everysec, and a background
+ # saving process (a background save or AOF log background rewriting) is
+ # performing a lot of I/O against the disk, in some Linux configurations
+ # Redis may block too long on the fsync() call. Note that there is no fix for
+ # this currently, as even performing fsync in a different thread will block
+ # our synchronous write(2) call.
+ #
+ # In order to mitigate this problem it's possible to use the following option
+ # that will prevent fsync() from being called in the main process while a
+ # BGSAVE or BGREWRITEAOF is in progress.
+ #
+ # This means that while another child is saving, the durability of Redis is
+ # the same as "appendfsync none", which in practical terms means that it is
+ # possible to lose up to 30 seconds of log in the worst scenario (with the
+ # default Linux settings).
+ #
+ # If you have latency problems turn this to "yes". Otherwise leave it as
+ # "no" that is the safest pick from the point of view of durability.
+ no-appendfsync-on-rewrite no
+
+ ################################ VIRTUAL MEMORY ###############################
+
+ # Virtual Memory allows Redis to work with datasets bigger than the actual
+ # amount of RAM needed to hold the whole dataset in memory.
+ # In order to do so, frequently used keys are kept in memory while the other keys
+ # are swapped into a swap file, similarly to what operating systems do
+ # with memory pages.
+ #
+ # To enable VM just set 'vm-enabled' to yes, and set the following three
+ # VM parameters according to your needs.
+
+ #vm-enabled no
+ # vm-enabled yes
+
+ # This is the path of the Redis swap file. As you can guess, swap files
+ # can't be shared by different Redis instances, so make sure to use a swap
+ # file for every redis process you are running. Redis will complain if the
+ # swap file is already in use.
+ #
+ # The best kind of storage for the Redis swap file (that's accessed at random)
+ # is a Solid State Disk (SSD).
+ #
+ # *** WARNING *** if you are using a shared hosting the default of putting
+ # the swap file under /tmp is not secure. Create a dir with access granted
+ # only to Redis user and configure Redis to create the swap file there.
+ #vm-swap-file /tmp/redis.swap
+
+ # vm-max-memory configures the VM to use at max the specified amount of
+ # RAM. Everything that does not fit will be swapped on disk *if* possible, that
+ # is, if there is still enough contiguous space in the swap file.
+ #
+ # With vm-max-memory 0 the system will swap everything it can. Not a good
+ # default, just specify the max amount of RAM you can in bytes, but it's
+ # better to leave some margin. For instance specify an amount of RAM
+ # that's more or less between 60 and 80% of your free RAM.
+ #vm-max-memory 0
+
+ # The Redis swap file is split into pages. An object can be saved using multiple
+ # contiguous pages, but pages can't be shared between different objects.
+ # So if your page is too big, small objects swapped out on disk will waste
+ # a lot of space. If your page is too small, there is less space in the swap
+ # file (assuming you configured the same number of total swap file pages).
+ #
+ # If you use a lot of small objects, use a page size of 64 or 32 bytes.
+ # If you use a lot of big objects, use a bigger page size.
+ # If unsure, use the default :)
+ #vm-page-size 32
+
+ # Number of total memory pages in the swap file.
+ # Given that the page table (a bitmap of free/used pages) is taken in memory,
+ # every 8 pages on disk will consume 1 byte of RAM.
+ #
+ # The total swap size is vm-page-size * vm-pages
+ #
+ # With the default of 32-bytes memory pages and 134217728 pages Redis will
+ # use a 4 GB swap file, that will use 16 MB of RAM for the page table.
+ #
+ # It's better to use the smallest acceptable value for your application,
+ # but the default is large in order to work in most conditions.
+ #vm-pages 134217728
+
+ # Max number of VM I/O threads running at the same time.
+ # These threads are used to read/write data from/to the swap file; since they
+ # also encode and decode objects from disk to memory or the reverse, a bigger
+ # number of threads can help with big objects even if they can't help with
+ # I/O itself as the physical device may not be able to cope with many
+ # read/write operations at the same time.
+ #
+ # The special value of 0 turns off threaded I/O and enables the blocking
+ # Virtual Memory implementation.
+ #vm-max-threads 4
+
+ ############################### ADVANCED CONFIG ###############################
+
+ # Glue small output buffers together in order to send small replies in a
+ # single TCP packet. Uses a bit more CPU but most of the time it is a win
+ # in terms of number of queries per second. Use 'yes' if unsure.
+ #glueoutputbuf yes
+
+ # Hashes are encoded in a special way (much more memory efficient) when they
+ # have at max a given number of elements, and the biggest element does not
+ # exceed a given threshold. You can configure these limits with the following
+ # configuration directives.
+ #hash-max-zipmap-entries 64
+ #hash-max-zipmap-value 512
+
+ # Similarly to hashes, small lists are also encoded in a special way in order
+ # to save a lot of space. The special representation is only used when
+ # you are under the following limits:
+ #list-max-ziplist-entries 512
+ #list-max-ziplist-value 64
+
+ # Sets have a special encoding in just one case: when a set is composed
+ # of just strings that happen to be integers in radix 10 in the range
+ # of 64 bit signed integers.
+ # The following configuration setting sets the limit on the size of the
+ # set in order to use this special memory saving encoding.
+ #set-max-intset-entries 512
+
+ # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+ # order to help rehashing the main Redis hash table (the one mapping top-level
+ # keys to values). The hash table implementation redis uses (see dict.c)
+ # performs lazy rehashing: the more operations you run against a hash table
+ # that is rehashing, the more rehashing "steps" are performed, so if the
+ # server is idle the rehashing is never complete and some more memory is used
+ # by the hash table.
+ #
+ # The default is to use this millisecond 10 times every second in order to
+ # actively rehash the main dictionaries, freeing memory when possible.
+ #
+ # If unsure:
+ # use "activerehashing no" if you have hard latency requirements and it is
+ # not a good thing in your environment that Redis can reply from time to time
+ # to queries with a 2 millisecond delay.
+ #
+ # use "activerehashing yes" if you don't have such hard requirements but
+ # want to free memory asap when possible.
+ activerehashing yes
+
+ ################################## INCLUDES ###################################
+
+ # Include one or more other config files here. This is useful if you
+ # have a standard template that goes to all redis servers but also need
+ # to customize a few per-server settings. Include files can include
+ # other files, so use this wisely.
+ #
+ # include /path/to/local.conf
+ # include /path/to/other.conf
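
The config above gives the test suite a throwaway Redis on a non-default port with verbose logging and local snapshots. Connecting the scheduler to that instance only requires passing the port; a small sketch, mirroring the connection options used in redis_clean above:

    # Assumes the server started with `redis-server test/redis.conf` is running.
    redis = Redis.new(:thread_safe => true, :port => 16379)
    scheduler = Continuity::Scheduler.new_using_redis(redis)
    scheduler.on_schedule { |range| puts "scheduled #{range}" }
    scheduler.run
    scheduler.join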