symphony 0.3.0.pre20140327204419

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,407 @@
+ # -*- ruby -*-
+ #encoding: utf-8
+
+ require 'set'
+ require 'sysexits'
+ require 'pluggability'
+ require 'loggability'
+
+ require 'msgpack'
+ require 'yajl'
+ require 'yaml'
+
+ require 'symphony' unless defined?( Symphony )
+ require 'symphony/signal_handling'
+
+
+ # A task is the subclassable unit of work that Symphony loads when it starts up.
+ class Symphony::Task
+     extend Loggability,
+            Pluggability,
+            Sysexits,
+            Symphony::MethodUtilities
+
+     include Symphony::SignalHandling
+
+
+     # Signals to reset to defaults for the child
+     SIGNALS = %i[ INT TERM HUP CHLD WINCH ]
+
+     # Valid work model types
+     WORK_MODELS = %i[ longlived oneshot ]
+
+
+     # Loggability API -- log to symphony's logger
+     log_to :symphony
+
+     # Pluggability API -- set the directory/directories that will be searched when trying to
+     # load tasks by name.
+     plugin_prefixes 'symphony/tasks'
+
+
+     ### Create a new Task object and listen for work. Exits with the code returned
+     ### by #start when it's done.
+     def self::run
+         if self.subscribe_to.empty?
+             raise ScriptError,
+                 "No subscriptions defined. Add one or more patterns using subscribe_to."
+         end
+
+         exit self.new( self.queue ).start
+     end
+
+
+     ### Inheritance hook -- set some defaults on subclasses.
+     def self::inherited( subclass )
+         super
+
+         subclass.instance_variable_set( :@routing_keys, Set.new )
+         subclass.instance_variable_set( :@acknowledge, true )
+         subclass.instance_variable_set( :@work_model, :longlived )
+         subclass.instance_variable_set( :@prefetch, 10 )
+         subclass.instance_variable_set( :@timeout_action, :reject )
+     end
+
+
+     ### Fetch the Symphony::Queue for this task, creating it if necessary.
+     def self::queue
+         unless @queue
+             @queue = Symphony::Queue.for_task( self )
+         end
+         return @queue
+     end
+
+
+     ### Return a queue name derived from the name of the task class.
+     def self::default_queue_name
+         name = self.name || "anonymous task %d" % [ self.object_id ]
+         return name.gsub( /\W+/, '.' ).downcase
+     end
+
+
+     ### Return a consumer tag for this task's queue consumer.
+     def self::consumer_tag
+         return "%s.%s.%d" % [
+             self.queue_name,
+             Socket.gethostname.gsub(/\..*$/, ''),
+             Process.pid,
+         ]
+     end
+
+
+     #
+     # :section: Declarative Methods
+     # These methods are used to configure how the task interacts with its queue and
+     # how it runs.
+
+
+     ### Get/set the name of the queue to consume.
+     def self::queue_name( new_name=nil )
+         if new_name
+             @queue_name = new_name
+         end
+
+         @queue_name ||= self.default_queue_name
+         return @queue_name
+     end
+
+
+     ### Set up one or more topic key patterns to use when binding the Task's queue
+     ### to the exchange.
+     def self::subscribe_to( *routing_keys )
+         unless routing_keys.empty?
+             self.log.info "Setting task routing keys to: %p." % [ routing_keys ]
+             @routing_keys.replace( routing_keys )
+         end
+
+         return @routing_keys
+     end
+     class << self; alias_method :routing_keys, :subscribe_to ; end
+
+
+     ### Enable or disable acknowledgements.
+     def self::acknowledge( new_setting=nil )
+         unless new_setting.nil?
+             self.log.info "Turning task acknowledgement %s." % [ new_setting ? "on" : "off" ]
+             @acknowledge = new_setting
+         end
+
+         return @acknowledge
+     end
+
+
+     ### Get/set the maximum number of seconds the job should work on a single
+     ### message before giving up.
+     def self::timeout( seconds=nil, options={} )
+         unless seconds.nil?
+             self.log.info "Setting the task timeout to %0.2fs." % [ seconds.to_f ]
+             @timeout = seconds.to_f
+             self.timeout_action( options[:action] )
+         end
+
+         return @timeout
+     end
+
+
+     ### Set the action taken when work times out.
+     def self::timeout_action( new_value=nil )
+         if new_value
+             @timeout_action = new_value.to_sym
+         end
+
+         return @timeout_action
+     end
+
+
+     ### Alter the work model between :oneshot and :longlived.
+     def self::work_model( new_setting=nil )
+         if new_setting
+             new_setting = new_setting.to_sym
+             unless WORK_MODELS.include?( new_setting )
+                 raise "Unknown work_model %p (must be one of: %s)" %
+                     [ new_setting, WORK_MODELS.join(', ') ]
+             end
+
+             self.log.info "Setting task work model to: %p." % [ new_setting ]
+             @work_model = new_setting
+         end
+
+         return @work_model
+     end
+
+
+     ### Set the maximum number of messages to prefetch. Ignored if the work_model is
+     ### :oneshot.
+     def self::prefetch( count=nil )
+         if count
+             @prefetch = count
+         end
+         return @prefetch
+     end
+
+
+     #
+     # Instance Methods
+     #
+
+     ### Create a worker that will listen on the specified +queue+ for a job.
+     def initialize( queue )
+         @queue = queue
+         @signal_handler = nil
+         @shutting_down = false
+         @restarting = false
+     end
+
+
+     ######
+     public
+     ######
+
+     # The queue that the task consumes messages from
+     attr_reader :queue
+
+     # The signal handler thread
+     attr_reader :signal_handler
+
+     # Is the task in the process of shutting down?
+     attr_predicate_accessor :shutting_down
+
+     # Is the task in the process of restarting?
+     attr_predicate_accessor :restarting
+
+
+     ### Set up the task and start handling messages.
+     def start
+         rval = nil
+
+         begin
+             self.restarting = false
+             rval = self.with_signal_handler( *SIGNALS ) do
+                 self.start_handling_messages
+             end
+         end while self.restarting?
+
+         return rval ? 0 : 1
+
+     rescue Exception => err
+         self.log.fatal "%p in %p: %s" % [ err.class, self.class, err.message ]
+         self.log.debug { ' ' + err.backtrace.join(" \n") }
+
+         return :software
+     end
+
+
+     ### Restart the task after reloading the config.
+     def restart
+         self.restarting = true
+         self.log.warn "Restarting..."
+
+         if Symphony.config.reload
+             self.log.info " config reloaded"
+         else
+             self.log.info " no config changes"
+         end
+
+         self.log.info " resetting queue"
+         Symphony::Queue.reset
+         self.queue.shutdown
+     end
+
+
+     ### Stop the task immediately, e.g., when sent a second TERM signal.
+     def stop_immediately
+         self.log.warn "Already in shutdown -- halting immediately."
+         self.shutting_down = true
+         self.ignore_signals( *SIGNALS )
+         self.queue.halt
+     end
+
+
+     ### Set the task to stop after it finishes what it's currently doing.
+     def stop_gracefully
+         self.log.warn "Attempting to shut down gracefully."
+         self.shutting_down = true
+         self.queue.shutdown
+     end
+
+
+     ### Start consuming messages from the queue, calling #work for each one.
+     def start_handling_messages
+         oneshot = self.class.work_model == :oneshot
+
+         return self.queue.wait_for_message( oneshot ) do |payload, metadata|
+             work_payload = self.preprocess_payload( payload, metadata )
+
+             if self.class.timeout
+                 self.work_with_timeout( work_payload, metadata )
+             else
+                 self.work( work_payload, metadata )
+             end
+         end
+     end
+
+
+     ### Start the thread that will deliver signals once they're put on the queue.
+     def start_signal_handler
+         @signal_handler = Thread.new do
+             Thread.current.abort_on_exception = true
+             loop do
+                 self.log.debug "Signal handler: waiting for new signals in the queue."
+                 self.wait_for_signals
+             end
+         end
+     end
+
+
+     ### Stop the signal handler thread.
+     def stop_signal_handler
+         @signal_handler.exit if @signal_handler
+     end
+
+
+     ### Handle signals; called by the signal handler thread with a signal from the
+     ### queue.
+     def handle_signal( sig )
+         self.log.debug "Handling signal %s" % [ sig ]
+         case sig
+         when :TERM
+             self.on_terminate
+         when :INT
+             self.on_interrupt
+         when :HUP
+             self.on_hangup
+         when :CHLD
+             self.on_child_exit
+         when :WINCH
+             self.on_window_size_change
+         else
+             self.log.warn "Unhandled signal %s" % [ sig ]
+         end
+     end
+
+
+     ### Do any necessary pre-processing on the raw +payload+ according to values in
+     ### the given +metadata+.
+     def preprocess_payload( payload, metadata )
+         self.log.debug "Got a %0.2fK %s payload" %
+             [ payload.bytesize / 1024.0, metadata[:content_type] ]
+         work_payload = case metadata[:content_type]
+             when 'application/x-msgpack'
+                 MessagePack.unpack( payload )
+             when 'application/json', 'text/javascript'
+                 Yajl::Parser.parse( payload )
+             when 'application/x-yaml', 'text/x-yaml'
+                 YAML.load( payload )
+             else
+                 payload
+             end
+
+         return work_payload
+     end
+
+
+     ### Return a consumer tag for this task's queue consumer.
+     def make_consumer_tag
+         return "%s.%s.%d" % [
+             self.queue_name,
+             Socket.gethostname.gsub(/\..*$/, ''),
+             Process.pid,
+         ]
+     end
+
+
+     ### Do work based on the given message +payload+ and +metadata+.
+     def work( payload, metadata )
+         raise NotImplementedError,
+             "%p doesn't implement required method #work" % [ self.class ]
+     end
+
+
+     ### Wrap a timeout around the call to work, and handle timeouts according to
+     ### the configured timeout_action.
+     def work_with_timeout( payload, metadata )
+         Timeout.timeout( self.class.timeout ) do
+             return self.work( payload, metadata )
+         end
+     rescue Timeout::Error
+         self.log.error "Timed out while performing work"
+         raise if self.class.timeout_action == :reject
+         return false
+     end
+
+
+     ### Handle a child process exiting.
+     def on_child_exit
+         self.log.info "Child exited."
+         Process.waitpid( 0, Process::WNOHANG )
+     end
+
+
+     ### Handle a window size change event. No-op by default.
+     def on_window_size_change
+         self.log.info "Window size changed."
+     end
+
+
+     ### Handle a hangup signal by re-reading the config and restarting.
+     def on_hangup
+         self.log.info "Hangup signal."
+         self.restart
+     end
+
+
+     ### Handle a termination or interrupt signal.
+     def on_terminate
+         self.log.debug "Signalled to shut down."
+
+         if self.shutting_down?
+             self.stop_immediately
+         else
+             self.stop_gracefully
+         end
+     end
+     alias_method :on_interrupt, :on_terminate
+
+
+ end # class Symphony::Task
+
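The class above is the whole contract a task has to satisfy: configure the queue with the declarative methods, then implement #work. As a quick illustration, a minimal subclass written against this API might look like the sketch below (the class name, routing key, and log message are invented for the example and are not part of this release):

    # A hypothetical task that just logs greeting events.
    class GreetingLogger < Symphony::Task

        # Bind the task's queue to messages published with this topic key
        subscribe_to 'greetings.#'

        # Stay running between messages and take up to 10 at a time
        work_model :longlived
        prefetch 10

        # Give up on a single message after 30 seconds and reject it
        timeout 30, action: :reject

        ### Task API -- log one decoded +payload+ and ack it by returning true.
        def work( payload, metadata )
            self.log.info "Greeting received: %p" % [ payload ]
            return true
        end

    end # class GreetingLogger

Calling GreetingLogger.run would raise a ScriptError if no subscription had been declared, build the task's queue via Symphony::Queue.for_task, and block in #start until the process is signalled to shut down.
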
@@ -0,0 +1,51 @@
+ #!/usr/bin/env ruby
+
+ require 'pp'
+ require 'pathname'
+ require 'tmpdir'
+ require 'symphony/task' unless defined?( Symphony::Task )
+ require 'symphony/metrics'
+
+
+ # A spike to log events
+ class Auditor < Symphony::Task
+     prepend Symphony::Metrics
+
+     subscribe_to '#'
+     prefetch 1000
+     queue_name '_audit'
+
+
+     ### Create a new Auditor task.
+     def initialize( queue )
+         super
+         @logdir = Pathname.pwd
+         @logfile = @logdir + 'events.log'
+         @log = @logfile.open( File::CREAT|File::APPEND|File::WRONLY, encoding: 'utf-8' )
+         self.log.info "Logfile is: %s" % [ @logfile ]
+     end
+
+
+     ######
+     public
+     ######
+
+     #
+     # Task API
+     #
+
+     # Log the event.
+     def work( payload, metadata )
+         @log.puts "%d%s [%s]: %p" % [
+             metadata[:delivery_info][:delivery_tag],
+             metadata[:delivery_info][:redelivered] ? '+' : '',
+             metadata[:delivery_info][:routing_key],
+             payload
+         ]
+
+         return true
+     end
+
+
+ end # class Auditor
+
@@ -0,0 +1,106 @@
+ #!/usr/bin/env ruby
+
+ require 'objspace'
+ require 'pathname'
+ require 'tmpdir'
+ require 'symphony/task' unless defined?( Symphony::Task )
+ require 'symphony/metrics'
+
+
+ # Log events that get published to the dead-letter queue
+ class FailureLogger < Symphony::Task
+     prepend Symphony::Metrics
+
+     # Audit all events
+     subscribe_to '#'
+
+     # Connect to a specific queue
+     queue_name '_failures'
+
+
+     ### Set up the output device. By default it's STDERR, but it can be anything
+     ### that responds to #<<.
+     def initialize( * )
+         super
+         @output = $stderr
+         $stderr.sync = true
+     end
+
+
+     ######
+     public
+     ######
+
+     #
+     # Task API
+     #
+
+     # Log the failure. The headers of a dead-lettered message look like:
+     #     :headers=>{
+     #         "x-death"=>[{
+     #             "reason"=>"rejected",
+     #             "queue"=>"auditor",
+     #             "time"=>2014-03-12 18:55:10 -0700,
+     #             "exchange"=>"events",
+     #             "routing-keys"=>["some.stuff"]
+     #         }]
+     #     }
+     def work( payload, metadata )
+         self.log_failure( payload, metadata )
+         return true
+     end
+
+
+     ### Log the failure described by the given +payload+ and +metadata+.
+     def log_failure( payload, metadata )
+         raise "No headers; not a dead-lettered message?" unless
+             metadata[:properties] &&
+             metadata[:properties][:headers]
+         deaths = metadata[:properties][:headers]['x-death'] or
+             raise "No x-death header; not a dead-lettered message?"
+
+         message = self.log_prefix( payload, metadata )
+         message << self.log_deaths( deaths )
+         message << self.log_payload( payload, metadata )
+
+         @output << message << "\n"
+     end
+
+
+     ### Return a logging message prefix (a timestamp) for the failed message
+     ### described by +payload+ and +metadata+.
+     def log_prefix( payload, metadata )
+         return "[%s]: " % [ Time.now.strftime('%Y-%m-%d %H:%M:%S.%4N') ]
+     end
+
+
+     ### Return a logging message part based on the specified message +payload+.
+     def log_payload( payload, metadata )
+         return " -- %0.2fKB %s payload: %p" % [
+             ObjectSpace.memsize_of(payload) / 1024.0,
+             metadata[:content_type],
+             payload,
+         ]
+     end
+
+
+     ### Return a logging message part based on the specified +deaths+.
+     ###
+     ### deaths - An Array of Hashes derived from the 'x-death' headers of the message
+     ###
+     def log_deaths( deaths )
+         message = ''
+         deaths.each do |death|
+             message << " %s-{%s}->%s (%s)" % [
+                 death['exchange'],
+                 death['routing-keys'].join(','),
+                 death['queue'],
+                 death['reason'],
+             ]
+         end
+
+         return message
+     end
+
+ end # class FailureLogger
+
@@ -0,0 +1,64 @@
+ #!/usr/bin/env ruby
+
+ require 'socket'
+ require 'timeout'
+ require 'symphony/task' unless defined?( Symphony::Task )
+
+
+ ### A proof-of-concept task to determine SSH availability of a host.
+ class Symphony::Task::Pinger < Symphony::Task
+
+     # The topic keys to subscribe to
+     subscribe_to 'monitor.availability.port',
+                  'host.ping'
+
+     # Send success/failure back to the queue on job completion. When true, the
+     # work isn't considered complete until receiving a success ack. When false,
+     # a worker simply consuming the task is sufficient.
+     acknowledge false # default: true
+
+     # Timeout for performing work. NOT to be confused with the message TTL
+     # during queue lifetime.
+     timeout 10.minutes # default: no timeout
+
+     # Whether the task should exit after doing its work
+     work_model :oneshot # default: :longlived
+
+
+     # The default port
+     DEFAULT_PORT = 'ssh'
+
+
+     ### Create a new Pinger task.
+     def initialize( * )
+         super
+     end
+
+
+     ######
+     public
+     ######
+
+     # The hostname to ping
+     attr_reader :hostname
+
+     # The (TCP) port to ping
+     attr_reader :port
+
+     # If there is a problem pinging the remote host, this is set to the exception
+     # raised when doing so.
+     attr_reader :problem
+
+
+     #
+     # Task API
+     #
+
+     ### Do the ping.
+     def work( payload, metadata )
+         return ping( payload['hostname'], payload['port'] )
+     end
+
+
+ end # class Symphony::Task::Pinger
+
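The #work method above hands off to a ping helper that doesn't appear in this file. A minimal sketch of what such a helper could look like, assuming a simple TCP connect test (the body, the 5-second connect timeout, and the error handling are illustrative, not the gem's actual implementation):

    ### Try to open a TCP connection to +hostname+ on +port+ (a port number or a
    ### service name such as DEFAULT_PORT); return true if the connection succeeds.
    def ping( hostname, port )
        @hostname = hostname
        @port     = port || DEFAULT_PORT

        Timeout.timeout( 5 ) do
            TCPSocket.open( @hostname, @port ) {} # connect, then close immediately
        end

        return true
    rescue SocketError, SystemCallError, Timeout::Error => err
        @problem = err
        self.log.error "Couldn't reach %s:%s: %s" % [ @hostname, @port, err.message ]
        return false
    end

The sketch also fills in @hostname, @port, and @problem so the attr_readers declared above have something to report.
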
@@ -0,0 +1,57 @@
+ #!/usr/bin/env ruby
+
+ require 'pathname'
+ require 'tmpdir'
+ require 'symphony/task' unless defined?( Symphony::Task )
+ require 'symphony/metrics'
+
+ # A spike to test out various task execution outcomes.
+ class Simulator < Symphony::Task
+     prepend Symphony::Metrics
+
+     # Simulate processing all events
+     subscribe_to '#'
+
+     # Fetch 100 events at a time
+     prefetch 100
+
+     # Only allow 2 seconds for work to complete before rejecting or retrying.
+     timeout 2.0, action: :retry
+
+
+     ######
+     public
+     ######
+
+     #
+     # Task API
+     #
+
+     # Simulate working on an event.
+     def work( payload, metadata )
+         if metadata[:properties][:headers] &&
+            metadata[:properties][:headers]['x-death']
+             puts "Deaths! %p" % [ metadata[:properties][:headers]['x-death'] ]
+         end
+
+         val = Random.rand
+         case
+         when val < 0.33
+             $stderr.puts "Simulating an error in the task (reject)."
+             raise "OOOOOPS!"
+         when val < 0.66
+             $stderr.puts "Simulating a soft failure in the task (reject+requeue)."
+             return false
+         when val < 0.88
+             $stderr.puts "Simulating a timeout case"
+             sleep( self.class.timeout + 1 )
+         else
+             $stderr.puts "Simulating a successful task run (accept)"
+             puts( payload.inspect )
+             return true
+         end
+     end
+
+
+ end # class Simulator
+