gilmour 0.3.4 → 0.4.1

@@ -45,23 +45,23 @@ module Gilmour
  @ident = generate_ident
  end

- def ident
+ def ident #:nodoc:
  @ident
  end

- def generate_ident
+ def generate_ident #:nodoc:
  "#{Socket.gethostname}-pid-#{Process.pid}-uuid-#{SecureRandom.uuid}"
  end

- def report_health?
+ def report_health? #:nodoc:
  @report_health
  end

- def report_errors?
+ def report_errors? #:nodoc:
  @report_errors
  end

- def emit_error(message)
+ def emit_error(message) #:nodoc:
  report = self.report_errors?

  if report == false
@@ -73,7 +73,7 @@ module Gilmour
  end
  end

- def setup_pubsub(opts)
+ def setup_pubsub(opts) #:nodoc:
  @publisher = EM::Hiredis.connect(redis_host(opts))
  @subscriber = @publisher.pubsub_client
  register_handlers
@@ -82,7 +82,7 @@ module Gilmour
  GLogger.debug e.backtrace
  end

- def register_handlers
+ def register_handlers #:nodoc:
  @subscriber.on(:pmessage) do |key, topic, payload|
  pmessage_handler(key, topic, payload)
  end
@@ -100,18 +100,18 @@ module Gilmour
  end
  end

- def subscribe_topic(topic)
+ def subscribe_topic(topic) #:nodoc:
  method = topic.index('*') ? :psubscribe : :subscribe
  @subscriber.method(method).call(topic)
  end

- def pmessage_handler(key, matched_topic, payload)
+ def pmessage_handler(key, matched_topic, payload) #:nodoc:
  @subscriptions[key].each do |subscription|
  EM.defer(->{execute_handler(matched_topic, payload, subscription)})
  end
  end

- def register_response(sender, handler, timeout = 600)
+ def register_response(sender, handler, timeout = 600) #:nodoc:
  topic = "gilmour.response.#{sender}"
  timer = EM::Timer.new(timeout) do # Simulate error response
  GLogger.info "Timeout: Killing handler for #{sender}"
@@ -125,11 +125,11 @@ module Gilmour
  GLogger.debug e.backtrace
  end

- def publish_error(messsage)
+ def publish_error(messsage) #:nodoc:
  publish(messsage, Gilmour::ErrorChannel)
  end

- def queue_error(key, message)
+ def queue_error(key, message) #:nodoc:
  @publisher.lpush(key, message) do
  @publisher.ltrim(key, 0, GilmourErrorBufferLen) do
  Glogger.debug "Error queued"
@@ -137,7 +137,7 @@ module Gilmour
  end
  end

- def acquire_ex_lock(sender)
+ def acquire_ex_lock(sender) #:nodoc:
  @publisher.set(sender, sender, 'EX', 600, 'NX') do |val|
  EM.defer do
  yield val if val && block_given?
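The SET key value 'EX' 600 'NX' call above is the standard Redis recipe for an expiring exclusive lock. A minimal sketch of the same pattern using the synchronous redis-rb client (an assumption; the backend above uses em-hiredis), with an illustrative key name:

    require 'redis'

    redis = Redis.new(host: 'localhost', port: 6379)

    # Only the first caller gets true; the key, and with it the lock,
    # expires automatically after 600 seconds.
    if redis.set('gilmour.lock.example-sender', 'owner-ident', nx: true, ex: 600)
      # lock held: run the exclusive section
    else
      # another process already holds the lock
    end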
@@ -145,7 +145,7 @@ module Gilmour
  end
  end

- def response_handler(sender, payload)
+ def response_handler(sender, payload) #:nodoc:
  data, code, _ = Gilmour::Protocol.parse_response(payload)
  handler = @response_handlers.delete(sender)
  @subscriber.unsubscribe(sender)
@@ -158,28 +158,36 @@ module Gilmour
  GLogger.debug e.backtrace
  end

- def send_response(sender, body, code)
+ def send_response(sender, body, code) #:nodoc:
  publish(body, "gilmour.response.#{sender}", {}, code)
  end

- def get_subscribers
+ def get_subscribers #:nodoc:
  @subscriptions.keys
  end

- def setup_subscribers(subs = {})
- @subscriptions.merge!(subs)
- EM.defer do
- subs.keys.each { |topic| subscribe_topic(topic) }
+ def reply_to(topic, opts={}, &blk) #:nodoc:
+ if topic.index('*')
+ raise ArgumentError.new("Subscribers cannot have wildcard topics")
  end
+ super
  end

- def add_listener(topic, &handler)
+ def add_listener(topic, opts = {}, &blk) #:nodoc:
+ if opts[:excl] && exclusive_group(opts).empty?
+ raise ArgumentError.new("Invalid exclusive group")
+ end
+ opts[:handler] ||= blk
  @subscriptions[topic] ||= []
- @subscriptions[topic] << { handler: handler }
- subscribe_topic(topic)
+ @subscriptions[topic] << opts
+ EM.next_tick { subscribe_topic(topic) }
  end

- def remove_listener(topic, handler = nil)
+ def listeners(topic) #:nodoc:
+ @subscriptions[topic] || []
+ end
+
+ def remove_listener(topic, handler = nil) #:nodoc:
  if handler
  subs = @subscriptions[topic]
  subs.delete_if { |e| e[:handler] == handler }
@@ -189,30 +197,36 @@ module Gilmour
  @subscriber.unsubscribe(topic) if @subscriptions[topic].empty?
  end

- def send(sender, destination, payload, opts = {}, &blk)
+ def send_message(sender, destination, payload, opts = {}, &blk) #:nodoc:
  timeout = opts[:timeout] || 600
  if opts[:confirm_subscriber]
  confirm_subscriber(destination) do |present|
  if !present
  blk.call(nil, 404) if blk
  else
- _send(sender, destination, payload, timeout, &blk)
+ _send_message(sender, destination, payload, timeout, &blk)
  end
  end
  else
- _send(sender, destination, payload, timeout, &blk)
+ _send_message(sender, destination, payload, timeout, &blk)
  end
  rescue Exception => e
  GLogger.debug e.message
  GLogger.debug e.backtrace
  end

- def _send(sender, destination, payload, timeout, &blk)
+ def _send_message(sender, destination, payload, timeout, &blk) #:nodoc:
  register_response(sender, blk, timeout) if block_given?
  @publisher.publish(destination, payload)
  sender
  end

+ # Confirms whether an active subscriber is present.
+ # Params
+ # +dest+:: The destination topic
+ #
+ # The given block is called with a true boolean
+ # if active subscribers exist, else with false
  def confirm_subscriber(dest, &blk)
  @publisher.pubsub('numsub', dest) do |_, num|
  blk.call(num.to_i > 0)
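A minimal usage sketch of the block contract documented above, assuming a backend instance is at hand; the topic name is illustrative:

    backend.confirm_subscriber('example.topic') do |present|
      if present
        GLogger.info 'at least one active subscriber is listening'
      else
        GLogger.info 'no subscribers; a confirmed send would be answered with code 404'
      end
    end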
@@ -222,7 +236,7 @@ module Gilmour
  GLogger.debug e.backtrace
  end

- def stop
+ def stop #:nodoc:
  @subscriber.close_connection
  end

@@ -234,7 +248,7 @@ module Gilmour
  # before a Gilmour server starts up. To circumvent this dependency, till
  # monitor is stable enough, use Redis to save/share these data structures.
  #
- def register_health_check
+ def register_health_check #:nodoc:
  @publisher.hset GilmourHealthKey, self.ident, 'active'

  # - Start listening on a dyanmic topic that Health Monitor can publish
@@ -255,7 +269,7 @@ module Gilmour

  end

- def unregister_health_check
+ def unregister_health_check #:nodoc:
  deleted = false

  @publisher.hdel(GilmourHealthKey, self.ident) do
data/lib/gilmour/base.rb CHANGED
@@ -43,8 +43,7 @@ module Gilmour
  @@subscribers = {} # rubocop:disable all
  @@registered_services = []

- # :nodoc:
- def inherited(child)
+ def inherited(child) #:nodoc:
  @@registered_services << child
  end

@@ -54,23 +53,21 @@ module Gilmour
  @@registered_services
  end

- # Adds a listener for the given topic
- # topic:: The topic to listen to
- # opts: Hash of optional arguments.
- # Supported options are:
- #
- # excl:: If true, this listener is added to a group of listeners
- # with the same name as the name of the class in which this method
- # is called. A message sent to the _topic_ will be processed by at
- # most one listener from a group
+ # This is the underlying layer of communication. Use it only if you
+ # know what you are doing; prefer "reply_to" and "slot" instead.
  #
- # timeout: Maximum duration (seconds) that a subscriber has to
- # finish the task. If the execution exceeds the timeout, gilmour
- # responds with status {code:409, data: nil}
+ # Adds a listener for the given topic
+ # +topic+:: The topic to listen to
+ # +opts+:: Hash of optional arguments. Supported options are:
+ # excl:: If true, this listener is added to a group of listeners
+ # with the same name as the name of the class in which this
+ # method is called. A message sent to the _topic_ will be
+ # processed by at most one listener from a group
+ # timeout:: Maximum duration (seconds) that a subscriber has to
+ # finish the task. If the execution exceeds the timeout, gilmour
+ # responds with status {code:409, data: nil}
  #
- def listen_to(topic, opts={})
- handler = Proc.new
-
+ def listen_to(topic, opts={}, &handler)
  opt_defaults = {
  exclusive: false,
  timeout: 600,
@@ -84,8 +81,28 @@ module Gilmour
  @@subscribers[topic] ||= []
  @@subscribers[topic] << opt_defaults
  end
+ alias_method :add_listener, :listen_to
+
+ # Add a reply listener
+ def reply_to(topic, opts={}, &handler)
+ defopts = opts.merge({
+ type: :reply
+ })
+ listen_to(topic, defopts, &handler)
+ end
+
+ # Add a slot listener
+ def slot(topic, opts={}, &handler)
+ defopts = opts.merge({
+ type: :slot
+ })
+ listen_to(topic, defopts, &handler)
+ end

  # Returns the list of subscribers for _topic_ or all subscribers if it is nil
+ # Params:
+ # +topic+:: The topic for which to return the subscribers. All subscribers are
+ # returned if this is not provided
  def subscribers(topic = nil)
  if topic
  @@subscribers[topic]
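A hedged sketch of how a subscriber might call the class-level helpers documented above. The topics are hypothetical, and the helpers available inside a handler body are outside this diff:

    # Called at class level of a subscriber:
    reply_to 'example.echo', timeout: 60 do
      # compute and send back a reply; after `timeout` seconds gilmour
      # answers with {code: 409, data: nil} instead
    end

    slot 'example.events', excl: true do
      # fire-and-forget handler; excl: true groups listeners by class name
      # so at most one member of the group processes each message
    end

    listen_to 'example.raw.*' do
      # low-level form; reply_to and slot are preferred, per the comment above
    end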
@@ -96,20 +113,19 @@ module Gilmour

  # Loads all ruby source files inside _dir_ as subscribers
  # Should only be used inside the parent container class
+ # Params:
+ # +dir+:: relative path of directory to load subscribers from
  def load_all(dir = nil)
  dir ||= (subscribers_path || DEFAULT_SUBSCRIBER_PATH)
  Dir["#{dir}/*.rb"].each { |f| require f }
  end

- # Loads the ruby file at _path_ as a subscriber
- # Should only be used inside the parent container class
- def load_subscriber(path)
+ def load_subscriber(path) #:nodoc:
  require path
  end
  end

- # :nodoc:
- def registered_subscribers
+ def registered_subscribers #:nodoc:
  self.class.registered_subscribers
  end
  ############ End Register ###############
@@ -120,9 +136,13 @@ module Gilmour
  attr_reader :backends

  # Enable and return the given backend
- # Should only be used inside the parent container class
- # If +opts[:multi_process]+ is true, every request handler will
- # be run inside a new child process.
+ # Params:
+ # +name+:: the backend name (currently only 'redis' is supported)
+ # +opts+:: backend specific options. Options for redis are:
+ # host:: the redis server hostname
+ # port:: the redis server port
+ # braodcast_errors:: whether error reporting should be turned on
+ # health_check:: whether health_check heartbeats should be enabled
  def enable_backend(name, opts = {})
  Gilmour::Backend.load_backend(name)
  @backends ||= {}
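A hedged call sketch matching the documented signature; the host and port values are illustrative, and the other documented options go into the same hash:

    backend = enable_backend('redis', host: 'localhost', port: 6379)
    # also reachable via the get_backend alias shown below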
@@ -130,11 +150,11 @@ module Gilmour
  end
  alias_method :get_backend, :enable_backend

+ # Cleanup the subscribers and health checks
  def tear_down!
  subs_by_backend = subs_grouped_by_backend
  subs_by_backend.each do |b, subs|
  backend = get_backend(b)
- backend.setup_subscribers(subs)
  if backend.report_health?
  backend.unregister_health_check
  end
@@ -144,18 +164,29 @@ module Gilmour
  # Starts all the listeners
  # If _startloop_ is true, this method will start it's own
  # event loop and not return till Eventmachine reactor is stopped
+ # Params:
+ # +startloop+:: If true, this call will join the EventMachine thread and
+ # block till it is done.
  def start(startloop = false)
  subs_by_backend = subs_grouped_by_backend
  subs_by_backend.each do |b, subs|
  backend = get_backend(b)
- backend.setup_subscribers(subs)
-
+ subs.each do |topic, handlers|
+ handlers.each do |handler|
+ if handler[:type] == :slot
+ backend.slot(topic, handler)
+ elsif handler[:type] == :reply
+ backend.reply_to(topic, handler)
+ else
+ backend.add_listener(topic, handler)
+ end
+ end
+ end
  if backend.report_health?
  backend.register_health_check
  end
  end
-
- if startloop
+ if startloop #Move into redis backend
  GLogger.debug 'Joining EM event loop'
  EM.reactor_thread.join
  end
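A hedged end-to-end sketch of the container lifecycle shown above; the class name is hypothetical and the option keys and values are illustrative:

    class ExampleContainer
      include Gilmour::Base
    end

    container = ExampleContainer.new
    container.enable_backend('redis', host: 'localhost', port: 6379)
    container.start(true)    # registers every listener, then joins the EM reactor thread
    # container.tear_down! unregisters health checks and cleans up on shutdown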
@@ -0,0 +1,190 @@
+
+ module Gilmour
+ module Composers
+ class Request #:nodoc:
+ attr_reader :topic
+ def initialize(backend, spec)
+ @backend = backend
+ @spec = spec
+ if spec.kind_of?(Hash)
+ @message = spec[:message] || spec['message'] || {}
+ @topic = spec[:topic] || spec['topic']
+ if !@topic
+ raise ArgumentError.new("Request topic cannot be empty in a request spec")
+ end
+ @opts = spec[:opts] || spec['opts'] || {}
+ if @opts && !@opts.kind_of?(Hash)
+ raise ArgumentError.new("Request opts must be a Hash")
+ end
+ elsif spec.kind_of?(Proc)
+ @topic = spec.to_s
+ else
+ raise ArgumentError.new("Request spec must be a spec or proc")
+ end
+ end
+
+ def execute(data = {}, &blk)
+ if @spec.kind_of?(Proc)
+ res = @spec.call(data)
+ code = res ? 200 : 500
+ blk.call(res, code)
+ else
+ message = if @message.kind_of?(Hash) && data.kind_of?(Hash)
+ data.merge(@message)
+ else
+ data
+ end
+ @backend.request!(message, @topic, @opts, &blk)
+ end
+ rescue Exception => e
+ GLogger.debug e.message
+ GLogger.debug e.backtrace
+ end
+ end
+
+ class Pipeline #:nodoc:
+ attr_reader :pipeline
+
+ def initialize(backend, spec)
+ @backend = backend
+ unless spec.kind_of? Array
+ raise ArgumentError.new("Compose spec must be an array")
+ end
+ @pipeline = spec.map do |s|
+ if s.kind_of?(Pipeline) || s.kind_of?(Request)
+ s
+ else
+ Request.new(backend, s)
+ end
+ end
+ end
+
+ def execute
+ raise NotImplementedError.new
+ end
+
+ def continuation(queue)
+ self.class.new(@backend, queue)
+ end
+ end
+
+ class Compose < Pipeline
+ # Execute a pipeline. The output of the last stage of the
+ # pipeline is passed to the block, if all stages are
+ # successful. Otherwise, the output of the last successful stage
+ # is passed, along with the continuation which represents the
+ # remaining pipeline
+ def execute(msg = {}, &blk)
+ blk.call(nil, nil) if pipeline.empty?
+ handler = proc do |queue, data, code|
+ if queue.empty? || code != 200
+ blk.call(data, code, continuation(queue))
+ else
+ head = queue.first
+ tail = queue[1..-1]
+ head.execute(data, &handler.curry[tail])
+ end
+ end
+ handler.call(pipeline, msg, 200)
+ end
+ end
+
+ # Create a Compose pipeline as per the spec. This
+ # is roughly equivalent to the following unix construct
+ # cmd1 | cmd2 | cmd3 ...
+ # The spec is an array of hashes, each describing
+ # a request topic and message. The message is optional.
+ # If present, this message is merged into the output
+ # of the previous step, before passing it as the input.
+ # Eg, below, if msg2 was a Hash, it would be merged into
+ # the output from the subscriber of topic1, and the merged hash
+ # would be sent as the request body to topic2
+ # [
+ # {topic: topic1, message: msg1},
+ # {topic: topic2, message: msg2},
+ # ...
+ # ]
+ #
+ # Instead of a hash, a spec item can also be a callable
+ # (proc or lambda). In that case, the output of the previous step
+ # is passed through the callable, and the return value of the callable
+ # is sent as the input to the next step.
+ #
+ # In place of a hash or callable, it is also possible to have
+ # any kind of composition itself, allowing the creation of
+ # nested compositions. See examples in the source code.
+ #
+ def compose(spec)
+ Compose.new(self, spec)
+ end
+
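A hedged usage sketch of compose; the topics and messages are illustrative, and `backend` is assumed to be an object that mixes in Gilmour::Composers:

    pipeline = backend.compose([
      { topic: 'example.fetch', message: { id: 42 } },
      ->(data) { data.merge(stamped: true) },   # a callable step
      { topic: 'example.store' }
    ])

    pipeline.execute({}) do |data, code, continuation|
      if code == 200
        GLogger.info "pipeline finished with #{data}"
      else
        # continuation wraps the steps that were never executed
        GLogger.info "failed with #{code}; #{continuation.pipeline.size} steps left"
      end
    end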
+ class AndAnd < Pipeline
+ # Execute the andand pipeline. The output of the last successful
+ # step is passed to block, along with the remaining continuation
+ # See the documentation of the #andand method for more details
+ def execute(data={}, &blk)
+ blk.call(nil, nil) if pipeline.empty?
+ handler = proc do |queue, data, code|
+ if queue.empty? || code != 200
+ blk.call(data, code, continuation(queue))
+ else
+ head = queue.first
+ tail = queue[1..-1]
+ head.execute(&handler.curry[tail])
+ end
+ end
+ handler.call(pipeline, nil, 200)
+ end
+ end
+
+ # Same as compose, except that this composition does not pass data from
+ # one step to the next. Execution halts on the first error.
+ # It is roughly equivalent to the unix construct
+ # cmd1 && cmd2 && cmd3
+
+ def andand(spec)
+ AndAnd.new(self, spec)
+ end
+
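A similar hedged sketch for andand; steps run in order, nothing is passed between them, and execution stops at the first non-200 code (topics are illustrative):

    checks = backend.andand([
      { topic: 'example.migrate' },
      { topic: 'example.warm_cache' },
      { topic: 'example.announce' }
    ])

    checks.execute do |data, code, continuation|
      GLogger.info(code == 200 ? 'all steps succeeded' : "stopped early with code #{code}")
    end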
+ class Batch < Pipeline
+ def initialize(backend, spec, record=false) #:nodoc:
+ super(backend, spec)
+ @record = record
+ end
+
+ # Execute the batch pipeline. This pipeline ignores all errors;
+ # the output of the last step is passed to the block.
+ # See the documentation of the #batch method for more details
+ def execute(data={}, &blk)
+ results = []
+ blk.call(nil, nil) if pipeline.empty?
+ handler = proc do |queue, data, code|
+ results << {data: data, code: code} if @record
+ if queue.empty?
+ result = @record ? results[1..-1] : data
+ blk.call(result, code)
+ else
+ head = queue.first
+ tail = queue[1..-1]
+ head.execute(&handler.curry[tail])
+ end
+ end
+ handler.call(pipeline, nil, 200)
+ end
+ end
+
+ # Same as compose, except that no errors are checked
+ # and the pipeline executes all steps unconditionally
+ # and sequentially. It is roughly equivalent to the unix construct
+ # cmd1; cmd2; cmd3
+ # OR
+ # (cmd1; cmd2; cmd3)
+ # Params:
+ # +record+:: If this is false, only the output of the last step
+ # is passed to the block passed to execute. If true, all outputs
+ # are collected in an array and passed to the block
+ def batch(spec, record=false)
+ Batch.new(self, spec, record)
+ end
+ end
+ end
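A hedged sketch of batch with record set to true, so every step's output is collected; the topics are illustrative:

    reports = backend.batch([
      { topic: 'example.report_a' },
      { topic: 'example.report_b' }
    ], true)

    reports.execute do |results, code|
      # with record=true, results is an array of {data:, code:} hashes, one per step
      results.each { |r| GLogger.info "#{r[:code]}: #{r[:data]}" }
    end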