erchef-expander 11.4.0

@@ -0,0 +1,275 @@
+ #
+ # Author:: Daniel DeLeo (<dan@opscode.com>)
+ # Author:: Seth Falcon (<seth@opscode.com>)
+ # Author:: Chris Walters (<cw@opscode.com>)
+ # Copyright:: Copyright (c) 2010-2011 Opscode, Inc.
+ # License:: Apache License, Version 2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ require 'set'
+ require 'yajl'
+ require 'fast_xs'
+ require 'em-http-request'
+ require 'chef/expander/loggable'
+ require 'chef/expander/flattener'
+
+ module Chef
+   module Expander
+     class Solrizer
+
+       @active_http_requests = Set.new
+
+       def self.http_request_started(instance)
+         @active_http_requests << instance
+       end
+
+       def self.http_request_completed(instance)
+         @active_http_requests.delete(instance)
+       end
+
+       def self.http_requests_active?
+         !@active_http_requests.empty?
+       end
+
+       def self.clear_http_requests
+         @active_http_requests.clear
+       end
+
+       include Loggable
+
+       ADD = "add"
+       DELETE = "delete"
+       SKIP = "skip"
+
+       ITEM = "item"
+       ID = "id"
+       TYPE = "type"
+       DATABASE = "database"
+       ENQUEUED_AT = "enqueued_at"
+
+       DATA_BAG_ITEM = "data_bag_item"
+       DATA_BAG = "data_bag"
+
+       X_CHEF_id_CHEF_X = 'X_CHEF_id_CHEF_X'
+       X_CHEF_database_CHEF_X = 'X_CHEF_database_CHEF_X'
+       X_CHEF_type_CHEF_X = 'X_CHEF_type_CHEF_X'
+
+       CONTENT_TYPE_XML = {"Content-Type" => "text/xml"}
+
+       attr_reader :action
+
+       attr_reader :indexer_payload
+
+       attr_reader :chef_object
+
+       attr_reader :obj_id
+
+       attr_reader :obj_type
+
+       attr_reader :database
+
+       attr_reader :enqueued_at
+
+       def initialize(object_command_json, &on_completion_block)
+         @start_time = Time.now.to_f
+         @on_completion_block = on_completion_block
+         if parsed_message = parse(object_command_json)
+           @action = parsed_message["action"]
+           @indexer_payload = parsed_message["payload"]
+
+           extract_object_fields if @indexer_payload
+         else
+           @action = SKIP
+         end
+       end
+
+       def extract_object_fields
+         @chef_object = @indexer_payload[ITEM]
+         @database = @indexer_payload[DATABASE]
+         @obj_id = @indexer_payload[ID]
+         @obj_type = @indexer_payload[TYPE]
+         @enqueued_at = @indexer_payload[ENQUEUED_AT]
+         @data_bag = @obj_type == DATA_BAG_ITEM ? @chef_object[DATA_BAG] : nil
+       end
+
+       def parse(serialized_object)
+         Yajl::Parser.parse(serialized_object)
+       rescue Yajl::ParseError
+         log.error { "cannot index object because it is invalid JSON: #{serialized_object}" }
+       end
+
+       def run
+         case @action
+         when ADD
+           add
+         when DELETE
+           delete
+         when SKIP
+           completed
+           log.info { "not indexing this item because of malformed JSON" }
+         else
+           completed
+           log.error { "cannot index object because it has an invalid action #{@action}" }
+         end
+       end
+
+       def add
+         post_to_solr(pointyize_add) do
+           ["indexed #{indexed_object}",
+            "transit,xml,solr-post |",
+            [transit_time, @xml_time, @solr_post_time].join(","),
+            "|"
+           ].join(" ")
+         end
+       rescue Exception => e
+         log.error { "#{e.class.name}: #{e.message}\n#{e.backtrace.join("\n")}"}
+       end
+
+       def delete
+         post_to_solr(pointyize_delete) { "deleted #{indexed_object} transit-time[#{transit_time}s]"}
+       rescue Exception => e
+         log.error { "#{e.class.name}: #{e.message}\n#{e.backtrace.join("\n")}"}
+       end
+
+       def flattened_object
+         flattened_object = Flattener.new(@chef_object).flattened_item
+
+         flattened_object[X_CHEF_id_CHEF_X] = [@obj_id]
+         flattened_object[X_CHEF_database_CHEF_X] = [@database]
+         flattened_object[X_CHEF_type_CHEF_X] = [@obj_type]
+
+         log.debug {"adding flattened object to Solr: #{flattened_object.inspect}"}
+
+         flattened_object
+       end
+
+       START_XML = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+       ADD_DOC = "<add><doc>"
+       DELETE_DOC = "<delete>"
+       ID_OPEN = "<id>"
+       ID_CLOSE = "</id>"
+       END_ADD_DOC = "</doc></add>\n"
+       END_DELETE = "</delete>\n"
+       START_CONTENT = '<field name="content">'
+       CLOSE_FIELD = "</field>"
+
+       FLD_CHEF_ID_FMT = '<field name="X_CHEF_id_CHEF_X">%s</field>'
+       FLD_CHEF_DB_FMT = '<field name="X_CHEF_database_CHEF_X">%s</field>'
+       FLD_CHEF_TY_FMT = '<field name="X_CHEF_type_CHEF_X">%s</field>'
+       FLD_DATA_BAG = '<field name="data_bag">%s</field>'
+
+       KEYVAL_FMT = "%s__=__%s "
+
+       # Takes a flattened hash where the values are arrays and converts it into
+       # a dignified XML document suitable for POST to Solr.
+       # The general structure of the output document is like this:
+       #   <?xml version="1.0" encoding="UTF-8"?>
+       #   <add>
+       #     <doc>
+       #       <field name="content">
+       #         key__=__value
+       #         key__=__another_value
+       #         other_key__=__yet another value
+       #       </field>
+       #     </doc>
+       #   </add>
+       # The document as generated has minimal newlines and formatting, however.
+       def pointyize_add
+         xml = ""
+         xml << START_XML << ADD_DOC
+         xml << (FLD_CHEF_ID_FMT % @obj_id)
+         xml << (FLD_CHEF_DB_FMT % @database)
+         xml << (FLD_CHEF_TY_FMT % @obj_type)
+         xml << START_CONTENT
+         content = ""
+         flattened_object.each do |field, values|
+           values.each do |v|
+             content << (KEYVAL_FMT % [field, v])
+           end
+         end
+         xml << content.fast_xs
+         xml << CLOSE_FIELD # ends content
+         xml << (FLD_DATA_BAG % @data_bag.fast_xs) if @data_bag
+         xml << END_ADD_DOC
+         @xml_time = Time.now.to_f - @start_time
+         xml
+       end
+
+       # Takes a succinct document id, like 2342, and turns it into something
+       # even more compact, like
+       # "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<delete><id>2342</id></delete>\n"
+       def pointyize_delete
+         xml = ""
+         xml << START_XML
+         xml << DELETE_DOC
+         xml << ID_OPEN
+         xml << @obj_id.to_s
+         xml << ID_CLOSE
+         xml << END_DELETE
+         xml
+       end
+
+       def post_to_solr(document, &logger_block)
+         log.debug("POSTing document to SOLR:\n#{document}")
+         http_req = EventMachine::HttpRequest.new(solr_url).post(:body => document, :timeout => 1200, :head => CONTENT_TYPE_XML)
+         http_request_started
+
+         http_req.callback do
+           completed
+           if http_req.response_header.status == 200
+             log.info(&logger_block)
+           else
+             log.error { "Failed to post to solr: #{indexed_object}" }
+           end
+         end
+         http_req.errback do
+           completed
+           log.error { "Failed to post to solr (connection error): #{indexed_object}" }
+         end
+       end
+
+       def completed
+         @solr_post_time = Time.now.to_f - @start_time
+         self.class.http_request_completed(self)
+         @on_completion_block.call
+       end
+
+       def transit_time
+         Time.now.utc.to_i - @enqueued_at
+       end
+
+       def solr_url
+         "#{Expander.config.solr_url}/update"
+       end
+
+       def indexed_object
+         "#{@obj_type}[#{@obj_id}] database[#{@database}]"
+       end
+
+       def http_request_started
+         self.class.http_request_started(self)
+       end
+
+       def eql?(other)
+         other.hash == hash
+       end
+
+       def hash
+         "#{action}#{indexed_object}#@enqueued_at#{self.class.name}".hash
+       end
+
+     end
+   end
+ end
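
Note: the following is a minimal sketch, not part of the gem, of the queue-message shape the Solrizer above expects. The field names mirror the ADD/ITEM/ID/TYPE/DATABASE/ENQUEUED_AT constants; the payload values are made up for illustration.

    require 'yajl'
    require 'chef/expander/solrizer'

    msg = Yajl::Encoder.encode(
      "action"  => "add",
      "payload" => {
        "type"        => "node",
        "id"          => "example-node-id",      # hypothetical object id
        "database"    => "chef",                 # hypothetical database name
        "enqueued_at" => Time.now.utc.to_i,
        "item"        => {"name" => "example.node", "run_list" => ["recipe[apache2]"]}
      }
    )

    solrizer = Chef::Expander::Solrizer.new(msg) { puts "message acknowledged" }
    solrizer.action    # => "add"
    solrizer.obj_type  # => "node"
    # solrizer.run would build the <add><doc>...</doc></add> document and POST it
    # to Expander.config.solr_url, so it needs a running EventMachine reactor.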
@@ -0,0 +1,41 @@
+ #
+ # Author:: Daniel DeLeo (<dan@opscode.com>)
+ # Author:: Seth Falcon (<seth@opscode.com>)
+ # Author:: Chris Walters (<cw@opscode.com>)
+ # Copyright:: Copyright (c) 2010-2011 Opscode, Inc.
+ # License:: Apache License, Version 2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ require 'open3'
+
+ module Chef
+   module Expander
+
+     VERSION = "11.0.0.alpha.1"
+
+     def self.version
+       @rev ||= begin
+         begin
+           rev = Open3.popen3("git rev-parse HEAD") {|stdin, stdout, stderr| stdout.read }.strip
+         rescue Errno::ENOENT
+           rev = ""
+         end
+         rev.empty? ? nil : " (#{rev})"
+       end
+       "#{VERSION}#@rev"
+     end
+
+   end
+ end
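
Note: a small illustration of the version helper above, assuming `git` is on the PATH and the working directory is a git checkout; otherwise the Errno::ENOENT rescue (or an empty rev-parse result) drops the suffix.

    Chef::Expander::VERSION   # => "11.0.0.alpha.1"
    Chef::Expander.version    # => "11.0.0.alpha.1 (<sha of HEAD>)"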
@@ -0,0 +1,106 @@
+ #
+ # Author:: Daniel DeLeo (<dan@opscode.com>)
+ # Author:: Seth Falcon (<seth@opscode.com>)
+ # Author:: Chris Walters (<cw@opscode.com>)
+ # Copyright:: Copyright (c) 2010-2011 Opscode, Inc.
+ # License:: Apache License, Version 2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ require 'eventmachine'
+ require 'amqp'
+ require 'mq'
+
+ require 'chef/expander/loggable'
+ require 'chef/expander/solrizer'
+
+ module Chef
+   module Expander
+     class VNode
+       include Loggable
+
+       attr_reader :vnode_number
+
+       attr_reader :supervise_interval
+
+       def initialize(vnode_number, supervisor, opts={})
+         @vnode_number = vnode_number.to_i
+         @supervisor = supervisor
+         @queue = nil
+         @stopped = false
+         @supervise_interval = opts[:supervise_interval] || 30
+       end
+
+       def start
+         @supervisor.vnode_added(self)
+
+         subscription_confirmed = Proc.new do
+           abort_on_multiple_subscribe
+           supervise_consumer_count
+         end
+
+         queue.subscribe(:ack => true, :confirm => subscription_confirmed) do |headers, payload|
+           log.debug {"got #{payload} size(#{payload.size} bytes) on queue #{queue_name}"}
+           solrizer = Solrizer.new(payload) { headers.ack }
+           solrizer.run
+         end
+
+       rescue MQ::Error => e
+         log.error {"Failed to start subscriber on #{queue_name} #{e.class.name}: #{e.message}"}
+       end
+
+       def supervise_consumer_count
+         EM.add_periodic_timer(supervise_interval) do
+           abort_on_multiple_subscribe
+         end
+       end
+
+       def abort_on_multiple_subscribe
+         queue.status do |message_count, subscriber_count|
+           if subscriber_count.to_i > 1
+             log.error { "Detected extra consumers (#{subscriber_count} total) on queue #{queue_name}, cancelling subscription" }
+             stop
+           end
+         end
+       end
+
+       def stop
+         log.debug {"Cancelling subscription on queue #{queue_name.inspect}"}
+         queue.unsubscribe if queue.subscribed?
+         @supervisor.vnode_removed(self)
+         @stopped = true
+       end
+
+       def stopped?
+         @stopped
+       end
+
+       def queue
+         @queue ||= begin
+           log.debug { "declaring queue #{queue_name}" }
+           MQ.queue(queue_name, :passive => false, :durable => true)
+         end
+       end
+
+       def queue_name
+         "vnode-#{@vnode_number}"
+       end
+
+       def control_queue_name
+         "#{queue_name}-control"
+       end
+
+     end
+   end
+ end
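
Note: a VNode only ever calls vnode_added and vnode_removed on the supervisor it is given (see #start and #stop above), so a minimal stand-in supervisor is enough for illustration. This is a hypothetical sketch and must run inside an AMQP/EventMachine reactor.

    require 'chef/expander/vnode'

    class NullSupervisor
      def vnode_added(vnode);   end
      def vnode_removed(vnode); end
    end

    vnode = Chef::Expander::VNode.new(42, NullSupervisor.new, :supervise_interval => 60)
    vnode.queue_name   # => "vnode-42"
    vnode.start        # declares the durable "vnode-42" queue and subscribes with :ack => true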
@@ -0,0 +1,265 @@
+ #
+ # Author:: Daniel DeLeo (<dan@opscode.com>)
+ # Author:: Seth Falcon (<seth@opscode.com>)
+ # Author:: Chris Walters (<cw@opscode.com>)
+ # Copyright:: Copyright (c) 2010-2011 Opscode, Inc.
+ # License:: Apache License, Version 2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ require 'yajl'
+ require 'eventmachine'
+ require 'amqp'
+ require 'mq'
+ require 'chef/expander/version'
+ require 'chef/expander/loggable'
+ require 'chef/expander/node'
+ require 'chef/expander/vnode'
+ require 'chef/expander/vnode_table'
+ require 'chef/expander/configuration'
+
+ module ::AMQP
+   def self.hard_reset!
+     MQ.reset rescue nil
+     stop
+     EM.stop rescue nil
+     Thread.current[:mq], @conn = nil, nil
+   end
+ end
+
+ module Chef
+   module Expander
+     class VNodeSupervisor
+       include Loggable
+       extend Loggable
+
+       COULD_NOT_CONNECT = /Could not connect to server/.freeze
+
+       def self.start_cluster_worker
+         @vnode_supervisor = new
+         @original_ppid = Process.ppid
+         trap_signals
+
+         vnodes = Expander.config.vnode_numbers
+
+         $0 = "chef-expander#{Expander.config.ps_tag} worker ##{Expander.config.index} (vnodes #{vnodes.min}-#{vnodes.max})"
+
+         AMQP.start(Expander.config.amqp_config) do
+           start_consumers
+           await_parent_death
+         end
+       end
+
+       def self.await_parent_death
+         @awaiting_parent_death = EM.add_periodic_timer(1) do
+           unless Process.ppid == @original_ppid
+             @awaiting_parent_death.cancel
+             stop_immediately("master process death")
+           end
+         end
+       end
+
+       def self.start
+         @vnode_supervisor = new
+         trap_signals
+
+         Expander.init_config(ARGV)
+
+         log.info("Chef Search Expander #{Expander.version} starting up.")
+
+         begin
+           AMQP.start(Expander.config.amqp_config) do
+             start_consumers
+           end
+         rescue AMQP::Error => e
+           if e.message =~ COULD_NOT_CONNECT
+             log.error { "Could not connect to rabbitmq. Make sure it is running and correctly configured." }
+             log.error { e.message }
+
+             AMQP.hard_reset!
+
+             sleep 5
+             retry
+           else
+             raise
+           end
+         end
+       end
+
+       def self.start_consumers
+         log.debug { "Setting prefetch count to 1" }
+         MQ.prefetch(1)
+
+         vnodes = Expander.config.vnode_numbers
+         log.info("Starting Consumers for vnodes #{vnodes.min}-#{vnodes.max}")
+         @vnode_supervisor.start(vnodes)
+       end
+
+       def self.trap_signals
+         Kernel.trap(:INT) { stop_immediately(:INT) }
+         Kernel.trap(:TERM) { stop_gracefully(:TERM) }
+       end
+
+       def self.stop_immediately(signal)
+         log.info { "Initiating immediate shutdown on signal (#{signal})" }
+         @vnode_supervisor.stop
+         EM.add_timer(1) do
+           AMQP.stop
+           EM.stop
+         end
+       end
+
+       def self.stop_gracefully(signal)
+         log.info { "Initiating graceful shutdown on signal (#{signal})" }
+         @vnode_supervisor.stop
+         wait_for_http_requests_to_complete
+       end
+
+       def self.wait_for_http_requests_to_complete
+         if Expander::Solrizer.http_requests_active?
+           log.info { "waiting for in progress HTTP Requests to complete" }
+           EM.add_timer(1) do
+             wait_for_http_requests_to_complete
+           end
+         else
+           log.info { "HTTP requests completed, shutting down" }
+           AMQP.stop
+           EM.stop
+         end
+       end
+
+       attr_reader :vnode_table
+
+       attr_reader :local_node
+
+       def initialize
+         @vnodes = {}
+         @vnode_table = VNodeTable.new(self)
+         @local_node = Node.local_node
+         @queue_name, @guid = nil, nil
+       end
+
+       def start(vnode_ids)
+         @local_node.start do |message|
+           process_control_message(message)
+         end
+
+         #start_vnode_table_publisher
+
+         Array(vnode_ids).each { |vnode_id| spawn_vnode(vnode_id) }
+       end
+
+       def stop
+         @local_node.stop
+
+         #log.debug { "stopping vnode table updater" }
+         #@vnode_table_publisher.cancel
+
+         log.info { "Stopping VNode queue subscribers" }
+         @vnodes.each do |vnode_number, vnode|
+           log.debug { "Stopping consumer on VNode #{vnode_number}" }
+           vnode.stop
+         end
+
+       end
+
+       def vnode_added(vnode)
+         log.debug { "vnode #{vnode.vnode_number} registered with supervisor" }
+         @vnodes[vnode.vnode_number.to_i] = vnode
+       end
+
+       def vnode_removed(vnode)
+         log.debug { "vnode #{vnode.vnode_number} unregistered from supervisor" }
+         @vnodes.delete(vnode.vnode_number.to_i)
+       end
+
+       def vnodes
+         @vnodes.keys.sort
+       end
+
+       def spawn_vnode(vnode_number)
+         VNode.new(vnode_number, self).start
+       end
+
+       def release_vnode
+         # TODO
+       end
+
+       def process_control_message(message)
+         control_message = parse_symbolic(message)
+         case control_message[:action]
+         when "claim_vnode"
+           spawn_vnode(control_message[:vnode_id])
+         when "recover_vnode"
+           recover_vnode(control_message[:vnode_id])
+         when "release_vnodes"
+           raise "todo"
+           release_vnode()
+         when "update_vnode_table"
+           @vnode_table.update_table(control_message[:data])
+         when "vnode_table_publish"
+           publish_vnode_table
+         when "status"
+           publish_status_to(control_message[:rsvp])
+         when "set_log_level"
+           set_log_level(control_message[:level], control_message[:rsvp])
+         else
+           log.error { "invalid control message #{control_message.inspect}" }
+         end
+       rescue Exception => e
+         log.error { "Error processing a control message." }
+         log.error { "#{e.class.name}: #{e.message}\n#{e.backtrace.join("\n")}" }
+       end
+
+       def start_vnode_table_publisher
+         @vnode_table_publisher = EM.add_periodic_timer(10) { publish_vnode_table }
+       end
+
+       def publish_vnode_table
+         status_update = @local_node.to_hash
+         status_update[:vnodes] = vnodes
+         status_update[:update] = :add
+         @local_node.broadcast_message(Yajl::Encoder.encode({:action => :update_vnode_table, :data => status_update}))
+       end
+
+       def publish_status_to(return_queue)
+         status_update = @local_node.to_hash
+         status_update[:vnodes] = vnodes
+         MQ.queue(return_queue).publish(Yajl::Encoder.encode(status_update))
+       end
+
+       def set_log_level(level, rsvp_to)
+         log.info { "setting log level to #{level} due to command from #{rsvp_to}" }
+         new_log_level = (Expander.config.log_level = level.to_sym)
+         reply = {:level => new_log_level, :node => @local_node.to_hash}
+         MQ.queue(rsvp_to).publish(Yajl::Encoder.encode(reply))
+       end
+
+       def recover_vnode(vnode_id)
+         if @vnode_table.local_node_is_leader?
+           log.debug { "Recovering vnode: #{vnode_id}" }
+           @local_node.shared_message(Yajl::Encoder.encode({:action => :claim_vnode, :vnode_id => vnode_id}))
+         else
+           log.debug { "Ignoring :recover_vnode message because this node is not the leader" }
+         end
+       end
+
+       def parse_symbolic(message)
+         Yajl::Parser.new(:symbolize_keys => true).parse(message)
+       end
+
+     end
+   end
+ end
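
Note: a hedged sketch, not from the gem, of the control-message shape process_control_message handles. parse_symbolic symbolizes the keys, so a JSON object with "action", "level", and "rsvp" fields is enough to drive set_log_level on any worker that receives it; the reply queue name here is hypothetical.

    require 'yajl'

    control_msg = Yajl::Encoder.encode(
      :action => "set_log_level",
      :level  => "debug",
      :rsvp   => "log-level-replies"   # hypothetical reply queue name
    )
    # Delivered via the node's control queue, this message invokes
    # set_log_level("debug", "log-level-replies"), which switches
    # Expander.config.log_level to :debug and publishes an acknowledgement
    # to the "log-level-replies" queue.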