chef-expander 0.10.0.beta.0

lib/chef/expander/flattener.rb
@@ -0,0 +1,79 @@
+ #
+ # Author:: Daniel DeLeo (<dan@opscode.com>)
+ # Author:: Seth Falcon (<seth@opscode.com>)
+ # Author:: Chris Walters (<cw@opscode.com>)
+ # Copyright:: Copyright (c) 2010-2011 Opscode, Inc.
+ # License:: Apache License, Version 2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ require 'chef/expander/configuration'
+
+ module Chef
+   module Expander
+     # Flattens and expands nested Hashes representing Chef objects
+     # (e.g., Nodes, Roles, DataBagItems, etc.) into flat Hashes so the
+     # objects are suitable to be saved into Solr. This code is more or
+     # less copy-pasted from chef/solr/index, which may or may not be a
+     # great idea, though it does minimize the dependencies and
+     # hopefully the memory use of chef-expander.
+     class Flattener
+       UNDERSCORE = '_'
+       X = 'X'
+
+       X_CHEF_id_CHEF_X = 'X_CHEF_id_CHEF_X'
+       X_CHEF_database_CHEF_X = 'X_CHEF_database_CHEF_X'
+       X_CHEF_type_CHEF_X = 'X_CHEF_type_CHEF_X'
+
+       def initialize(item)
+         @item = item
+       end
+
+       def flattened_item
+         @flattened_item || flatten_and_expand
+       end
+
+       def flatten_and_expand
+         @flattened_item = Hash.new {|hash, key| hash[key] = []}
+
+         @item.each do |key, value|
+           flatten_each([key.to_s], value)
+         end
+
+         @flattened_item.each_value { |values| values.uniq! }
+         @flattened_item
+       end
+
+       def flatten_each(keys, values)
+         case values
+         when Hash
+           values.each do |child_key, child_value|
+             add_field_value(keys, child_key)
+             flatten_each(keys + [child_key.to_s], child_value)
+           end
+         when Array
+           values.each { |child_value| flatten_each(keys, child_value) }
+         else
+           add_field_value(keys, values)
+         end
+       end
+
+       def add_field_value(keys, value)
+         value = value.to_s
+         @flattened_item[keys.join(UNDERSCORE)] << value
+         @flattened_item[keys.last] << value
+       end
+     end
+   end
+ end
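
To illustrate the flattening above, here is roughly what flattened_item returns for a small nested hash (the input values are made up for illustration; each leaf is indexed under both its full underscore-joined key path and its last key, and nested hash keys are also indexed as values of their parent key):

  item = { "name" => "web1", "kernel" => { "os" => "linux", "release" => "2.6.38" } }

  Chef::Expander::Flattener.new(item).flattened_item
  # => { "name"           => ["web1"],
  #      "kernel"         => ["os", "release"],
  #      "kernel_os"      => ["linux"],
  #      "os"             => ["linux"],
  #      "kernel_release" => ["2.6.38"],
  #      "release"        => ["2.6.38"] }
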
lib/chef/expander/loggable.rb
@@ -0,0 +1,40 @@
+ #
+ # Author:: Daniel DeLeo (<dan@opscode.com>)
+ # Author:: Seth Falcon (<seth@opscode.com>)
+ # Author:: Chris Walters (<cw@opscode.com>)
+ # Copyright:: Copyright (c) 2010-2011 Opscode, Inc.
+ # License:: Apache License, Version 2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ require 'chef/expander/logger'
+ require 'mixlib/log'
+
+ module Chef
+   module Expander
+     module Loggable
+
+       # TODO: it's admittedly janky to set up the default logging this way.
+       STDOUT.sync = true
+       LOGGER = Logger.new(STDOUT)
+       LOGGER.level = :debug
+
+       def log
+         LOGGER
+       end
+
+     end
+   end
+ end
+
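
A minimal sketch of how this mixin is used (the QueueConsumer class is made up for illustration; every includer shares the single module-level LOGGER, which writes to STDOUT at :debug as configured above):

  require 'chef/expander/loggable'

  class QueueConsumer
    include Chef::Expander::Loggable

    def drain
      log.debug { "draining queue" }   # goes to the shared STDOUT LOGGER
    end
  end
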
lib/chef/expander/logger.rb
@@ -0,0 +1,135 @@
+ #
+ # Author:: Daniel DeLeo (<dan@opscode.com>)
+ # Copyright:: Copyright (c) 2011 Opscode, Inc.
+ # License:: Apache License, Version 2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ require 'logger'
+
+ module Chef
+   module Expander
+
+     class InvalidLogDevice < ArgumentError
+     end
+
+     class InvalidLogLevel < ArgumentError
+     end
+
+     # Customized Logger class that dispenses with the unnecessary mutexing.
+     # As long as you write one line at a time, the OS will take care of keeping
+     # your output in order. Expander commonly runs as a cluster of worker
+     # processes, so the mutexing wasn't actually helping us anyway.
+     #
+     # We don't use the program name field in the logger, so support for that
+     # has been removed. The log format is also hardcoded since we don't ever
+     # change the format.
+     class Logger < ::Logger
+
+       LEVELS = { :debug => DEBUG, :info => INFO, :warn => WARN, :error => ERROR, :fatal => FATAL }
+       LEVEL_INTEGERS = LEVELS.invert
+       LEVEL_TO_STR = Hash[LEVEL_INTEGERS.map {|i, sym| [i, sym.to_s.upcase]}]
+
+       LOG_DEVICES = []
+
+       at_exit do
+         LOG_DEVICES.each {|io| io.close if io.respond_to?(:closed?) && !io.closed?}
+       end
+
+       attr_reader :log_device
+
+       # (re-)initialize the Logger with a new IO object or file to log to.
+       def init(log_device)
+         @log_device = initialize_log_device(log_device)
+       end
+
+       def initialize(log_device)
+         @level = DEBUG
+         init(log_device)
+       end
+
+       def level=(new_level)
+         @level = if new_level.kind_of?(Fixnum) && LEVEL_INTEGERS.key?(new_level)
+           new_level
+         elsif LEVELS.key?(new_level)
+           LEVELS[new_level]
+         else
+           raise InvalidLogLevel, "#{new_level} is not a valid log level. Valid log levels are [#{LEVEL_INTEGERS.keys.join(',')}] and [#{LEVELS.keys.join(',')}]"
+         end
+       end
+
+       def <<(msg)
+         @log_device.print(msg)
+       end
+
+       def add(severity = UNKNOWN, message = nil, progname = nil, &block)
+         return true unless severity >= @level
+
+         message ||= progname # level methods (e.g., #debug) pass the explicit message as progname
+
+         if message.nil? && block_given?
+           message = yield
+         end
+
+         self << sprintf("[%s] %s: %s\n", Time.new.rfc2822(), LEVEL_TO_STR[severity], msg2str(message))
+         true
+       end
+
+       alias :log :add
+
+       private
+
+       def msg2str(msg)
+         case msg
+         when ::String
+           msg
+         when ::Exception
+           "#{ msg.message } (#{ msg.class })\n" <<
+             (msg.backtrace || []).join("\n")
+         else
+           msg.inspect
+         end
+       end
+
+       def logging_at_severity?(severity = nil)
+       end
+
+       def initialize_log_device(dev)
+         unless dev.respond_to? :sync=
+           assert_valid_path!(dev)
+           dev = File.open(dev.to_str, "a")
+           LOG_DEVICES << dev
+         end
+
+         dev.sync = true
+         dev
+       end
+
+       def assert_valid_path!(path)
+         enclosing_directory = File.dirname(path)
+         unless File.directory?(enclosing_directory)
+           raise InvalidLogDevice, "You must create the enclosing directory #{enclosing_directory} before the log file #{path} can be created."
+         end
+         if File.exist?(path)
+           unless File.writable?(path)
+             raise InvalidLogDevice, "The log file you specified (#{path}) is not writable by user #{Process.euid}"
+           end
+         elsif !File.writable?(enclosing_directory)
+           raise InvalidLogDevice, "You specified a log file #{path} but user #{Process.euid} is not permitted to create files there."
+         end
+       end
+
+     end
+   end
+ end
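
A short usage sketch for this Logger, assuming only what is defined above (the file path is illustrative; per assert_valid_path!, its directory must already exist and be writable, and Time#rfc2822 needs the stdlib 'time' extension loaded):

  require 'time'                             # Time#rfc2822, used by the hardcoded log format
  require 'chef/expander/logger'

  logger = Chef::Expander::Logger.new(STDOUT)
  logger.level = :info                       # accepts the level symbols above or ::Logger integer constants
  logger.info("expander worker starting")    # writes "[<rfc2822 timestamp>] INFO: expander worker starting"

  logger.init("/var/log/chef/expander.log")  # re-point the same logger at a file; it is opened in append
                                             # mode, set to sync, and closed by the at_exit hook
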
lib/chef/expander/node.rb
@@ -0,0 +1,177 @@
+ #
+ # Author:: Daniel DeLeo (<dan@opscode.com>)
+ # Author:: Seth Falcon (<seth@opscode.com>)
+ # Author:: Chris Walters (<cw@opscode.com>)
+ # Copyright:: Copyright (c) 2010-2011 Opscode, Inc.
+ # License:: Apache License, Version 2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ require 'uuidtools'
+ require 'amqp'
+ require 'mq'
+ require 'open3'
+
+ require 'chef/expander/loggable'
+
+ module Chef
+   module Expander
+     class Node
+
+       include Loggable
+
+       def self.from_hash(node_info)
+         new(node_info[:guid], node_info[:hostname_f], node_info[:pid])
+       end
+
+       def self.local_node
+         new(guid, hostname_f, Process.pid)
+       end
+
+       def self.guid
+         return @guid if @guid
+         @guid = UUIDTools::UUID.random_create.to_s
+       end
+
+       def self.hostname_f
+         @hostname ||= Open3.popen3("hostname -f") {|stdin, stdout, stderr| stdout.read }.strip
+       end
+
+       attr_reader :guid
+
+       attr_reader :hostname_f
+
+       attr_reader :pid
+
+       def initialize(guid, hostname_f, pid)
+         @guid, @hostname_f, @pid = guid, hostname_f, pid
+       end
+
+       def start(&message_handler)
+         attach_to_queue(exclusive_control_queue, "exclusive control", &message_handler)
+         attach_to_queue(shared_control_queue, "shared control", &message_handler)
+         attach_to_queue(broadcast_control_queue, "broadcast control", &message_handler)
+       end
+
+       def attach_to_queue(queue, colloquial_name, &message_handler)
+         queue.subscribe(:ack => true) do |headers, payload|
+           log.debug { "received message on #{colloquial_name} queue: #{payload}" }
+           message_handler.call(payload)
+           headers.ack
+         end
+       end
+
+       def stop
+         log.debug { "unsubscribing from broadcast control queue" }
+         broadcast_control_queue.unsubscribe(:nowait => false)
+
+         log.debug { "unsubscribing from shared control queue" }
+         shared_control_queue.unsubscribe(:nowait => false)
+
+         log.debug { "unsubscribing from exclusive control queue" }
+         exclusive_control_queue.unsubscribe(:nowait => false)
+       end
+
+       def direct_message(message)
+         log.debug { "publishing direct message to node #{identifier}: #{message}" }
+         exclusive_control_queue.publish(message)
+       end
+
+       def shared_message(message)
+         log.debug { "publishing shared message #{message}" }
+         shared_control_queue.publish(message)
+       end
+
+       def broadcast_message(message)
+         log.debug { "publishing broadcast message #{message}" }
+         broadcast_control_exchange.publish(message)
+       end
+
+       # The exclusive control queue is for point-to-point messaging, i.e.,
+       # messages directly addressed to this node
+       def exclusive_control_queue
+         @exclusive_control_queue ||= begin
+           log.debug { "declaring exclusive control queue #{exclusive_control_queue_name}" }
+           MQ.queue(exclusive_control_queue_name)
+         end
+       end
+
+       # The shared control queue is for 1 to (1 of N) messaging, i.e.,
+       # messages that can go to any one node.
+       def shared_control_queue
+         @shared_control_queue ||= begin
+           log.debug { "declaring shared control queue #{shared_control_queue_name}" }
+           MQ.queue(shared_control_queue_name)
+         end
+       end
+
+       # The broadcast control queue is for 1 to N messaging, i.e.,
+       # messages that go to every node
+       def broadcast_control_queue
+         @broadcast_control_queue ||= begin
+           log.debug { "declaring broadcast control queue #{broadcast_control_queue_name}" }
+           q = MQ.queue(broadcast_control_queue_name)
+           log.debug { "binding broadcast control queue to broadcast control exchange" }
+           q.bind(broadcast_control_exchange)
+           q
+         end
+       end
+
+       def broadcast_control_exchange
+         @broadcast_control_exchange ||= begin
+           log.debug { "declaring broadcast control exchange #{broadcast_control_exchange_name}" }
+           MQ.fanout(broadcast_control_exchange_name, :nowait => false)
+         end
+       end
+
+       def shared_control_queue_name
+         SHARED_CONTROL_QUEUE_NAME
+       end
+
+       def broadcast_control_queue_name
+         @broadcast_control_queue_name ||= "#{identifier}--broadcast"
+       end
+
+       def broadcast_control_exchange_name
+         BROADCAST_CONTROL_EXCHANGE_NAME
+       end
+
+       def exclusive_control_queue_name
+         @exclusive_control_queue_name ||= "#{identifier}--exclusive-control"
+       end
+
+       def identifier
+         "#{hostname_f}--#{pid}--#{guid}"
+       end
+
+       def ==(other)
+         other.respond_to?(:guid) && other.respond_to?(:hostname_f) && other.respond_to?(:pid) &&
+           (other.guid == guid) && (other.hostname_f == hostname_f) && (other.pid == pid)
+       end
+
+       def eql?(other)
+         (other.class == self.class) && (other.hash == hash)
+       end
+
+       def hash
+         identifier.hash
+       end
+
+       def to_hash
+         {:guid => @guid, :hostname_f => @hostname_f, :pid => @pid}
+       end
+
+     end
+   end
+ end
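
A rough sketch of how a worker process might drive Node, assuming an EventMachine reactor and AMQP connection are already established elsewhere (the broker configuration is not part of this file, and the message payloads below are made up):

  # inside EM.run, with the AMQP/MQ connection established
  node = Chef::Expander::Node.local_node

  node.start do |message|
    # one handler receives messages from all three control queues
    puts "control message: #{message}"
  end

  node.broadcast_message("update_status")   # fanout to every node via the broadcast exchange
  node.shared_message("rebalance")          # any one node takes this from the shared queue
  node.direct_message("shut_down")          # only this node's exclusive control queue sees it
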
lib/chef/expander/solrizer.rb
@@ -0,0 +1,275 @@
+ #
+ # Author:: Daniel DeLeo (<dan@opscode.com>)
+ # Author:: Seth Falcon (<seth@opscode.com>)
+ # Author:: Chris Walters (<cw@opscode.com>)
+ # Copyright:: Copyright (c) 2010-2011 Opscode, Inc.
+ # License:: Apache License, Version 2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+
+ require 'set'
+ require 'yajl'
+ require 'fast_xs'
+ require 'em-http-request'
+ require 'chef/expander/loggable'
+ require 'chef/expander/flattener'
+
+ module Chef
+   module Expander
+     class Solrizer
+
+       @active_http_requests = Set.new
+
+       def self.http_request_started(instance)
+         @active_http_requests << instance
+       end
+
+       def self.http_request_completed(instance)
+         @active_http_requests.delete(instance)
+       end
+
+       def self.http_requests_active?
+         !@active_http_requests.empty?
+       end
+
+       def self.clear_http_requests
+         @active_http_requests.clear
+       end
+
+       include Loggable
+
+       ADD = "add"
+       DELETE = "delete"
+       SKIP = "skip"
+
+       ITEM = "item"
+       ID = "id"
+       TYPE = "type"
+       DATABASE = "database"
+       ENQUEUED_AT = "enqueued_at"
+
+       DATA_BAG_ITEM = "data_bag_item"
+       DATA_BAG = "data_bag"
+
+       X_CHEF_id_CHEF_X = 'X_CHEF_id_CHEF_X'
+       X_CHEF_database_CHEF_X = 'X_CHEF_database_CHEF_X'
+       X_CHEF_type_CHEF_X = 'X_CHEF_type_CHEF_X'
+
+       CONTENT_TYPE_XML = {"Content-Type" => "text/xml"}
+
+       attr_reader :action
+
+       attr_reader :indexer_payload
+
+       attr_reader :chef_object
+
+       attr_reader :obj_id
+
+       attr_reader :obj_type
+
+       attr_reader :database
+
+       attr_reader :enqueued_at
+
+       def initialize(object_command_json, &on_completion_block)
+         @start_time = Time.now.to_f
+         @on_completion_block = on_completion_block
+         if parsed_message = parse(object_command_json)
+           @action = parsed_message["action"]
+           @indexer_payload = parsed_message["payload"]
+
+           extract_object_fields if @indexer_payload
+         else
+           @action = SKIP
+         end
+       end
+
+       def extract_object_fields
+         @chef_object = @indexer_payload[ITEM]
+         @database = @indexer_payload[DATABASE]
+         @obj_id = @indexer_payload[ID]
+         @obj_type = @indexer_payload[TYPE]
+         @enqueued_at = @indexer_payload[ENQUEUED_AT]
+         @data_bag = @obj_type == DATA_BAG_ITEM ? @chef_object[DATA_BAG] : nil
+       end
+
+       def parse(serialized_object)
+         Yajl::Parser.parse(serialized_object)
+       rescue Yajl::ParseError
+         log.error { "cannot index object because it is invalid JSON: #{serialized_object}" }
+       end
+
+       def run
+         case @action
+         when ADD
+           add
+         when DELETE
+           delete
+         when SKIP
+           completed
+           log.info { "not indexing this item because of malformed JSON" }
+         else
+           completed
+           log.error { "cannot index object because it has an invalid action #{@action}" }
+         end
+       end
+
+       def add
+         post_to_solr(pointyize_add) do
+           ["indexed #{indexed_object}",
+            "transit,xml,solr-post |",
+            [transit_time, @xml_time, @solr_post_time].join(","),
+            "|"
+           ].join(" ")
+         end
+       rescue Exception => e
+         log.error { "#{e.class.name}: #{e.message}\n#{e.backtrace.join("\n")}" }
+       end
+
+       def delete
+         post_to_solr(pointyize_delete) { "deleted #{indexed_object} transit-time[#{transit_time}s]" }
+       rescue Exception => e
+         log.error { "#{e.class.name}: #{e.message}\n#{e.backtrace.join("\n")}" }
+       end
+
+       def flattened_object
+         flattened_object = Flattener.new(@chef_object).flattened_item
+
+         flattened_object[X_CHEF_id_CHEF_X] = [@obj_id]
+         flattened_object[X_CHEF_database_CHEF_X] = [@database]
+         flattened_object[X_CHEF_type_CHEF_X] = [@obj_type]
+
+         log.debug { "adding flattened object to Solr: #{flattened_object.inspect}" }
+
+         flattened_object
+       end
+
+       START_XML = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+       ADD_DOC = "<add><doc>"
+       DELETE_DOC = "<delete>"
+       ID_OPEN = "<id>"
+       ID_CLOSE = "</id>"
+       END_ADD_DOC = "</doc></add>\n"
+       END_DELETE = "</delete>\n"
+       START_CONTENT = '<field name="content">'
+       CLOSE_FIELD = "</field>"
+
+       FLD_CHEF_ID_FMT = '<field name="X_CHEF_id_CHEF_X">%s</field>'
+       FLD_CHEF_DB_FMT = '<field name="X_CHEF_database_CHEF_X">%s</field>'
+       FLD_CHEF_TY_FMT = '<field name="X_CHEF_type_CHEF_X">%s</field>'
+       FLD_DATA_BAG = '<field name="data_bag">%s</field>'
+
+       KEYVAL_FMT = "%s__=__%s "
+
+       # Takes a flattened hash where the values are arrays and converts it into
+       # a dignified XML document suitable for POST to Solr.
+       # The general structure of the output document is like this:
+       #   <?xml version="1.0" encoding="UTF-8"?>
+       #   <add>
+       #     <doc>
+       #       <field name="content">
+       #         key__=__value
+       #         key__=__another_value
+       #         other_key__=__yet another value
+       #       </field>
+       #     </doc>
+       #   </add>
+       # The document as generated has minimal newlines and formatting, however.
+       def pointyize_add
+         xml = ""
+         xml << START_XML << ADD_DOC
+         xml << (FLD_CHEF_ID_FMT % @obj_id)
+         xml << (FLD_CHEF_DB_FMT % @database)
+         xml << (FLD_CHEF_TY_FMT % @obj_type)
+         xml << START_CONTENT
+         content = ""
+         flattened_object.each do |field, values|
+           values.each do |v|
+             content << (KEYVAL_FMT % [field, v])
+           end
+         end
+         xml << content.fast_xs
+         xml << CLOSE_FIELD # ends content
+         xml << (FLD_DATA_BAG % @data_bag.fast_xs) if @data_bag
+         xml << END_ADD_DOC
+         @xml_time = Time.now.to_f - @start_time
+         xml
+       end
+
+       # Takes a succinct document id, like 2342, and turns it into something
+       # even more compact, like
+       #   "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<delete><id>2342</id></delete>\n"
+       def pointyize_delete
+         xml = ""
+         xml << START_XML
+         xml << DELETE_DOC
+         xml << ID_OPEN
+         xml << @obj_id.to_s
+         xml << ID_CLOSE
+         xml << END_DELETE
+         xml
+       end
+
+       def post_to_solr(document, &logger_block)
+         log.debug("POSTing document to SOLR:\n#{document}")
+         http_req = EventMachine::HttpRequest.new(solr_url).post(:body => document, :timeout => 1200, :head => CONTENT_TYPE_XML)
+         http_request_started
+
+         http_req.callback do
+           completed
+           if http_req.response_header.status == 200
+             log.info(&logger_block)
+           else
+             log.error { "Failed to post to solr: #{indexed_object}" }
+           end
+         end
+         http_req.errback do
+           completed
+           log.error { "Failed to post to solr (connection error): #{indexed_object}" }
+         end
+       end
+
+       def completed
+         @solr_post_time = Time.now.to_f - @start_time
+         self.class.http_request_completed(self)
+         @on_completion_block.call
+       end
+
+       def transit_time
+         Time.now.utc.to_i - @enqueued_at
+       end
+
+       def solr_url
+         'http://127.0.0.1:8983/solr/update'
+       end
+
+       def indexed_object
+         "#{@obj_type}[#{@obj_id}] database[#{@database}]"
+       end
+
+       def http_request_started
+         self.class.http_request_started(self)
+       end
+
+       def eql?(other)
+         other.hash == hash
+       end
+
+       def hash
+         "#{action}#{indexed_object}#@enqueued_at#{self.class.name}".hash
+       end
+
+     end
+   end
+ end
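
To make the add path concrete, here is a small, made-up indexing message in the shape extract_object_fields expects, and (reformatted with line breaks for readability) the document pointyize_add builds from it; the real string has no extra newlines and its content field is escaped with fast_xs:

  json = Yajl::Encoder.encode({
    "action"  => "add",
    "payload" => {
      "item"        => { "role" => "webserver" },
      "id"          => "2342",
      "database"    => "chef",
      "type"        => "node",
      "enqueued_at" => Time.now.utc.to_i
    }
  })

  solrizer = Chef::Expander::Solrizer.new(json) { }   # no-op completion block
  solrizer.pointyize_add
  # <?xml version="1.0" encoding="UTF-8"?>
  # <add><doc>
  #   <field name="X_CHEF_id_CHEF_X">2342</field>
  #   <field name="X_CHEF_database_CHEF_X">chef</field>
  #   <field name="X_CHEF_type_CHEF_X">node</field>
  #   <field name="content">role__=__webserver X_CHEF_id_CHEF_X__=__2342
  #     X_CHEF_database_CHEF_X__=__chef X_CHEF_type_CHEF_X__=__node </field>
  # </doc></add>
  #
  # The X_CHEF_* values appear both as their own fields and inside the content
  # field because flattened_object injects them into the flattened hash before
  # pointyize_add serializes it.
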