logstash-input-elasticsearch 0.1.0 → 0.1.1

checksums.yaml CHANGED
@@ -1,15 +1,7 @@
 ---
-!binary "U0hBMQ==":
-  metadata.gz: !binary |-
-    ZGM3OGYyOWE4NTc1ODgxNGFmOTBkNzI1MWExYmMyOWExZTZlZjQyNg==
-  data.tar.gz: !binary |-
-    YjczYjE1ZWRhMjg4NmMzNjg0M2EwZTExMjEyNDI4YTk4MzU1MmZhYw==
+SHA1:
+  metadata.gz: 1bc979eb2c5fe6019bd02366835ed40451cf3b6d
+  data.tar.gz: 60dfe9d9f4445652dbdf03145690ddd492c98d08
 SHA512:
-  metadata.gz: !binary |-
-    NDNiNmFhMDRjMjEwMjAyMDg3NmE3ZmM2M2I4MzdiMThhMDI0Mjk0YmJiYWY1
-    NzFjNjM1YTcyOWYyY2NkMzkwMTVhOGQ5N2NmNTZjZGVmODQ3YTYwN2VmMDAy
-    MGM5NTViNThhNzJmNWU4NWYyOTg0MTk2NjQwOWFkYzY5Zjk2YTM=
-  data.tar.gz: !binary |-
-    NGYxYjNlY2I4NjJiNWU1YTkyZjYwYmNjYzFhNGE3ZDI1MWY3N2VlMTFjNjBm
-    OGEzNWUyZjQ2Yjc0ZmJmNGU0NTVmYTE2Y2JhMTlhMDgwY2UzZjBhZWFkZTI4
-    ZmYwNTk4MmNiMjRhYWVlMzEyNzEyMDI0MjYyMTM3OTQwZDY3MWE=
+  metadata.gz: 4c2f801e6ca9e6acf2a8b4ead31fa9ca0f3251afe52f989cf15792893118aaf410dca883761e1c195eeb5ff3a50970c7a8b86a7a24057d4055422ed921c60e76
+  data.tar.gz: 141701089ffcfbfd945d21a30fd5c45377eb683a8ac5b263d639ec6e7a0fcb7b022f4c9e0a951a56b11d170fcd84d2b279694acd03d4f551eba2160e172a6297
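
The new format stores digests as plain hex rather than base64-wrapped !binary scalars, so they can be checked directly with standard tooling. A minimal sketch of verifying them, assuming the gem has been unpacked so that checksums.yaml and metadata.gz sit in the current directory:

    # Verify the recorded SHA512 of metadata.gz against the actual file
    # (hypothetical paths; adjust to wherever the gem was unpacked).
    require "yaml"
    require "digest"

    sums = YAML.load_file("checksums.yaml")
    actual = Digest::SHA512.file("metadata.gz").hexdigest
    raise "checksum mismatch for metadata.gz" unless actual == sums["SHA512"]["metadata.gz"]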
data/Gemfile CHANGED
@@ -1,4 +1,3 @@
-source 'http://rubygems.org'
-gem 'rake'
-gem 'gem_publisher'
-gem 'archive-tar-minitar'
+source 'https://rubygems.org'
+gemspec
+gem "logstash", :github => "elasticsearch/logstash", :branch => "1.5"
data/LICENSE ADDED
@@ -0,0 +1,13 @@
+Copyright (c) 2012-2014 Elasticsearch <http://www.elasticsearch.org>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
data/Rakefile CHANGED
@@ -4,3 +4,4 @@ task :default do
   system("rake -T")
 end
 
+require "logstash/devutils/rake"
data/lib/logstash/inputs/elasticsearch.rb CHANGED
@@ -1,35 +1,34 @@
 # encoding: utf-8
 require "logstash/inputs/base"
 require "logstash/namespace"
-require "logstash/util/socket_peer"
-require "logstash/json"
+require "base64"
 
 # Read from an Elasticsearch cluster, based on search query results.
 # This is useful for replaying test logs, reindexing, etc.
 #
 # Example:
-#
+# [source,ruby]
 #     input {
 #       # Read all documents from Elasticsearch matching the given query
 #       elasticsearch {
 #         host => "localhost"
-#         query => "ERROR"
+#         query => '{ "query": { "match": { "statuscode": 200 } } }'
 #       }
 #     }
 #
 # This would create an Elasticsearch query with the following format:
+# [source,json]
+#     http://localhost:9200/logstash-*/_search?q='{ "query": { "match": { "statuscode": 200 } } }'&scroll=1m&size=1000
 #
-#     http://localhost:9200/logstash-*/_search?q=ERROR&scroll=1m&size=1000
-#
-# * TODO(sissel): Option to keep the index, type, and doc id so we can do reindexing?
+# TODO(sissel): Option to keep the index, type, and doc id so we can do reindexing?
 class LogStash::Inputs::Elasticsearch < LogStash::Inputs::Base
   config_name "elasticsearch"
   milestone 1
 
   default :codec, "json"
 
-  # The IP address or hostname of your Elasticsearch server.
-  config :host, :validate => :string, :required => true
+  # List of elasticsearch hosts to use for querying.
+  config :hosts, :validate => :array
 
   # The HTTP port of your Elasticsearch server's REST interface.
   config :port, :validate => :number, :default => 9200
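
Putting the reworked options together (including the user, password, ssl, and ca_file settings added in the next hunk), a pipeline using this version of the plugin might look like the following sketch; the host name, credentials, and CA path are placeholders:

    input {
      elasticsearch {
        hosts    => ["es-node01"]
        query    => '{ "query": { "match": { "statuscode": 200 } } }'
        scroll   => "1m"
        ssl      => true
        ca_file  => "/path/to/ca.crt"
        user     => "logstash"
        password => "secret"
      }
    }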
@@ -52,83 +51,74 @@ class LogStash::Inputs::Elasticsearch < LogStash::Inputs::Base
   # round trip (i.e. between the previous scan scroll request, to the next).
   config :scroll, :validate => :string, :default => "1m"
 
+  # Basic Auth - username
+  config :user, :validate => :string
+
+  # Basic Auth - password
+  config :password, :validate => :password
+
+  # SSL
+  config :ssl, :validate => :boolean, :default => false
+
+  # SSL Certificate Authority file
+  config :ca_file, :validate => :path
+
   public
   def register
-    require "ftw"
-    @agent = FTW::Agent.new
+    require "elasticsearch"
 
-    params = {
-      "q" => @query,
-      "scroll" => @scroll,
-      "size" => "#{@size}",
+    @options = {
+      index: @index,
+      body: @query,
+      scroll: @scroll,
+      size: @size
     }
-    params['search_type'] = "scan" if @scan
 
-    @search_url = "http://#{@host}:#{@port}/#{@index}/_search?#{encode(params)}"
-    @scroll_url = "http://#{@host}:#{@port}/_search/scroll?#{encode({"scroll" => @scroll})}"
-  end # def register
+    @options[:search_type] = 'scan' if @scan
 
-  private
-  def encode(hash)
-    return hash.collect do |key, value|
-      CGI.escape(key) + "=" + CGI.escape(value)
-    end.join("&")
-  end # def encode
+    transport_options = {}
 
-  private
-  def execute_search_request
-    response = @agent.get!(@search_url)
-    json = ""
-    response.read_body { |c| json << c }
-    json
-  end
+    if @user && @password
+      token = Base64.strict_encode64("#{@user}:#{@password.value}")
+      transport_options[:headers] = { Authorization: "Basic #{token}" }
+    end
 
-  private
-  def execute_scroll_request(scroll_id)
-    response = @agent.post!(@scroll_url, :body => scroll_id)
-    json = ""
-    response.read_body { |c| json << c }
-    json
-  end
+    hosts = if @ssl then
+      @hosts.map {|h| { host: h, scheme: 'https' } }
+    else
+      @hosts
+    end
+
+    if @ssl && @ca_file
+      transport_options[:ssl] = { ca_file: @ca_file }
+    end
+
+    @client = Elasticsearch::Client.new hosts: hosts, transport_options: transport_options
+
+  end # def register
 
   public
   def run(output_queue)
-    result = LogStash::Json.load(execute_search_request)
-    scroll_id = result["_scroll_id"]
 
-    # When using the search_type=scan we don't get an initial result set.
-    # So we do it here.
+    # get first wave of data
+    r = @client.search @options
+
+    # since 'scan' doesn't return data on the search call, do an extra scroll
     if @scan
-      result = LogStash::Json.load(execute_scroll_request(scroll_id))
+      r = scroll_request(r['_scroll_id'])
     end
 
-    loop do
-      break if result.nil?
-      hits = result["hits"]["hits"]
-      break if hits.empty?
-
-      hits.each do |hit|
-        # Hack to make codecs work
-        @codec.decode(LogStash::Json.dump(hit["_source"])) do |event|
-          decorate(event)
-          output_queue << event
-        end
-      end
-
-      # Get the scroll id from the previous result set and use it for getting the next data set
-      scroll_id = result["_scroll_id"]
-
-      # Fetch the next result set
-      result = LogStash::Json.load(execute_scroll_request(scroll_id))
-
-      if result["error"]
-        @logger.warn(result["error"], :request => scroll_url)
-        # TODO(sissel): raise an error instead of breaking
-        break
+    while r['hits']['hits'].any? do
+      r['hits']['hits'].each do |event|
+        decorate(event)
+        output_queue << event
       end
-
+      r = scroll_request(r['_scroll_id'])
     end
-  rescue LogStash::ShutdownSignal
-    # Do nothing, let us quit.
   end # def run
+
+  private
+  def scroll_request scroll_id
+    @client.scroll(body: scroll_id, scroll: @scroll)
+  end
 end # class LogStash::Inputs::Elasticsearch
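
For reference, the same client setup and scan-and-scroll flow that register and run implement above can be sketched against the elasticsearch-ruby gem directly; the host, credentials, CA path, and query below are placeholders:

    require "elasticsearch"
    require "base64"

    # Basic Auth is passed as a pre-built Authorization header, as in register.
    token = Base64.strict_encode64("logstash:secret")
    client = Elasticsearch::Client.new(
      hosts: [{ host: "es-node01", scheme: "https" }],
      transport_options: { headers: { Authorization: "Basic #{token}" },
                           ssl: { ca_file: "/path/to/ca.crt" } })

    # The first search opens the scroll window; with search_type=scan it
    # returns a scroll id but no hits, hence the extra scroll call.
    r = client.search(index: "logstash-*",
                      body: '{ "query": { "match_all": {} } }',
                      scroll: "1m", size: 1000, search_type: "scan")
    r = client.scroll(body: r["_scroll_id"], scroll: "1m")

    # Keep scrolling until a page comes back empty, mirroring the run loop.
    while r["hits"]["hits"].any?
      r["hits"]["hits"].each { |hit| puts hit["_id"] }
      r = client.scroll(body: r["_scroll_id"], scroll: "1m")
    end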
data/logstash-input-elasticsearch.gemspec CHANGED
@@ -1,13 +1,13 @@
 Gem::Specification.new do |s|
 
   s.name = 'logstash-input-elasticsearch'
-  s.version = '0.1.0'
+  s.version = '0.1.1'
   s.licenses = ['Apache License (2.0)']
   s.summary = "Read from an Elasticsearch cluster, based on search query results"
-  s.description = "Read from an Elasticsearch cluster, based on search query results"
+  s.description = "This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/plugin install gemname. This gem is not a stand-alone program"
   s.authors = ["Elasticsearch"]
-  s.email = 'richard.pijnenburg@elasticsearch.com'
-  s.homepage = "http://logstash.net/"
+  s.email = 'info@elasticsearch.com'
+  s.homepage = "http://www.elasticsearch.org/guide/en/logstash/current/index.html"
   s.require_paths = ["lib"]
 
   # Files
@@ -17,13 +17,15 @@ Gem::Specification.new do |s|
   s.test_files = s.files.grep(%r{^(test|spec|features)/})
 
   # Special flag to let us know this is actually a logstash plugin
-  s.metadata = { "logstash_plugin" => "true", "group" => "input" }
+  s.metadata = { "logstash_plugin" => "true", "logstash_group" => "input" }
 
   # Gem dependencies
   s.add_runtime_dependency 'logstash', '>= 1.4.0', '< 2.0.0'
 
-  s.add_runtime_dependency 'ftw', ['~> 0.0.39']
+  s.add_runtime_dependency 'elasticsearch', ['>= 1.0.6', '~> 1.0']
+
   s.add_runtime_dependency 'logstash-codec-json'
 
+  s.add_development_dependency 'logstash-devutils'
 end
 
data/spec/inputs/elasticsearch_spec.rb CHANGED
@@ -1,60 +1,115 @@
-require "spec_helper"
+require "logstash/devutils/rspec/spec_helper"
 require "logstash/inputs/elasticsearch"
+require "elasticsearch"
 
 describe "inputs/elasticsearch" do
-
-
-  search_response = <<-RESPONSE
-  {
-    "_scroll_id":"xxx",
-    "took":5,
-    "timed_out":false,
-    "_shards":{"total":15,"successful":15,"failed":0},
-    "hits":{
-      "total":1000050,
-      "max_score":1.0,
-      "hits":[
-        {
-          "_index":"logstash2",
-          "_type":"logs",
-          "_id":"AmaqL7VuSWKF-F6N_Gz72g",
-          "_score":1.0,
-          "_source" : {
-            "message":"foobar",
-            "@version":"1",
-            "@timestamp":"2014-05-19T21:08:39.000Z",
-            "host":"colin-mbp13r"
-          }
-        }
-      ]
+
+  it "should retrieve json event from elasticsearch" do
+
+    config = %q[
+      input {
+        elasticsearch {
+          hosts => ["node01"]
+          scan => false
+          query => '{ "query": { "match": { "city_name": "Okinawa" } }, "fields": ["message"] }'
+        }
       }
-  }
-  RESPONSE
+    ]
 
-  scroll_response = <<-RESPONSE
-  {
-    "hits":{
-      "hits":[]
+    response = {
+      "_scroll_id" => "cXVlcnlUaGVuRmV0Y2g",
+      "took" => 27,
+      "timed_out" => false,
+      "_shards" => {
+        "total" => 169,
+        "successful" => 169,
+        "failed" => 0
+      },
+      "hits" => {
+        "total" => 1,
+        "max_score" => 1.0,
+        "hits" => [ {
+          "_index" => "logstash-2014.10.12",
+          "_type" => "logs",
+          "_id" => "C5b2xLQwTZa76jBmHIbwHQ",
+          "_score" => 1.0,
+          "fields" => {"message" => ["ohayo"] }
+        } ]
       }
     }
-  RESPONSE
 
-  config <<-CONFIG
-    input {
-      elasticsearch {
-        host => "localhost"
-        scan => false
+    scroll_response = {
+      "_scroll_id" => "r453Wc1jh0caLJhSDg",
+      "hits" => { "hits" => [] }
+    }
+
+    client = Elasticsearch::Client.new
+    expect(Elasticsearch::Client).to receive(:new).with(any_args).and_return(client)
+    expect(client).to receive(:search).with(any_args).and_return(response)
+    expect(client).to receive(:scroll).with({:body=>"cXVlcnlUaGVuRmV0Y2g", :scroll=>"1m"}).and_return(scroll_response)
+
+    pipeline = LogStash::Pipeline.new(config)
+    queue = Queue.new
+    pipeline.instance_eval do
+      @output_func = lambda { |event| queue << event }
+    end
+    pipeline_thread = Thread.new { pipeline.run }
+    event = queue.pop
+
+    insist { event["fields"]["message"] } == [ "ohayo" ]
+
+    pipeline_thread.join
+  end
+
+  it "should retrieve json event from elasticsearch with scan" do
+
+    config = %q[
+      input {
+        elasticsearch {
+          hosts => ["node01"]
+          scan => true
+          query => '{ "query": { "match": { "city_name": "Okinawa" } }, "fields": ["message"] }'
+        }
       }
+    ]
+
+    scan_response = {
+      "_scroll_id" => "DcrY3G1xff6SB",
     }
-  CONFIG
 
-  it "should retrieve json event from elasticsearch" do
-    # I somewhat duplicated our "input" rspec extension because I needed to add mocks for the actual ES calls
-    # and rspec expectations need to be in "it" statement but the "input" extension defines the "it"
-    # TODO(colin) see how we can improve our rspec extension to better integrate in these scenarios
+    scroll_responses = [
+      {
+        "_scroll_id" => "cXVlcnlUaGVuRmV0Y2g",
+        "took" => 27,
+        "timed_out" => false,
+        "_shards" => {
+          "total" => 169,
+          "successful" => 169,
+          "failed" => 0
+        },
+        "hits" => {
+          "total" => 1,
+          "max_score" => 1.0,
+          "hits" => [ {
+            "_index" => "logstash-2014.10.12",
+            "_type" => "logs",
+            "_id" => "C5b2xLQwTZa76jBmHIbwHQ",
+            "_score" => 1.0,
+            "fields" => {"message" => ["ohayo"] }
+          } ]
+        }
+      },
+      {
+        "_scroll_id" => "r453Wc1jh0caLJhSDg",
+        "hits" => { "hits" => [] }
+      }
+    ]
 
-    expect_any_instance_of(LogStash::Inputs::Elasticsearch).to receive(:execute_search_request).and_return(search_response)
-    expect_any_instance_of(LogStash::Inputs::Elasticsearch).to receive(:execute_scroll_request).with(any_args).and_return(scroll_response)
+    client = Elasticsearch::Client.new
+    expect(Elasticsearch::Client).to receive(:new).with(any_args).and_return(client)
+    expect(client).to receive(:search).with(any_args).and_return(scan_response)
+    expect(client).to receive(:scroll).with({:body=>"DcrY3G1xff6SB", :scroll=>"1m"}).and_return(scroll_responses.first)
+    expect(client).to receive(:scroll).with({:body=>"cXVlcnlUaGVuRmV0Y2g", :scroll=>"1m"}).and_return(scroll_responses.last)
 
     pipeline = LogStash::Pipeline.new(config)
     queue = Queue.new
@@ -64,17 +119,8 @@ describe "inputs/elasticsearch" do
     pipeline_thread = Thread.new { pipeline.run }
     event = queue.pop
 
-    insist { event["message"] } == "foobar"
-
-    # do not call pipeline.shutdown here, as it will stop the plugin execution randomly
-    # and maybe kill input before calling execute_scroll_request.
-    # TODO(colin) we should rework the pipeline shutdown to allow a soft/clean shutdown mechanism,
-    # using a shutdown event which can be fed into each plugin queue and when the plugin sees it
-    # exits after completing its processing.
-    #
-    # pipeline.shutdown
-    #
-    # instead, since our scroll_response will terminate the plugin, we can just join the pipeline thread
+    insist { event["fields"]["message"] } == [ "ohayo" ]
+
    pipeline_thread.join
  end
 end
metadata CHANGED
@@ -1,101 +1,120 @@
 --- !ruby/object:Gem::Specification
 name: logstash-input-elasticsearch
 version: !ruby/object:Gem::Version
-  version: 0.1.0
+  version: 0.1.1
 platform: ruby
 authors:
 - Elasticsearch
-autorequire: 
+autorequire:
 bindir: bin
 cert_chain: []
-date: 2014-11-03 00:00:00.000000000 Z
+date: 2014-11-19 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: logstash
-  requirement: !ruby/object:Gem::Requirement
+  version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - ! '>='
+    - - '>='
       - !ruby/object:Gem::Version
         version: 1.4.0
     - - <
       - !ruby/object:Gem::Version
         version: 2.0.0
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
+  requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - ! '>='
+    - - '>='
       - !ruby/object:Gem::Version
         version: 1.4.0
     - - <
       - !ruby/object:Gem::Version
         version: 2.0.0
+  prerelease: false
+  type: :runtime
 - !ruby/object:Gem::Dependency
-  name: ftw
-  requirement: !ruby/object:Gem::Requirement
+  name: elasticsearch
+  version_requirements: !ruby/object:Gem::Requirement
     requirements:
+    - - '>='
+      - !ruby/object:Gem::Version
+        version: 1.0.6
     - - ~>
       - !ruby/object:Gem::Version
-        version: 0.0.39
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
+        version: '1.0'
+  requirement: !ruby/object:Gem::Requirement
     requirements:
+    - - '>='
+      - !ruby/object:Gem::Version
+        version: 1.0.6
     - - ~>
       - !ruby/object:Gem::Version
-        version: 0.0.39
+        version: '1.0'
+  prerelease: false
+  type: :runtime
 - !ruby/object:Gem::Dependency
   name: logstash-codec-json
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - '>='
+      - !ruby/object:Gem::Version
+        version: '0'
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - ! '>='
+    - - '>='
       - !ruby/object:Gem::Version
         version: '0'
-  type: :runtime
   prerelease: false
+  type: :runtime
+- !ruby/object:Gem::Dependency
+  name: logstash-devutils
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - ! '>='
+    - - '>='
       - !ruby/object:Gem::Version
         version: '0'
-description: Read from an Elasticsearch cluster, based on search query results
-email: richard.pijnenburg@elasticsearch.com
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+  prerelease: false
+  type: :development
+description: This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/plugin install gemname. This gem is not a stand-alone program
+email: info@elasticsearch.com
 executables: []
 extensions: []
 extra_rdoc_files: []
 files:
 - .gitignore
 - Gemfile
+- LICENSE
 - Rakefile
 - lib/logstash/inputs/elasticsearch.rb
 - logstash-input-elasticsearch.gemspec
-- rakelib/publish.rake
-- rakelib/vendor.rake
 - spec/inputs/elasticsearch_spec.rb
-homepage: http://logstash.net/
+homepage: http://www.elasticsearch.org/guide/en/logstash/current/index.html
 licenses:
 - Apache License (2.0)
 metadata:
   logstash_plugin: 'true'
-  group: input
-post_install_message: 
+  logstash_group: input
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
 required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
-  - - ! '>='
+  - - '>='
     - !ruby/object:Gem::Version
       version: '0'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - ! '>='
+  - - '>='
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubyforge_project: 
-rubygems_version: 2.4.1
-signing_key: 
+rubyforge_project:
+rubygems_version: 2.2.2
+signing_key:
 specification_version: 4
 summary: Read from an Elasticsearch cluster, based on search query results
 test_files:
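
The compound constraint recorded for the elasticsearch dependency, ['>= 1.0.6', '~> 1.0'], pins the 1.x series while requiring at least 1.0.6. A quick way to check what it admits, using RubyGems' own Gem::Requirement:

    require "rubygems"

    req = Gem::Requirement.new(">= 1.0.6", "~> 1.0")
    req.satisfied_by?(Gem::Version.new("1.0.5"))  # => false (below the 1.0.6 floor)
    req.satisfied_by?(Gem::Version.new("1.0.6"))  # => true
    req.satisfied_by?(Gem::Version.new("1.4.2"))  # => true  (~> 1.0 admits any 1.x)
    req.satisfied_by?(Gem::Version.new("2.0.0"))  # => false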
data/rakelib/publish.rake DELETED
@@ -1,9 +0,0 @@
-require "gem_publisher"
-
-desc "Publish gem to RubyGems.org"
-task :publish_gem do |t|
-  gem_file = Dir.glob(File.expand_path('../*.gemspec', File.dirname(__FILE__))).first
-  gem = GemPublisher.publish_if_updated(gem_file, :rubygems)
-  puts "Published #{gem}" if gem
-end
-
data/rakelib/vendor.rake DELETED
@@ -1,169 +0,0 @@
-require "net/http"
-require "uri"
-require "digest/sha1"
-
-def vendor(*args)
-  return File.join("vendor", *args)
-end
-
-directory "vendor/" => ["vendor"] do |task, args|
-  mkdir task.name
-end
-
-def fetch(url, sha1, output)
-
-  puts "Downloading #{url}"
-  actual_sha1 = download(url, output)
-
-  if actual_sha1 != sha1
-    fail "SHA1 does not match (expected '#{sha1}' but got '#{actual_sha1}')"
-  end
-end # def fetch
-
-def file_fetch(url, sha1)
-  filename = File.basename( URI(url).path )
-  output = "vendor/#{filename}"
-  task output => [ "vendor/" ] do
-    begin
-      actual_sha1 = file_sha1(output)
-      if actual_sha1 != sha1
-        fetch(url, sha1, output)
-      end
-    rescue Errno::ENOENT
-      fetch(url, sha1, output)
-    end
-  end.invoke
-
-  return output
-end
-
-def file_sha1(path)
-  digest = Digest::SHA1.new
-  fd = File.new(path, "r")
-  while true
-    begin
-      digest << fd.sysread(16384)
-    rescue EOFError
-      break
-    end
-  end
-  return digest.hexdigest
-ensure
-  fd.close if fd
-end
-
-def download(url, output)
-  uri = URI(url)
-  digest = Digest::SHA1.new
-  tmp = "#{output}.tmp"
-  Net::HTTP.start(uri.host, uri.port, :use_ssl => (uri.scheme == "https")) do |http|
-    request = Net::HTTP::Get.new(uri.path)
-    http.request(request) do |response|
-      fail "HTTP fetch failed for #{url}. #{response}" if [200, 301].include?(response.code)
-      size = (response["content-length"].to_i || -1).to_f
-      count = 0
-      File.open(tmp, "w") do |fd|
-        response.read_body do |chunk|
-          fd.write(chunk)
-          digest << chunk
-          if size > 0 && $stdout.tty?
-            count += chunk.bytesize
-            $stdout.write(sprintf("\r%0.2f%%", count/size * 100))
-          end
-        end
-      end
-      $stdout.write("\r      \r") if $stdout.tty?
-    end
-  end
-
-  File.rename(tmp, output)
-
-  return digest.hexdigest
-rescue SocketError => e
-  puts "Failure while downloading #{url}: #{e}"
-  raise
-ensure
-  File.unlink(tmp) if File.exist?(tmp)
-end # def download
-
-def untar(tarball, &block)
-  require "archive/tar/minitar"
-  tgz = Zlib::GzipReader.new(File.open(tarball))
-  # Pull out typesdb
-  tar = Archive::Tar::Minitar::Input.open(tgz)
-  tar.each do |entry|
-    path = block.call(entry)
-    next if path.nil?
-    parent = File.dirname(path)
-
-    mkdir_p parent unless File.directory?(parent)
-
-    # Skip this file if the output file is the same size
-    if entry.directory?
-      mkdir path unless File.directory?(path)
-    else
-      entry_mode = entry.instance_eval { @mode } & 0777
-      if File.exists?(path)
-        stat = File.stat(path)
-        # TODO(sissel): Submit a patch to archive-tar-minitar upstream to
-        # expose headers in the entry.
-        entry_size = entry.instance_eval { @size }
-        # If file sizes are same, skip writing.
-        next if stat.size == entry_size && (stat.mode & 0777) == entry_mode
-      end
-      puts "Extracting #{entry.full_name} from #{tarball} #{entry_mode.to_s(8)}"
-      File.open(path, "w") do |fd|
-        # eof? check lets us skip empty files. Necessary because the API provided by
-        # Archive::Tar::Minitar::Reader::EntryStream only mostly acts like an
-        # IO object. Something about empty files in this EntryStream causes
-        # IO.copy_stream to throw "can't convert nil into String" on JRuby
-        # TODO(sissel): File a bug about this.
-        while !entry.eof?
-          chunk = entry.read(16384)
-          fd.write(chunk)
-        end
-        #IO.copy_stream(entry, fd)
-      end
-      File.chmod(entry_mode, path)
-    end
-  end
-  tar.close
-  File.unlink(tarball) if File.file?(tarball)
-end # def untar
-
-def ungz(file)
-
-  outpath = file.gsub('.gz', '')
-  tgz = Zlib::GzipReader.new(File.open(file))
-  begin
-    File.open(outpath, "w") do |out|
-      IO::copy_stream(tgz, out)
-    end
-    File.unlink(file)
-  rescue
-    File.unlink(outpath) if File.file?(outpath)
-    raise
-  end
-  tgz.close
-end
-
-desc "Process any vendor files required for this plugin"
-task "vendor" do |task, args|
-
-  @files.each do |file|
-    download = file_fetch(file['url'], file['sha1'])
-    if download =~ /.tar.gz/
-      prefix = download.gsub('.tar.gz', '').gsub('vendor/', '')
-      untar(download) do |entry|
-        if !file['files'].nil?
-          next unless file['files'].include?(entry.full_name.gsub(prefix, ''))
-          out = entry.full_name.split("/").last
-        end
-        File.join('vendor', out)
-      end
-    elsif download =~ /.gz/
-      ungz(download)
-    end
-  end
-
-end