hyperflow-amqp-executor 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.document +5 -0
- data/Gemfile +21 -0
- data/Gemfile.lock +110 -0
- data/LICENSE.txt +22 -0
- data/README.md +119 -0
- data/Rakefile +47 -0
- data/VERSION +1 -0
- data/bin/hyperflow-amqp-executor +56 -0
- data/bin/hyperflow-amqp-metric-collector +44 -0
- data/example/settings.yml +19 -0
- data/example/task.json +7 -0
- data/hyperflow-amqp-executor.gemspec +94 -0
- data/lib/hyperflow-amqp-executor.rb +49 -0
- data/lib/hyperflow-amqp-executor/cloud_storage.rb +34 -0
- data/lib/hyperflow-amqp-executor/gridftp_storage.rb +39 -0
- data/lib/hyperflow-amqp-executor/helpers.rb +6 -0
- data/lib/hyperflow-amqp-executor/job.rb +110 -0
- data/lib/hyperflow-amqp-executor/local_storage.rb +8 -0
- data/lib/hyperflow-amqp-executor/nfs_storage.rb +23 -0
- data/lib/hyperflow-amqp-executor/plgdata_storage.rb +47 -0
- data/lib/hyperflow-amqp-executor/settings.rb +34 -0
- data/test/helper.rb +18 -0
- data/test/test_hyperflow-amqp-executor.rb +7 -0
- metadata +224 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA1:
  metadata.gz: aa8e10ac439e7a475bd1a019a7bf9b911238a648
  data.tar.gz: 50c7432942f8e4c29ecd3039ceb54c2a81ccbfbc
SHA512:
  metadata.gz: 3aa7281cea2f3ff3e7dd9f164d4fe27bf798e85ce706c107c9fcf662733c0789b9ef03484597ef656669d1405de1cdb31281e0ebecdd9639258f4ad8769d6bf7
  data.tar.gz: c46df737e23a3acca22bac4519b72791b9560fe9321cac81abaa672fa265615ea1b2b845a8f50863140a359c03573eae84eefe88f1c67dcd13092300eb9c9996
data/.document
ADDED
data/Gemfile
ADDED
@@ -0,0 +1,21 @@
source "https://rubygems.org"
# Add dependencies required to use your gem here.

#ruby "2.1.0"

gem 'fog', "~> 1.18"
gem 'unf', "~> 0.1" # for AWS fog
gem 'recursive-open-struct', "~> 0.4"
gem 'amqp', "~> 1.1"
gem 'deep_merge', "~> 1.0"
gem 'httpclient', "~> 2.6"

# Add dependencies to develop your gem here.
# Include everything needed to run rake, tests, features, etc.
group :development do
  gem "shoulda", ">= 0"
  gem "rdoc", "~> 3.12"
  gem "bundler", "~> 1.0"
  gem "jeweler", "~> 1.8"
  gem "pry"
end
data/Gemfile.lock
ADDED
@@ -0,0 +1,110 @@
GEM
  remote: https://rubygems.org/
  specs:
    activesupport (4.2.1)
      i18n (~> 0.7)
      json (~> 1.7, >= 1.7.7)
      minitest (~> 5.1)
      thread_safe (~> 0.3, >= 0.3.4)
      tzinfo (~> 1.1)
    addressable (2.3.8)
    amq-protocol (1.9.2)
    amqp (1.5.0)
      amq-protocol (>= 1.9.2)
      eventmachine
    builder (3.2.2)
    coderay (1.1.0)
    deep_merge (1.0.1)
    eventmachine (1.0.7)
    excon (0.31.0)
    faraday (0.8.9)
      multipart-post (~> 1.2.0)
    fog (1.19.0)
      builder
      excon (~> 0.31.0)
      formatador (~> 0.2.0)
      mime-types
      multi_json (~> 1.0)
      net-scp (~> 1.1)
      net-ssh (>= 2.1.3)
      nokogiri (~> 1.5)
      ruby-hmac
    formatador (0.2.5)
    git (1.2.9.1)
    github_api (0.10.1)
      addressable
      faraday (~> 0.8.1)
      hashie (>= 1.2)
      multi_json (~> 1.4)
      nokogiri (~> 1.5.2)
      oauth2
    hashie (3.4.1)
    highline (1.7.2)
    httpclient (2.6.0.1)
    i18n (0.7.0)
    jeweler (1.8.8)
      builder
      bundler (~> 1.0)
      git (>= 1.2.5)
      github_api (= 0.10.1)
      highline (>= 1.6.15)
      nokogiri (= 1.5.10)
      rake
      rdoc
    json (1.8.2)
    jwt (1.4.1)
    method_source (0.8.2)
    mime-types (2.5)
    minitest (5.6.1)
    multi_json (1.11.0)
    multi_xml (0.5.5)
    multipart-post (1.2.0)
    net-scp (1.2.1)
      net-ssh (>= 2.6.5)
    net-ssh (2.9.2)
    nokogiri (1.5.10)
    oauth2 (1.0.0)
      faraday (>= 0.8, < 0.10)
      jwt (~> 1.0)
      multi_json (~> 1.3)
      multi_xml (~> 0.5)
      rack (~> 1.2)
    pry (0.10.1)
      coderay (~> 1.1.0)
      method_source (~> 0.8.1)
      slop (~> 3.4)
    rack (1.6.0)
    rake (10.4.2)
    rdoc (3.12.2)
      json (~> 1.4)
    recursive-open-struct (0.6.3)
    ruby-hmac (0.4.0)
    shoulda (3.5.0)
      shoulda-context (~> 1.0, >= 1.0.1)
      shoulda-matchers (>= 1.4.1, < 3.0)
    shoulda-context (1.2.1)
    shoulda-matchers (2.8.0)
      activesupport (>= 3.0.0)
    slop (3.6.0)
    thread_safe (0.3.5)
    tzinfo (1.2.2)
      thread_safe (~> 0.1)
    unf (0.1.4)
      unf_ext
    unf_ext (0.0.7.1)

PLATFORMS
  ruby

DEPENDENCIES
  amqp (~> 1.1)
  bundler (~> 1.0)
  deep_merge (~> 1.0)
  fog (~> 1.18)
  httpclient (~> 2.6)
  jeweler (~> 1.8)
  pry
  rdoc (~> 3.12)
  recursive-open-struct (~> 0.4)
  shoulda
  unf (~> 0.1)
data/LICENSE.txt
ADDED
@@ -0,0 +1,22 @@
The MIT License

Copyright (c) 2013-2015 Kamil Figiela (kfigiela@agh.edu.pl)

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,119 @@
# hyperflow-amqp-executor

Job executor for the [Hyperflow](http://github.com/dice-cyfronet/hyperflow) workflow engine.

## Usage

The executor may be configured in two ways:

* with a YAML configuration file (see `example/settings.yml`) - the path to that file should be passed as the only argument
* via environment variables:
    * `STORAGE` = cloud | nfs | local | plgdata | gridftp, defaults to `cloud` (Amazon S3)
    * `AWS_ACCESS_KEY_ID` (for S3 cloud storage)
    * `AWS_SECRET_ACCESS_KEY` (for S3 cloud storage)
    * `AMQP_URL` – address of the AMQP broker
    * `AMQP_QUEUE` – name of the AMQP queue (defaults to `hyperflow.jobs`)
    * `THREADS` (defaults to the number of cores, or 1 if it cannot be determined)
    * `X509_USER_PROXY` – user proxy certificate for PLGData and GridFTP storage
    * `PLGDATA_ENDPOINT` – endpoint of the PLGData service (defaults to `https://data.plgrid.pl`)

To execute jobs:

`$ hyperflow-amqp-executor`

To collect some metrics:

`$ hyperflow-amqp-metric-collector`

## Supported data storage services

### cloud: Cloud object storage service

In this scenario, input and output files of jobs are stored in a cloud object storage service such as [Amazon S3](http://aws.amazon.com/s3/). In that case the executor does the following:

* creates a temporary directory,
* downloads the input files,
* executes the job in the temporary directory,
* uploads the output files,
* removes the temporary directory (the application should still clean up its own temporary files).

Each task needs to provide some options (an illustrative sketch follows this list):

* `s3_bucket` - name of the Amazon S3 bucket,
* `s3_prefix` - path prefix within the bucket,
* *(optionally)* `cloud_storage` – the same hash as in the config file, used to select a storage provider other than the default.
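For illustration, the options part of a cloud-storage task might look like this - a sketch only; the bucket and prefix values are hypothetical, and the authoritative message schema is defined by Hyperflow:

```ruby
# Hypothetical task options for the "cloud" scenario (names as listed above)
options = {
  "storage"   => "cloud",
  "s3_bucket" => "my-workflow-bucket",  # hypothetical bucket name
  "s3_prefix" => "runs/2015-04-30/"     # hypothetical path prefix
}
```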
### nfs: (Slow) network file system

This case is similar to the previous one, but the executor copies files from, and copies results back to, a locally available file system. This may be NFS, SSHFS, or another file system where working directly on remote data is not recommended.

Job options:

* `workdir` - working directory (files will be copied to a local temporary workdir for task processing).

### local: Local/network file system

In this scenario we assume that the job is executed in a shared directory available on the execution node. It may be an NFS/SMB/etc. share, or a local disk for single-node deployments. There is no stage-in or stage-out phase: the job is executed directly in the specified directory, so it must not leave any temporary files behind.

Job options:

* `workdir` - working directory (tasks will be executed in this working directory).

### plgdata: PL-Grid Data

Behaves like *cloud* storage, but uses the [PL-Grid Data](https://data.plgrid.pl) service as a backend. It requires the path of a user proxy certificate in `$X509_USER_PROXY`. It has no other external requirements; however, PLGData limits file uploads to 256 MiB per file.

Each task needs to provide some options:

* `prefix` – path of the working directory on the storage infrastructure (probably something like /people/plgyourlogin/workflowdir).

### gridftp: GridFTP

Behaves like *cloud* storage, but uses a [GridFTP](https://www.globus.org/toolkit/docs/latest-stable/gridftp/) service as a backend. It requires the path of a user proxy certificate in `$X509_USER_PROXY`, and `globus-url-copy`, which is part of the Globus Toolkit, must be installed.

Each task needs to provide some options:

* `prefix` – path of the working directory on the storage infrastructure (probably something like gsiftp://example.com/people/plgyourlogin/workflowdir).
## Execution event monitoring

The executor publishes events for monitoring purposes to the `hyperflow.events` exchange created on the AMQP server. The exchange is of the topic type, so one may subscribe to a specific event type; to receive all messages, subscribe with the wildcard routing key `#`, as in the sketch below. Routing keys are as follows:

* `executor.ready`
* `job.{job-id}.started`
* `job.{job-id}.finished`
* `job.{job-id}.stage-in.started` - not applicable for `local` storage
* `job.{job-id}.stage-in.finished` - not applicable for `local` storage
* `job.{job-id}.execution.started`
* `job.{job-id}.execution.finished`
* `job.{job-id}.stage-out.started` - not applicable for `local` storage
* `job.{job-id}.stage-out.finished` - not applicable for `local` storage

Each event is published as JSON and provides:

* `executor`: executor id (UUID)
* `timestamp`: UNIX timestamp (float, UTC)
* `type`: like the routing key, but without the job id part

Events related to jobs additionally provide:

* `id`: job id (AMQP correlation id)
* `thread`: executor thread id (a random-looking number)

Stage finish events carry some additional info: `job.*.stage-in.finished` and `job.*.stage-out.finished` provide `time` and `bytes` for the transfer time and data size respectively, while `job.*.execution.finished` provides the `executable` name, `metrics`, and `exit_status`.
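A minimal subscriber sketch, assuming the `amqp` gem this package already uses; the anonymous queue and the output format are illustrative:

```ruby
#!/usr/bin/env ruby
require "amqp"
require "json"

EM.run do
  AMQP.connect(ENV['AMQP_URL']) do |connection|
    channel = AMQP::Channel.new(connection)
    # Anonymous, non-durable queue; "#" matches every routing key
    # published to the hyperflow.events topic exchange.
    queue = channel.queue("", auto_delete: true, durable: false)
    queue.bind("hyperflow.events", routing_key: "#")

    queue.subscribe do |payload|
      event = JSON.parse(payload)
      puts "#{event['timestamp']} #{event['type']} executor=#{event['executor']}"
    end

    Signal.trap("INT") { connection.close { EM.stop } }
  end
end
```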
## Contributing to hyperflow-amqp-executor

* Check out the latest master to make sure the feature hasn't been implemented or the bug hasn't been fixed yet.
* Check out the issue tracker to make sure someone hasn't already requested it and/or contributed it.
* Fork the project.
* Start a feature/bugfix branch.
* Commit and push until you are happy with your contribution.
* Make sure to add tests for it. This is important so I don't break it in a future version unintentionally.
* Please try not to mess with the Rakefile, version, or history. If you want to have your own version, or it is otherwise necessary, that is fine, but please isolate the change to its own commit so I can cherry-pick around it.

## Copyright

Copyright © 2013-2015 Kamil Figiela (kfigiela@agh.edu.pl). Distributed under the MIT License. See LICENSE.txt for further details.
data/Rakefile
ADDED
@@ -0,0 +1,47 @@
# encoding: utf-8

require 'rubygems'
require 'bundler'
begin
  Bundler.setup(:default, :development)
rescue Bundler::BundlerError => e
  $stderr.puts e.message
  $stderr.puts "Run `bundle install` to install missing gems"
  exit e.status_code
end
require 'rake'

require 'jeweler'
Jeweler::Tasks.new do |gem|
  # gem is a Gem::Specification... see http://docs.rubygems.org/read/chapter/20 for more options
  gem.name = "hyperflow-amqp-executor"
  gem.homepage = "http://github.com/kfigiela/hyperflow-amqp-executor"
  gem.license = "MIT"
  gem.summary = %Q{AMQP job executor for Hyperflow workflow engine}
  gem.description = %Q{AMQP job executor for Hyperflow workflow engine (http://github.com/dice-cyfronet/hyperflow)}
  gem.email = "kamil.figiela@gmail.com"
  gem.authors = ["Kamil Figiela"]
  gem.executables = %w{hyperflow-amqp-executor hyperflow-amqp-metric-collector}
  # dependencies defined in Gemfile
end
Jeweler::RubygemsDotOrgTasks.new

# require 'rake/testtask'
# Rake::TestTask.new(:test) do |test|
#   test.libs << 'lib' << 'test'
#   test.pattern = 'test/**/test_*.rb'
#   test.verbose = true
# end
#
#
# task :default => :test

require 'rdoc/task'
Rake::RDocTask.new do |rdoc|
  version = File.exist?('VERSION') ? File.read('VERSION') : ""

  rdoc.rdoc_dir = 'rdoc'
  rdoc.title = "hyperflow-amqp-executor #{version}"
  rdoc.rdoc_files.include('README*')
  rdoc.rdoc_files.include('lib/**/*.rb')
end
data/VERSION
ADDED
@@ -0,0 +1 @@
1.0.0
data/bin/hyperflow-amqp-executor
ADDED
@@ -0,0 +1,56 @@
#!/usr/bin/env ruby
require_relative '../lib/hyperflow-amqp-executor'

include Executor

task_queue_name = (ENV['AMQP_QUEUE'] or "hyperflow.jobs")

Executor::id = SecureRandom.uuid
Executor::logger.info "Starting worker #{Executor.id}"

Executor::settings = Executor::Settings.load(ARGV.first)

Executor::logger.info "Running #{Executor::settings.threads} worker threads"
EM.threadpool_size = Executor::settings.threads

EM.run do
  AMQP.connect(Executor::settings.amqp_url) do |connection|
    Executor::logger.info "Connected to AMQP broker... "

    channel = AMQP::Channel.new(connection)
    channel.prefetch(Executor::settings.threads)

    Executor::events_exchange = channel.topic('hyperflow.events', durable: true)
    queue = channel.queue(task_queue_name, durable: true)

    queue.subscribe(ack: true) do |header, payload|
      begin
        job_data = RecursiveOpenStruct.new(JSON.parse(payload), recurse_over_arrays: true)
        job = Job.new(header.correlation_id, job_data)
        op = -> {
          begin
            job.run
          rescue Exception => e
            Executor::logger.error "[#{header.correlation_id}] Error running job: #{e}"
            Executor::logger.debug "[#{header.correlation_id}] Backtrace\n#{e.backtrace.join("\n")}"
            {exit_status: -2, exceptions: [e]}
          end
        }
        cb = ->(output){
          channel.default_exchange.publish(JSON.dump(output), content_type: 'application/json', routing_key: header.reply_to, correlation_id: header.correlation_id, mandatory: true)
          header.ack
        }
        EM.defer(op, cb)
      rescue JSON::ParserError
        Executor::logger.error "[#{header.correlation_id}] Invalid JSON"
        header.ack
      end
    end

    Executor::publish_event('executor.ready', 'executor.ready', hostname: `hostname -f`)

    Signal.trap("INT") {
      connection.close { EM.stop }
    }
  end
end
data/bin/hyperflow-amqp-metric-collector
ADDED
@@ -0,0 +1,44 @@
#!/usr/bin/env ruby
# encoding: utf-8

require "amqp"
require "recursive-open-struct"
require "json"

EM.run do
  AMQP.connect(ENV['AMQP_URL']) do |connection|
    warn "Connected to AMQP broker..."

    channel = AMQP::Channel.new(connection)
    queue = channel.queue("", auto_delete: true, durable: false)
    queue.bind("hyperflow.events", routing_key: "job.*.finished")

    queue.subscribe do |payload|
      data = RecursiveOpenStruct.new(JSON.parse(payload))
      if data.metrics
        puts [Time.now,
              data.executor,
              data.thread,
              data.job,
              data.executable,
              data.metrics.execution,
              # data.metrics.stage_in,
              # data.metrics.stage_out,
              data.metrics.input_size,
              data.metrics.output_size,
              # data.metrics.timestamps['job.started'],
              # data.metrics.timestamps['stage_in.started'],
              # data.metrics.timestamps['stage_in.finished'],
              # data.metrics.timestamps['execution.started'],
              # data.metrics.timestamps['execution.finished'],
              # data.metrics.timestamps['stage_out.started'],
              # data.metrics.timestamps['stage_out.finished'],
              # data.metrics.timestamps['job.finished']
             ].map {|v| if v.nil? then "NA" else v end }.join("\t")
      else
        warn payload
      end
    end
    Signal.trap("INT") { connection.close { EM.stop } }
  end
end
data/example/settings.yml
ADDED
@@ -0,0 +1,19 @@
# RabbitMQ URL, defaults to ENV['AMQP_URL']
amqp_url: <%= ENV['AMQP_URL'] %>

# Execution storage scenario: cloud | nfs | local | plgdata | gridftp
# This setting may be overridden for each task
storage: <%= ENV['STORAGE'] || 'cloud' %>

# Number of execution threads (defaults to: ENV['THREADS'], `nproc` if available, or 1)
threads: <%= Executor::cpu_count %>

# Cloud storage settings; the following hash is passed directly to Fog::Storage, see http://fog.io/storage/ for further reference
# Defaults to AWS S3 with credentials passed in ENV
cloud_storage:
  provider: AWS
  aws_access_key_id: <%= ENV['AWS_ACCESS_KEY_ID'] %>
  aws_secret_access_key: <%= ENV['AWS_SECRET_ACCESS_KEY'] %>

plgdata:
  proxy: <%= ENV['X509_USER_PROXY'] or "./grid_proxy" %>
data/example/task.json
ADDED
data/hyperflow-amqp-executor.gemspec
ADDED
@@ -0,0 +1,94 @@
# Generated by jeweler
# DO NOT EDIT THIS FILE DIRECTLY
# Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
# -*- encoding: utf-8 -*-
# stub: hyperflow-amqp-executor 1.0.0 ruby lib

Gem::Specification.new do |s|
  s.name = "hyperflow-amqp-executor"
  s.version = "1.0.0"

  s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
  s.require_paths = ["lib"]
  s.authors = ["Kamil Figiela"]
  s.date = "2015-04-30"
  s.description = "AMQP job executor for Hyperflow workflow engine (http://github.com/dice-cyfronet/hyperflow)"
  s.email = "kamil.figiela@gmail.com"
  s.executables = ["hyperflow-amqp-executor", "hyperflow-amqp-metric-collector"]
  s.extra_rdoc_files = [
    "LICENSE.txt",
    "README.md"
  ]
  s.files = [
    ".document",
    "Gemfile",
    "Gemfile.lock",
    "LICENSE.txt",
    "README.md",
    "Rakefile",
    "VERSION",
    "bin/hyperflow-amqp-executor",
    "bin/hyperflow-amqp-metric-collector",
    "example/settings.yml",
    "example/task.json",
    "hyperflow-amqp-executor.gemspec",
    "lib/hyperflow-amqp-executor.rb",
    "lib/hyperflow-amqp-executor/cloud_storage.rb",
    "lib/hyperflow-amqp-executor/gridftp_storage.rb",
    "lib/hyperflow-amqp-executor/helpers.rb",
    "lib/hyperflow-amqp-executor/job.rb",
    "lib/hyperflow-amqp-executor/local_storage.rb",
    "lib/hyperflow-amqp-executor/nfs_storage.rb",
    "lib/hyperflow-amqp-executor/plgdata_storage.rb",
    "lib/hyperflow-amqp-executor/settings.rb",
    "test/helper.rb",
    "test/test_hyperflow-amqp-executor.rb"
  ]
  s.homepage = "http://github.com/kfigiela/hyperflow-amqp-executor"
  s.licenses = ["MIT"]
  s.rubygems_version = "2.2.2"
  s.summary = "AMQP job executor for Hyperflow workflow engine"

  if s.respond_to? :specification_version then
    s.specification_version = 4

    if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
      s.add_runtime_dependency(%q<fog>, ["~> 1.18"])
      s.add_runtime_dependency(%q<unf>, ["~> 0.1"])
      s.add_runtime_dependency(%q<recursive-open-struct>, ["~> 0.4"])
      s.add_runtime_dependency(%q<amqp>, ["~> 1.1"])
      s.add_runtime_dependency(%q<deep_merge>, ["~> 1.0"])
      s.add_runtime_dependency(%q<httpclient>, ["~> 2.6"])
      s.add_development_dependency(%q<shoulda>, [">= 0"])
      s.add_development_dependency(%q<rdoc>, ["~> 3.12"])
      s.add_development_dependency(%q<bundler>, ["~> 1.0"])
      s.add_development_dependency(%q<jeweler>, ["~> 1.8"])
      s.add_development_dependency(%q<pry>, [">= 0"])
    else
      s.add_dependency(%q<fog>, ["~> 1.18"])
      s.add_dependency(%q<unf>, ["~> 0.1"])
      s.add_dependency(%q<recursive-open-struct>, ["~> 0.4"])
      s.add_dependency(%q<amqp>, ["~> 1.1"])
      s.add_dependency(%q<deep_merge>, ["~> 1.0"])
      s.add_dependency(%q<httpclient>, ["~> 2.6"])
      s.add_dependency(%q<shoulda>, [">= 0"])
      s.add_dependency(%q<rdoc>, ["~> 3.12"])
      s.add_dependency(%q<bundler>, ["~> 1.0"])
      s.add_dependency(%q<jeweler>, ["~> 1.8"])
      s.add_dependency(%q<pry>, [">= 0"])
    end
  else
    s.add_dependency(%q<fog>, ["~> 1.18"])
    s.add_dependency(%q<unf>, ["~> 0.1"])
    s.add_dependency(%q<recursive-open-struct>, ["~> 0.4"])
    s.add_dependency(%q<amqp>, ["~> 1.1"])
    s.add_dependency(%q<deep_merge>, ["~> 1.0"])
    s.add_dependency(%q<httpclient>, ["~> 2.6"])
    s.add_dependency(%q<shoulda>, [">= 0"])
    s.add_dependency(%q<rdoc>, ["~> 3.12"])
    s.add_dependency(%q<bundler>, ["~> 1.0"])
    s.add_dependency(%q<jeweler>, ["~> 1.8"])
    s.add_dependency(%q<pry>, [">= 0"])
  end
end
data/lib/hyperflow-amqp-executor.rb
ADDED
@@ -0,0 +1,49 @@
require 'amqp'
require 'json'
require 'recursive-open-struct'
require 'open3'
require 'tmpdir'
require 'logger'

require_relative 'hyperflow-amqp-executor/helpers'
require_relative 'hyperflow-amqp-executor/job'
require_relative 'hyperflow-amqp-executor/local_storage'
require_relative 'hyperflow-amqp-executor/cloud_storage'
require_relative 'hyperflow-amqp-executor/nfs_storage'
require_relative 'hyperflow-amqp-executor/plgdata_storage'
require_relative 'hyperflow-amqp-executor/gridftp_storage'
require_relative 'hyperflow-amqp-executor/settings'

module Executor
  class << self
    attr_accessor :events_exchange, :id, :settings

    def logger
      @logger ||= Logger.new($stdout)
    end

    def cpu_count
      unless ENV['THREADS'].nil?
        ENV['THREADS']
      else
        begin
          `nproc`
        rescue
          1
        end
      end.to_i
    end

    def publish_event(type, routing_key, payload = {})
      data = payload
      data['timestamp'] = Time.now.utc.to_f
      data['type'] = type
      data['executor'] = @id
      EM.next_tick do
        logger.debug "Publishing event #{type}"
        @events_exchange.publish(JSON.dump(data), content_type: 'application/json', routing_key: routing_key)
      end
      data['timestamp']
    end
  end
end
data/lib/hyperflow-amqp-executor/cloud_storage.rb
ADDED
@@ -0,0 +1,34 @@
require 'fog'

module Executor
  module CloudStorage
    def storage_init
      @provider = Fog::Storage.new(@job.options.cloud_storage || Executor::settings.cloud_storage.to_h)
    end

    def stage_in
      @bucket = @provider.directories.get(@job.options.bucket)

      @job.inputs.each do |file|
        Executor::logger.debug "[#{@id}] Downloading #{file.name}"
        File.open(@workdir+"/"+file.name, File::RDWR|File::CREAT) do |local_file|
          @bucket.files.get(@job.options.prefix+file.name) do |chunk, remaining_bytes, total_bytes|
            local_file.write(chunk)
            # print "\rDownloading #{file.name}: #{100*(total_bytes-remaining_bytes)/total_bytes}%"
          end
        end
      end
    end

    def stage_out
      @job.outputs.each do |file|
        Executor::logger.debug "[#{@id}] Uploading #{file.name}"
        @bucket.files.create(key: @job.options.prefix+file.name, body: File.open(@workdir+"/"+file.name))
      end
    end

    def workdir(&block)
      Dir::mktmpdir(&block)
    end
  end
end
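The hash handed to `Fog::Storage.new` above is simply the `cloud_storage` section of the settings (or the per-job override); a minimal sketch, assuming the default AWS provider and a hypothetical bucket name:

```ruby
require 'fog'

# Same shape as the cloud_storage defaults in settings.rb
provider = Fog::Storage.new(
  provider:              "AWS",
  aws_access_key_id:     ENV['AWS_ACCESS_KEY_ID'],
  aws_secret_access_key: ENV['AWS_SECRET_ACCESS_KEY']
)
bucket = provider.directories.get("my-workflow-bucket") # hypothetical bucket
```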
data/lib/hyperflow-amqp-executor/gridftp_storage.rb
ADDED
@@ -0,0 +1,39 @@
require 'pry'
module Executor
  module GridFTPStorage
    def storage_init
      raise Exception, "Unable to locate user proxy certificate" if Executor::settings.gridftp.proxy.nil? or !File.exists?(Executor::settings.gridftp.proxy)
      @proxy_file = Executor::settings.gridftp.proxy
    end

    def stage_in
      @job.inputs.each do |file|
        local_file_name = @workdir + "/" + file.name
        url = @job.options.prefix + "/" + file.name

        Executor::logger.debug "[#{@id}] Downloading #{url} to #{local_file_name}"
        stdout, stderr, status = Open3.capture3({'X509_USER_PROXY' => @proxy_file}, 'globus-url-copy', url, local_file_name, chdir: @workdir)
        unless status == 0
          raise Exception, "Failed downloading input from GridFTP, status: #{status}\nstdout:\n#{stdout}\n\n stderr:\n#{stderr}"
        end
      end
    end

    def stage_out
      @job.outputs.each do |file|
        local_file_name = @workdir + "/" + file.name
        url = @job.options.prefix + "/" + file.name

        Executor::logger.debug "[#{@id}] Uploading #{file.name} to #{url}"
        stdout, stderr, status = Open3.capture3({'X509_USER_PROXY' => @proxy_file}, 'globus-url-copy', local_file_name, url, chdir: @workdir)
        unless status == 0
          raise Exception, "Failed uploading output to GridFTP, status: #{status}\nstdout:\n#{stdout}\n\n stderr:\n#{stderr}"
        end
      end
    end

    def workdir(&block)
      Dir::mktmpdir(&block)
    end
  end
end
data/lib/hyperflow-amqp-executor/job.rb
ADDED
@@ -0,0 +1,110 @@
module Executor
  class Job
    attr_reader :metrics

    def initialize(id, job)
      @job = job
      @id = id
      @metrics = {
        timestamps: { },
        executor: Executor::id
      }

      storage_module = case (@job.options.storage or Executor::settings.storage)
      when 's3', 'cloud'
        CloudStorage
      when 'local'
        LocalStorage
      when 'nfs'
        NFSStorage
      when 'plgdata'
        PLGDataStorage
      when 'gridftp'
        GridFTPStorage
      else
        raise "Unknown storage #{@job.options.storage}"
      end
      self.extend(storage_module)
    end

    def run
      @metrics[:timestamps]["job.started"] = Executor::publish_event 'job.started', "job.#{@id}.started", job: @id, thread: Thread.current.__id__
      @metrics[:thread] = Thread.current.__id__

      results = {}

      workdir do |tmpdir|
        @workdir = tmpdir
        raise "Couldn't get workdir" unless @workdir

        storage_init if self.respond_to? :storage_init

        if self.respond_to? :stage_in
          publish_events "stage_in" do
            _ , @metrics[:stage_in] = time { stage_in }
            @metrics[:input_size] = input_size
            {bytes: @metrics[:input_size], time: @metrics[:stage_in]}
          end
        else
          @metrics[:input_size] = input_size
        end

        publish_events "execution" do
          results, @metrics[:execution] = time { execute }
          { executable: @job.executable, exit_status: results[:exit_status], time: @metrics[:execution] }
        end

        if self.respond_to? :stage_out
          publish_events "stage_out" do
            _, @metrics[:stage_out] = time { stage_out }
            @metrics[:output_size] = output_size
            { bytes: @metrics[:output_size], time: @metrics[:stage_out] }
          end
        else
          @metrics[:output_size] = output_size
        end

      end
      @metrics[:timestamps]["job.finished"] = Executor::publish_event 'job.finished', "job.#{@id}.finished", job: @id, executable: @job.executable, exit_status: results[:exit_status], metrics: @metrics, thread: Thread.current.__id__

      results[:metrics] = @metrics
      results
    end

    def publish_events(name)
      @metrics[:timestamps]["#{name}.started"] = Executor::publish_event "job.#{name}.started", "job.#{@id}.#{name}.started", job: @id, thread: Thread.current.__id__
      results = yield
      @metrics[:timestamps]["#{name}.finished"] = Executor::publish_event "job.#{name}.finished", "job.#{@id}.#{name}.finished", {job: @id, thread: Thread.current.__id__}.merge(results || {})
      results
    end

    def cmdline
      if @job.args.is_a? Array
        ([@job.executable] + @job.args).map { |e| e.to_s }
      else
        "#{@job.executable} #{@job.args}"
      end
    end

    def execute
      begin
        Executor::logger.debug "[#{@id}] Executing #{cmdline}"
        stdout, stderr, status = Open3.capture3(*cmdline, chdir: @workdir)

        {exit_status: status, stderr: stderr, stdout: stdout}
      rescue Exception => e
        Executor::logger.error "[#{@id}] Error executing job: #{e}"
        Executor::logger.debug "[#{@id}] Backtrace\n#{e.backtrace.join("\n")}"
        {exit_status: -1, exceptions: [e]}
      end
    end

    def input_size
      @job.inputs.map{ |file| begin File.size(@workdir+"/"+file.name) rescue 0 end }.reduce(:+) or 0
    end

    def output_size
      @job.outputs.map{ |file| begin File.size(@workdir+"/"+file.name) rescue 0 end }.reduce(:+) or 0
    end
  end
end
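For reference, a job message in roughly the shape that `Job` consumes, assembled from the fields read above - the executable, file names, and option values are hypothetical:

```ruby
require 'recursive-open-struct'

# Hypothetical job payload, shown after JSON parsing (as in the executor binary)
job_data = RecursiveOpenStruct.new({
  "executable" => "convert",                    # command run via Open3.capture3
  "args"       => ["input.png", "output.pdf"],  # may also be a plain string
  "inputs"     => [{ "name" => "input.png" }],  # staged in before execution
  "outputs"    => [{ "name" => "output.pdf" }], # staged out afterwards
  "options"    => { "storage" => "local", "workdir" => "/mnt/shared/run1/" }
}, recurse_over_arrays: true)
```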
data/lib/hyperflow-amqp-executor/nfs_storage.rb
ADDED
@@ -0,0 +1,23 @@
require 'fog'

module Executor
  module NFSStorage
    def stage_in
      @job.inputs.each do |file|
        Executor::logger.debug "[#{@id}] Copying #{file.name} to tmpdir"
        FileUtils.copy(@job.options.workdir + file.name, @workdir + "/" + file.name)
      end
    end

    def stage_out
      @job.outputs.each do |file|
        Executor::logger.debug "[#{@id}] Copying #{file.name} from tmpdir"
        FileUtils.copy(@workdir + "/" + file.name, @job.options.workdir + file.name)
      end
    end

    def workdir(&block)
      Dir::mktmpdir(&block)
    end
  end
end
data/lib/hyperflow-amqp-executor/plgdata_storage.rb
ADDED
@@ -0,0 +1,47 @@
require 'httpclient'

module Executor
  module PLGDataStorage
    PLGDATA_ENDPOINT = (ENV['PLGDATA_ENDPOINT'] or 'https://data.plgrid.pl')
    def storage_init
      @http_client = HTTPClient.new()

      raise Exception, "Unable to load proxy certificate" unless File.exists?(Executor::settings.plgdata.proxy)
      @proxy_string = File.read(Executor::settings.plgdata.proxy)
    end

    def stage_in
      @job.inputs.each do |file|
        url = PLGDATA_ENDPOINT+'/download/' + @job.options.prefix + "/" + file.name
        local_file_name = @workdir + "/" + file.name

        Executor::logger.debug "[#{@id}] Downloading #{url} to #{local_file_name}"
        File.open(local_file_name, File::RDWR|File::CREAT) do |local_file|
          payload = {proxy: @proxy_string}
          response = @http_client.get(url, payload) do |chunk|
            local_file.write(chunk)
          end
          raise Exception, "Failed downloading input file" unless response.ok?
        end
      end
    end

    def stage_out
      @job.outputs.each do |file|
        url = PLGDATA_ENDPOINT+'/upload/' + @job.options.prefix + "/" + File.dirname(file.name)
        local_file_name = @workdir+"/"+file.name

        Executor::logger.debug "[#{@id}] Uploading #{file.name} to #{url}"
        File.open(local_file_name) do |local_file|
          payload = {proxy: @proxy_string, file: local_file}
          response = @http_client.post(url, payload)
          raise Exception, "Failed uploading output file: #{response.content}" unless response.ok?
        end
      end
    end

    def workdir(&block)
      Dir::mktmpdir(&block)
    end
  end
end
data/lib/hyperflow-amqp-executor/settings.rb
ADDED
@@ -0,0 +1,34 @@
require 'deep_merge'
module Executor
  class Settings
    class << self
      def load(file=nil)
        settings = defaults
        unless file.nil?
          file_settings = YAML.load(ERB.new(File.read(file)).result)
          settings.deep_merge! file_settings
        end
        RecursiveOpenStruct.new(settings)
      end

      def defaults
        {
          amqp_url: ENV['AMQP_URL'],
          storage: 'cloud',
          threads: Executor::cpu_count,
          cloud_storage: {
            provider: "AWS",
            aws_access_key_id: ENV['AWS_ACCESS_KEY_ID'],
            aws_secret_access_key: ENV['AWS_SECRET_ACCESS_KEY']
          },
          plgdata: {
            proxy: ENV['X509_USER_PROXY']
          },
          gridftp: {
            proxy: ENV['X509_USER_PROXY']
          }
        }
      end
    end
  end
end
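`Settings.load` renders the file through ERB and deep-merges it over the defaults above; a small usage sketch, assuming the gem is loaded and using the settings file shipped in `example/`:

```ruby
# File values override defaults; the result is a RecursiveOpenStruct.
Executor::settings = Executor::Settings.load("example/settings.yml")
Executor::settings.storage                # e.g. "cloud"
Executor::settings.cloud_storage.provider # "AWS" unless overridden
```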
data/test/helper.rb
ADDED
@@ -0,0 +1,18 @@
require 'rubygems'
require 'bundler'
begin
  Bundler.setup(:default, :development)
rescue Bundler::BundlerError => e
  $stderr.puts e.message
  $stderr.puts "Run `bundle install` to install missing gems"
  exit e.status_code
end
require 'test/unit'
require 'shoulda'

$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
$LOAD_PATH.unshift(File.dirname(__FILE__))
require 'hyperflow-amqp-executor'

class Test::Unit::TestCase
end
metadata
ADDED
@@ -0,0 +1,224 @@
--- !ruby/object:Gem::Specification
name: hyperflow-amqp-executor
version: !ruby/object:Gem::Version
  version: 1.0.0
platform: ruby
authors:
- Kamil Figiela
autorequire:
bindir: bin
cert_chain: []
date: 2015-04-30 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: fog
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.18'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.18'
- !ruby/object:Gem::Dependency
  name: unf
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '0.1'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '0.1'
- !ruby/object:Gem::Dependency
  name: recursive-open-struct
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '0.4'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '0.4'
- !ruby/object:Gem::Dependency
  name: amqp
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.1'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.1'
- !ruby/object:Gem::Dependency
  name: deep_merge
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.0'
- !ruby/object:Gem::Dependency
  name: httpclient
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '2.6'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '2.6'
- !ruby/object:Gem::Dependency
  name: shoulda
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
- !ruby/object:Gem::Dependency
  name: rdoc
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '3.12'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '3.12'
- !ruby/object:Gem::Dependency
  name: bundler
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.0'
- !ruby/object:Gem::Dependency
  name: jeweler
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.8'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.8'
- !ruby/object:Gem::Dependency
  name: pry
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
description: AMQP job executor for Hyperflow workflow engine (http://github.com/dice-cyfronet/hyperflow)
email: kamil.figiela@gmail.com
executables:
- hyperflow-amqp-executor
- hyperflow-amqp-metric-collector
extensions: []
extra_rdoc_files:
- LICENSE.txt
- README.md
files:
- ".document"
- Gemfile
- Gemfile.lock
- LICENSE.txt
- README.md
- Rakefile
- VERSION
- bin/hyperflow-amqp-executor
- bin/hyperflow-amqp-metric-collector
- example/settings.yml
- example/task.json
- hyperflow-amqp-executor.gemspec
- lib/hyperflow-amqp-executor.rb
- lib/hyperflow-amqp-executor/cloud_storage.rb
- lib/hyperflow-amqp-executor/gridftp_storage.rb
- lib/hyperflow-amqp-executor/helpers.rb
- lib/hyperflow-amqp-executor/job.rb
- lib/hyperflow-amqp-executor/local_storage.rb
- lib/hyperflow-amqp-executor/nfs_storage.rb
- lib/hyperflow-amqp-executor/plgdata_storage.rb
- lib/hyperflow-amqp-executor/settings.rb
- test/helper.rb
- test/test_hyperflow-amqp-executor.rb
homepage: http://github.com/kfigiela/hyperflow-amqp-executor
licenses:
- MIT
metadata: {}
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubyforge_project:
rubygems_version: 2.2.2
signing_key:
specification_version: 4
summary: AMQP job executor for Hyperflow workflow engine
test_files: []