mortar 0.15.38 → 0.15.39

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,15 @@
1
1
  ---
2
- SHA1:
3
- metadata.gz: ffc0b8718edcb166d6efb97aa313f93f62cbe05a
4
- data.tar.gz: e50a708d63e897a055528aeb61b0a2a023c6cb78
2
+ !binary "U0hBMQ==":
3
+ metadata.gz: !binary |-
4
+ N2RjODE0YjhlMmJjNDAyMjAxNmZhM2MxM2M2YTI1MjZlMjY2YWU4Yg==
5
+ data.tar.gz: !binary |-
6
+ MzI0Y2NiMWEwNTI2MWRjNGIxYjRiMDhhZGJiNTZjNzY5ZDZkMDAwOQ==
5
7
  SHA512:
6
- metadata.gz: 0c57ea3f3a078fd0896b92973d5bd731bdedd2024c2ddfae3f74121dce10fddef75b374c4b6f6618345f724ce79963cf501065553b7494c919627aa64815b715
7
- data.tar.gz: b62dc6bb899b601ea4c6162a2bcdf068d47e23d13d1acf75cfefb40bcefdd757d0475d0dd91d6325b7031352ea4a3097e96a733ec65123eff2d08f4ca0d96cc0
8
+ metadata.gz: !binary |-
9
+ MjQ0MWRiMWUxZGJlNTQ2YjI5ODljNzJkZjg3ZjUwZDAyYjVhZmY4OGU5NzUy
10
+ OWZkMzQ5MWEzYWMxZDg2ZmZkN2VhNmFjZjgyYWI3MGE5YzM3Nzg1MDY2ZmEy
11
+ NDdmZjljYzMwNjI1YTAyZTUxYWUxYjM3M2FmMzk0MTg2MzEzZWQ=
12
+ data.tar.gz: !binary |-
13
+ N2NhNjE5MThkNDQ2MjVmZTc1ZWEwMWNmYTJjZmM2OWE3NzdjMWI0NGYyODFk
14
+ NmZmYjgzYjNkMzAxNmI4MjliMDM0MzMzYTM3NTk3NmFjMGJiZjI5ZWNmYjkw
15
+ ZDc1MjI1YzQ5NmU5NzdlM2ZjMDU0ZmFjMmM0MzFmM2I5ZmY4MzY=
@@ -60,6 +60,7 @@ class Mortar::Command::Jobs < Mortar::Command::Base
60
60
  #
61
61
  # -c, --clusterid CLUSTERID # Run job on an existing cluster with ID of CLUSTERID (optional)
62
62
  # -s, --clustersize NUMNODES # Run job on a new cluster, with NUMNODES nodes (optional; must be >= 2 if provided)
63
+ # -t, --clustertags A,B,C # Run job on an existing cluster with specified tags
63
64
  # -1, --singlejobcluster # Stop the cluster after job completes. (Default: false--cluster can be used for other jobs, and will shut down after 1 hour of inactivity)
64
65
  # -2, --permanentcluster # Don't automatically stop the cluster after it has been idle for an hour (Default: false--cluster will be shut down after 1 hour of inactivity)
65
66
  # -3, --spot # Use spot instances for this cluster (Default: false, only applicable to new clusters)
@@ -112,21 +113,42 @@ class Mortar::Command::Jobs < Mortar::Command::Base
112
113
  error "Unknown Script Type"
113
114
  end
114
115
  end
115
-
116
+
117
+ if options[:clustertags]
118
+ cluster_tags = options[:clustertags].split(',')
119
+ else
120
+ cluster_tags = []
121
+ end
122
+
116
123
  unless options[:clusterid] || options[:clustersize]
117
124
  clusters = api.get_clusters(pig_version.cluster_backend).body['clusters']
118
125
 
119
- largest_free_cluster = clusters.select{ |c| \
120
- c['running_jobs'].length == 0 && c['status_code'] == Mortar::API::Clusters::STATUS_RUNNING }.
121
- max_by{|c| c['size']}
126
+ if cluster_tags.length > 0
127
+ tagged_clusters = clusters.select{
128
+ |c| c['status_code'] == Mortar::API::Clusters::STATUS_RUNNING && (cluster_tags - c['tags']).empty?
129
+ }
130
+ if tagged_clusters.length == 0
131
+ error "There're no clusters with tags [" + options[:clustertags] + "]"
132
+ end
122
133
 
123
- if largest_free_cluster.nil?
124
- options[:clustersize] = 2
125
- display("Defaulting to running job on new cluster of size 2")
134
+ largest_cluster = tagged_clusters.max_by{|c| c['size']}
135
+
136
+ options[:clusterid] = largest_cluster['cluster_id']
137
+ display("Running job on the cluster with tags [" + options[:clustertags] + "], id = " + largest_cluster['cluster_id'] +
138
+ ", size = " + largest_cluster['size'].to_s)
126
139
  else
127
- options[:clusterid] = largest_free_cluster['cluster_id']
128
- display("Defaulting to running job on largest existing free cluster, id = " +
129
- largest_free_cluster['cluster_id'] + ", size = " + largest_free_cluster['size'].to_s)
140
+ largest_free_cluster = clusters.select{ |c| \
141
+ c['running_jobs'].length == 0 && c['status_code'] == Mortar::API::Clusters::STATUS_RUNNING }.
142
+ max_by{|c| c['size']}
143
+
144
+ if largest_free_cluster.nil?
145
+ options[:clustersize] = 2
146
+ display("Defaulting to running job on new cluster of size 2")
147
+ else
148
+ options[:clusterid] = largest_free_cluster['cluster_id']
149
+ display("Defaulting to running job on largest existing free cluster, id = " +
150
+ largest_free_cluster['cluster_id'] + ", size = " + largest_free_cluster['size'].to_s)
151
+ end
130
152
  end
131
153
  end
132
154
 
@@ -137,7 +159,7 @@ class Mortar::Command::Jobs < Mortar::Command::Base
137
159
  end
138
160
  end
139
161
  end
140
-
162
+
141
163
  if options[:project]
142
164
  if options[:branch]
143
165
  git_ref = options[:branch]
@@ -29,6 +29,7 @@ class Mortar::Command::Spark < Mortar::Command::Base
29
29
  #
30
30
  # -c, --clusterid CLUSTERID # Run job on an existing cluster with ID of CLUSTERID (Default: runs on an existing available cluster)
31
31
  # -s, --clustersize NUMNODES # Run job with NUMNODES nodes (optional; must be >= 2 if provided)
32
+ # -t, --clustertags A,B,C # Run job on an existing cluster with specified tags
32
33
  # -3, --spot # Use spot instances for this cluster (Default: true)
33
34
  # -P, --project PROJECTNAME # Use a project that is not checked out in the current directory. Runs code from project's master branch in GitHub rather than snapshotting local code.
34
35
  # -B, --branch BRANCHNAME # Used with --project to specify a non-master branch
@@ -65,20 +66,45 @@ class Mortar::Command::Spark < Mortar::Command::Base
65
66
  git_ref = sync_code_with_cloud()
66
67
  end
67
68
 
69
+ if options[:clustertags]
70
+ cluster_tags = options[:clustertags].split(',')
71
+ else
72
+ cluster_tags = []
73
+ end
74
+
68
75
  unless options[:clusterid] || options[:clustersize]
69
76
  clusters = api.get_clusters(Mortar::API::Jobs::CLUSTER_BACKEND__EMR_SPARK_JOBSERVER).body['clusters']
70
77
 
71
- largest_free_cluster = clusters.select{ |c| \
72
- c['running_jobs'].length == 0 && c['status_code'] == Mortar::API::Clusters::STATUS_RUNNING }.
73
- max_by{|c| c['size']}
74
-
75
- if largest_free_cluster.nil?
76
- error('No running clusters with Spark Job Server detected, please, launch a SparkJobServer cluster first')
77
- end
78
+ if cluster_tags.length > 0
79
+ tagged_clusters = clusters.select{
80
+ |c| c['status_code'] == Mortar::API::Clusters::STATUS_RUNNING && (cluster_tags - c['tags']).empty?
81
+ }
82
+ if tagged_clusters.length > 1
83
+ display(tagged_clusters)
84
+ error "There're " + tagged_clusters.length.to_s + " clusters with tags [" + options[:clustertags] +
85
+ "]. Please, select one cluster."
86
+ elsif tagged_clusters.length == 0
87
+ error "There're no clusters with tags [" + options[:clustertags] + "]"
88
+ end
89
+
90
+ largest_cluster = tagged_clusters.max_by{|c| c['size']}
78
91
 
79
- options[:clusterid] = largest_free_cluster['cluster_id']
80
- display("Defaulting to running job on largest existing free cluster, id = " +
81
- largest_free_cluster['cluster_id'] + ", size = " + largest_free_cluster['size'].to_s)
92
+ options[:clusterid] = largest_cluster['cluster_id']
93
+ display("Running job on the cluster with tags [" + options[:clustertags] + "], id = " + largest_cluster['cluster_id'] +
94
+ ", size = " + largest_cluster['size'].to_s)
95
+ else
96
+ largest_free_cluster = clusters.select{ |c| \
97
+ c['running_jobs'].length == 0 && c['status_code'] == Mortar::API::Clusters::STATUS_RUNNING }.
98
+ max_by{|c| c['size']}
99
+
100
+ if largest_free_cluster.nil?
101
+ error('No running clusters with Spark Job Server detected, please, launch a SparkJobServer cluster first')
102
+ end
103
+
104
+ options[:clusterid] = largest_free_cluster['cluster_id']
105
+ display("Defaulting to running job on largest existing free cluster, id = " +
106
+ largest_free_cluster['cluster_id'] + ", size = " + largest_free_cluster['size'].to_s)
107
+ end
82
108
  end
83
109
 
84
110
  response = action("Requesting job execution") do
@@ -85,6 +85,10 @@ module Mortar
85
85
  @luigiscripts
86
86
  end
87
87
 
88
+ def sparkjobs_path
89
+ File.join(@root_path, "src/main/scala")
90
+ end
91
+
88
92
  def sparkscripts_path
89
93
  File.join(@root_path, "sparkscripts")
90
94
  end
@@ -16,5 +16,5 @@
16
16
 
17
17
  module Mortar
18
18
  # see http://semver.org/
19
- VERSION = "0.15.38"
19
+ VERSION = "0.15.39"
20
20
  end
@@ -439,6 +439,65 @@ STDOUT
439
439
  end
440
440
  end
441
441
 
442
+ it "runs a job by default on the largest existing running cluster using cluster tags" do
443
+ with_git_initialized_project do |p|
444
+ job_id = "c571a8c7f76a4fd4a67c103d753e2dd5"
445
+ job_url = "http://127.0.0.1:5000/jobs/job_detail?job_id=c571a8c7f76a4fd4a67c103d753e2dd5"
446
+
447
+ small_cluster_id = '510beb6b3004860820ab6538'
448
+ small_cluster_size = 2
449
+ small_cluster_status = Mortar::API::Clusters::STATUS_RUNNING
450
+ large_cluster_id = '510bf0db3004860820ab6590'
451
+ large_cluster_size = 5
452
+ large_cluster_status = Mortar::API::Clusters::STATUS_RUNNING
453
+ starting_cluster_id = '510bf0db3004860820abaaaa'
454
+ starting_cluster_size = 10
455
+ starting_cluster_status = Mortar::API::Clusters::STATUS_STARTING
456
+ huge_busy_cluster_id = '510bf0db3004860820ab6621'
457
+ huge_busy_cluster_size = 20
458
+ huge_busy_cluster_status = Mortar::API::Clusters::STATUS_RUNNING
459
+
460
+
461
+ mock(Mortar::Auth.api).get_clusters(Mortar::API::Jobs::CLUSTER_BACKEND__EMR_HADOOP_2) {
462
+ Excon::Response.new(:body => {
463
+ 'clusters' => [
464
+ { 'cluster_id' => small_cluster_id, 'size' => small_cluster_size, 'running_jobs' => [], 'status_code' => small_cluster_status, 'tags' => ['small'] },
465
+ { 'cluster_id' => large_cluster_id, 'size' => large_cluster_size, 'running_jobs' => [], 'status_code' => large_cluster_status, 'tags' => ['large', 'cool'] },
466
+ { 'cluster_id' => starting_cluster_id, 'size' => starting_cluster_size, 'running_jobs' => [], 'status_code' => starting_cluster_status, 'tags' => ['starting'] },
467
+ { 'cluster_id' => huge_busy_cluster_id, 'size' => huge_busy_cluster_size, 'tags' => ['huge'],
468
+ 'running_jobs' => [ { 'job_id' => 'c571a8c7f76a4fd4a67c103d753e2dd5',
469
+ 'job_name' => "", 'start_timestamp' => ""} ], 'status_code' => huge_busy_cluster_status }
470
+ ]})
471
+ }
472
+ mock(Mortar::Auth.api).post_pig_job_existing_cluster("myproject", "my_script", is_a(String), large_cluster_id,
473
+ :pig_version => "0.12-Hadoop-2",
474
+ :project_script_path => be_a_kind_of(String),
475
+ :parameters => [],
476
+ :notify_on_job_finish => true,
477
+ :is_control_script => false) {Excon::Response.new(:body => {"job_id" => job_id, "web_job_url" => job_url})}
478
+
479
+ write_file(File.join(p.pigscripts_path, "my_script.pig"))
480
+ stderr, stdout = execute("jobs:run pigscripts/my_script.pig --clustertags large,cool", p, @git)
481
+ stdout.should == <<-STDOUT
482
+ Running job on the cluster with tags [large,cool], id = 510bf0db3004860820ab6590, size = 5
483
+ Taking code snapshot... done
484
+ Sending code snapshot to Mortar... done
485
+ Requesting job execution... done
486
+ job_id: c571a8c7f76a4fd4a67c103d753e2dd5
487
+
488
+ Job status can be viewed on the web at:
489
+
490
+ http://127.0.0.1:5000/jobs/job_detail?job_id=c571a8c7f76a4fd4a67c103d753e2dd5
491
+
492
+ Or by running:
493
+
494
+ mortar jobs:status c571a8c7f76a4fd4a67c103d753e2dd5 --poll
495
+
496
+ STDOUT
497
+
498
+ end
499
+ end
500
+
442
501
  it "runs a job by default on the largest existing running cluster for Hadoop 2" do
443
502
  with_git_initialized_project do |p|
444
503
  job_id = "c571a8c7f76a4fd4a67c103d753e2dd5"
@@ -0,0 +1,205 @@
1
+ #
2
+ # Copyright 2014 Mortar Data Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+
17
+ require 'spec_helper'
18
+ require 'fakefs/spec_helpers'
19
+ require 'mortar/command/spark'
20
+ require 'mortar/api/jobs'
21
+
22
+ module Mortar::Command
23
+ describe Spark do
24
+
25
+ before(:each) do
26
+ stub_core
27
+ @git = Mortar::Git::Git.new
28
+ end
29
+
30
+ context("index") do
31
+ it "shows help when user adds help argument" do
32
+ with_git_initialized_project do |p|
33
+ stderr_dash_h, stdout_dash_h = execute("spark -h", p, @git)
34
+ stderr_help, stdout_help = execute("spark help", p, @git)
35
+ stdout_dash_h.should == stdout_help
36
+ stderr_dash_h.should == stderr_help
37
+ end
38
+ end
39
+
40
+ it "runs a spark job with no arguments new cluster" do
41
+ with_git_initialized_project do |p|
42
+ # stub api requests
43
+ job_id = "c571a8c7f76a4fd4a67c103d753e2dd5"
44
+ job_url = "http://127.0.0.1:5000/jobs/spark_job_detail?job_id=c571a8c7f76a4fd4a67c103d753e2dd5"
45
+ cluster_id = nil
46
+ mock(Mortar::Auth.api).post_spark_job_on_jobserver("myproject", "com.mortar.Job", is_a(String), cluster_id,
47
+ :script_arguments => '',
48
+ :clustersize => '4'
49
+ ) { Excon::Response.new(:body => {"job_id" => job_id, "web_job_url" => job_url}) }
50
+
51
+ write_file(File.join(p.sparkjobs_path, "com/mortar/Job.scala"))
52
+ stderr, stdout = execute("spark com.mortar.Job --clustersize 4", p, @git)
53
+ puts stderr
54
+ stdout.should == <<-STDOUT
55
+ Taking code snapshot... done
56
+ Sending code snapshot to Mortar... done
57
+ Requesting job execution... done
58
+ job_id: c571a8c7f76a4fd4a67c103d753e2dd5
59
+
60
+ Job status can be viewed on the web at:
61
+
62
+ http://127.0.0.1:5000/jobs/spark_job_detail?job_id=c571a8c7f76a4fd4a67c103d753e2dd5
63
+
64
+ STDOUT
65
+ end
66
+ end
67
+
68
+ it "runs a spark job with script_arguments existing cluster" do
69
+ with_git_initialized_project do |p|
70
+ # stub api requests
71
+ job_id = "c571a8c7f76a4fd4a67c103d753e2dd5"
72
+ cluster_id = "c571a8c7f76a4fd4a67c103d753e2dd7"
73
+ job_url = "http://127.0.0.1:5000/jobs/spark_job_detail?job_id=c571a8c7f76a4fd4a67c103d753e2dd5"
74
+ script_arguments = "--arg1 v1 --arg2 v2 --arg3"
75
+ mock(Mortar::Auth.api).post_spark_job_on_jobserver("myproject", "com.mortar.Job", is_a(String), cluster_id,
76
+ :script_arguments => script_arguments
77
+ ) {Excon::Response.new(:body => {"job_id" => job_id, "web_job_url" => job_url})}
78
+
79
+ write_file(File.join(p.sparkjobs_path, "com/mortar/Job.scala"))
80
+ stderr, stdout = execute("spark com.mortar.Job --clusterid #{cluster_id} #{script_arguments}", p, @git)
81
+ stdout.should == <<-STDOUT
82
+ Taking code snapshot... done
83
+ Sending code snapshot to Mortar... done
84
+ Requesting job execution... done
85
+ job_id: c571a8c7f76a4fd4a67c103d753e2dd5
86
+
87
+ Job status can be viewed on the web at:
88
+
89
+ http://127.0.0.1:5000/jobs/spark_job_detail?job_id=c571a8c7f76a4fd4a67c103d753e2dd5
90
+
91
+ STDOUT
92
+ end
93
+ end
94
+
95
+ it "runs a spark job with on free cluster" do
96
+ with_git_initialized_project do |p|
97
+ # stub api requests
98
+ job_id = "c571a8c7f76a4fd4a67c103d753e2dd5"
99
+ job_url = "http://127.0.0.1:5000/jobs/spark_job_detail?job_id=c571a8c7f76a4fd4a67c103d753e2dd5"
100
+ script_arguments = "arg1 arg2 arg3"
101
+
102
+ small_cluster_id = '510beb6b3004860820ab6538'
103
+ small_cluster_size = 2
104
+ small_cluster_status = Mortar::API::Clusters::STATUS_RUNNING
105
+ large_cluster_id = '510bf0db3004860820ab6590'
106
+ large_cluster_size = 5
107
+ large_cluster_status = Mortar::API::Clusters::STATUS_RUNNING
108
+ starting_cluster_id = '510bf0db3004860820abaaaa'
109
+ starting_cluster_size = 10
110
+ starting_cluster_status = Mortar::API::Clusters::STATUS_STARTING
111
+ huge_busy_cluster_id = '510bf0db3004860820ab6621'
112
+ huge_busy_cluster_size = 20
113
+ huge_busy_cluster_status = Mortar::API::Clusters::STATUS_RUNNING
114
+
115
+
116
+ mock(Mortar::Auth.api).get_clusters(Mortar::API::Jobs::CLUSTER_BACKEND__EMR_SPARK_JOBSERVER) {
117
+ Excon::Response.new(:body => {
118
+ 'clusters' => [
119
+ { 'cluster_id' => small_cluster_id, 'size' => small_cluster_size, 'running_jobs' => [], 'status_code' => small_cluster_status },
120
+ { 'cluster_id' => large_cluster_id, 'size' => large_cluster_size, 'running_jobs' => [], 'status_code' => large_cluster_status },
121
+ { 'cluster_id' => starting_cluster_id, 'size' => starting_cluster_size, 'running_jobs' => [], 'status_code' => starting_cluster_status },
122
+ { 'cluster_id' => huge_busy_cluster_id, 'size' => huge_busy_cluster_size,
123
+ 'running_jobs' => [ { 'job_id' => 'c571a8c7f76a4fd4a67c103d753e2dd5',
124
+ 'job_name' => "", 'start_timestamp' => ""} ], 'status_code' => huge_busy_cluster_status }
125
+ ]})
126
+ }
127
+
128
+ mock(Mortar::Auth.api).post_spark_job_on_jobserver("myproject", "com.mortar.Job", is_a(String), large_cluster_id,
129
+ :script_arguments => script_arguments
130
+ ) {Excon::Response.new(:body => {"job_id" => job_id, "web_job_url" => job_url})}
131
+
132
+ write_file(File.join(p.sparkjobs_path, "com/mortar/Job.scala"))
133
+ stderr, stdout = execute("spark com.mortar.Job #{script_arguments}", p, @git)
134
+ stdout.should == <<-STDOUT
135
+ Taking code snapshot... done
136
+ Sending code snapshot to Mortar... done
137
+ Defaulting to running job on largest existing free cluster, id = 510bf0db3004860820ab6590, size = 5
138
+ Requesting job execution... done
139
+ job_id: c571a8c7f76a4fd4a67c103d753e2dd5
140
+
141
+ Job status can be viewed on the web at:
142
+
143
+ http://127.0.0.1:5000/jobs/spark_job_detail?job_id=c571a8c7f76a4fd4a67c103d753e2dd5
144
+
145
+ STDOUT
146
+ end
147
+ end
148
+
149
+ it "runs a spark job using cluster tags" do
150
+ with_git_initialized_project do |p|
151
+ # stub api requests
152
+ job_id = "c571a8c7f76a4fd4a67c103d753e2dd5"
153
+ job_url = "http://127.0.0.1:5000/jobs/spark_job_detail?job_id=c571a8c7f76a4fd4a67c103d753e2dd5"
154
+ script_arguments = "arg1 arg2 arg3"
155
+
156
+ small_cluster_id = '510beb6b3004860820ab6538'
157
+ small_cluster_size = 2
158
+ small_cluster_status = Mortar::API::Clusters::STATUS_RUNNING
159
+ large_cluster_id = '510bf0db3004860820ab6590'
160
+ large_cluster_size = 5
161
+ large_cluster_status = Mortar::API::Clusters::STATUS_RUNNING
162
+ starting_cluster_id = '510bf0db3004860820abaaaa'
163
+ starting_cluster_size = 10
164
+ starting_cluster_status = Mortar::API::Clusters::STATUS_STARTING
165
+ huge_busy_cluster_id = '510bf0db3004860820ab6621'
166
+ huge_busy_cluster_size = 20
167
+ huge_busy_cluster_status = Mortar::API::Clusters::STATUS_RUNNING
168
+
169
+
170
+ mock(Mortar::Auth.api).get_clusters(Mortar::API::Jobs::CLUSTER_BACKEND__EMR_SPARK_JOBSERVER) {
171
+ Excon::Response.new(:body => {
172
+ 'clusters' => [
173
+ { 'cluster_id' => small_cluster_id, 'size' => small_cluster_size, 'running_jobs' => [], 'status_code' => small_cluster_status, 'tags' => ['small'] },
174
+ { 'cluster_id' => large_cluster_id, 'size' => large_cluster_size, 'running_jobs' => [], 'status_code' => large_cluster_status, 'tags' => ['large', 'cool'] },
175
+ { 'cluster_id' => starting_cluster_id, 'size' => starting_cluster_size, 'running_jobs' => [], 'status_code' => starting_cluster_status, 'tags' => ['starting'] },
176
+ { 'cluster_id' => huge_busy_cluster_id, 'size' => huge_busy_cluster_size, 'tags' => ['huge'],
177
+ 'running_jobs' => [ { 'job_id' => 'c571a8c7f76a4fd4a67c103d753e2dd5',
178
+ 'job_name' => "", 'start_timestamp' => ""} ], 'status_code' => huge_busy_cluster_status }
179
+ ]})
180
+ }
181
+
182
+ mock(Mortar::Auth.api).post_spark_job_on_jobserver("myproject", "com.mortar.Job", is_a(String), large_cluster_id,
183
+ :script_arguments => script_arguments
184
+ ) {Excon::Response.new(:body => {"job_id" => job_id, "web_job_url" => job_url})}
185
+
186
+ write_file(File.join(p.sparkjobs_path, "com/mortar/Job.scala"))
187
+ stderr, stdout = execute("spark com.mortar.Job #{script_arguments} --clustertags large,cool", p, @git)
188
+ stdout.should == <<-STDOUT
189
+ Taking code snapshot... done
190
+ Sending code snapshot to Mortar... done
191
+ Running job on the cluster with tags [large,cool], id = 510bf0db3004860820ab6590, size = 5
192
+ Requesting job execution... done
193
+ job_id: c571a8c7f76a4fd4a67c103d753e2dd5
194
+
195
+ Job status can be viewed on the web at:
196
+
197
+ http://127.0.0.1:5000/jobs/spark_job_detail?job_id=c571a8c7f76a4fd4a67c103d753e2dd5
198
+
199
+ STDOUT
200
+ end
201
+ end
202
+
203
+ end
204
+ end
205
+ end
metadata CHANGED
@@ -1,195 +1,195 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: mortar
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.15.38
4
+ version: 0.15.39
5
5
  platform: ruby
6
6
  authors:
7
7
  - Mortar Data
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2016-03-16 00:00:00.000000000 Z
11
+ date: 2016-03-28 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: rdoc
15
15
  requirement: !ruby/object:Gem::Requirement
16
16
  requirements:
17
- - - ">="
17
+ - - ! '>='
18
18
  - !ruby/object:Gem::Version
19
19
  version: 4.0.0
20
20
  type: :runtime
21
21
  prerelease: false
22
22
  version_requirements: !ruby/object:Gem::Requirement
23
23
  requirements:
24
- - - ">="
24
+ - - ! '>='
25
25
  - !ruby/object:Gem::Version
26
26
  version: 4.0.0
27
27
  - !ruby/object:Gem::Dependency
28
28
  name: mortar-api-ruby
29
29
  requirement: !ruby/object:Gem::Requirement
30
30
  requirements:
31
- - - "~>"
31
+ - - ~>
32
32
  - !ruby/object:Gem::Version
33
33
  version: 0.8.15
34
34
  type: :runtime
35
35
  prerelease: false
36
36
  version_requirements: !ruby/object:Gem::Requirement
37
37
  requirements:
38
- - - "~>"
38
+ - - ~>
39
39
  - !ruby/object:Gem::Version
40
40
  version: 0.8.15
41
41
  - !ruby/object:Gem::Dependency
42
42
  name: netrc
43
43
  requirement: !ruby/object:Gem::Requirement
44
44
  requirements:
45
- - - "~>"
45
+ - - ~>
46
46
  - !ruby/object:Gem::Version
47
47
  version: '0.7'
48
48
  type: :runtime
49
49
  prerelease: false
50
50
  version_requirements: !ruby/object:Gem::Requirement
51
51
  requirements:
52
- - - "~>"
52
+ - - ~>
53
53
  - !ruby/object:Gem::Version
54
54
  version: '0.7'
55
55
  - !ruby/object:Gem::Dependency
56
56
  name: launchy
57
57
  requirement: !ruby/object:Gem::Requirement
58
58
  requirements:
59
- - - "~>"
59
+ - - ~>
60
60
  - !ruby/object:Gem::Version
61
61
  version: '2.1'
62
62
  type: :runtime
63
63
  prerelease: false
64
64
  version_requirements: !ruby/object:Gem::Requirement
65
65
  requirements:
66
- - - "~>"
66
+ - - ~>
67
67
  - !ruby/object:Gem::Version
68
68
  version: '2.1'
69
69
  - !ruby/object:Gem::Dependency
70
70
  name: parseconfig
71
71
  requirement: !ruby/object:Gem::Requirement
72
72
  requirements:
73
- - - "~>"
73
+ - - ~>
74
74
  - !ruby/object:Gem::Version
75
75
  version: 1.0.2
76
76
  type: :runtime
77
77
  prerelease: false
78
78
  version_requirements: !ruby/object:Gem::Requirement
79
79
  requirements:
80
- - - "~>"
80
+ - - ~>
81
81
  - !ruby/object:Gem::Version
82
82
  version: 1.0.2
83
83
  - !ruby/object:Gem::Dependency
84
84
  name: aws-sdk
85
85
  requirement: !ruby/object:Gem::Requirement
86
86
  requirements:
87
- - - "~>"
87
+ - - ~>
88
88
  - !ruby/object:Gem::Version
89
89
  version: '1.0'
90
90
  type: :runtime
91
91
  prerelease: false
92
92
  version_requirements: !ruby/object:Gem::Requirement
93
93
  requirements:
94
- - - "~>"
94
+ - - ~>
95
95
  - !ruby/object:Gem::Version
96
96
  version: '1.0'
97
97
  - !ruby/object:Gem::Dependency
98
98
  name: nokogiri
99
99
  requirement: !ruby/object:Gem::Requirement
100
100
  requirements:
101
- - - "~>"
101
+ - - ~>
102
102
  - !ruby/object:Gem::Version
103
103
  version: 1.5.0
104
104
  type: :runtime
105
105
  prerelease: false
106
106
  version_requirements: !ruby/object:Gem::Requirement
107
107
  requirements:
108
- - - "~>"
108
+ - - ~>
109
109
  - !ruby/object:Gem::Version
110
110
  version: 1.5.0
111
111
  - !ruby/object:Gem::Dependency
112
112
  name: excon
113
113
  requirement: !ruby/object:Gem::Requirement
114
114
  requirements:
115
- - - "~>"
115
+ - - ~>
116
116
  - !ruby/object:Gem::Version
117
117
  version: '0.28'
118
118
  type: :development
119
119
  prerelease: false
120
120
  version_requirements: !ruby/object:Gem::Requirement
121
121
  requirements:
122
- - - "~>"
122
+ - - ~>
123
123
  - !ruby/object:Gem::Version
124
124
  version: '0.28'
125
125
  - !ruby/object:Gem::Dependency
126
126
  name: fakefs
127
127
  requirement: !ruby/object:Gem::Requirement
128
128
  requirements:
129
- - - "~>"
129
+ - - ~>
130
130
  - !ruby/object:Gem::Version
131
131
  version: 0.4.2
132
132
  type: :development
133
133
  prerelease: false
134
134
  version_requirements: !ruby/object:Gem::Requirement
135
135
  requirements:
136
- - - "~>"
136
+ - - ~>
137
137
  - !ruby/object:Gem::Version
138
138
  version: 0.4.2
139
139
  - !ruby/object:Gem::Dependency
140
140
  name: gem-release
141
141
  requirement: !ruby/object:Gem::Requirement
142
142
  requirements:
143
- - - ">="
143
+ - - ! '>='
144
144
  - !ruby/object:Gem::Version
145
145
  version: '0'
146
146
  type: :development
147
147
  prerelease: false
148
148
  version_requirements: !ruby/object:Gem::Requirement
149
149
  requirements:
150
- - - ">="
150
+ - - ! '>='
151
151
  - !ruby/object:Gem::Version
152
152
  version: '0'
153
153
  - !ruby/object:Gem::Dependency
154
154
  name: rake
155
155
  requirement: !ruby/object:Gem::Requirement
156
156
  requirements:
157
- - - "~>"
157
+ - - ~>
158
158
  - !ruby/object:Gem::Version
159
159
  version: 10.1.1
160
160
  type: :development
161
161
  prerelease: false
162
162
  version_requirements: !ruby/object:Gem::Requirement
163
163
  requirements:
164
- - - "~>"
164
+ - - ~>
165
165
  - !ruby/object:Gem::Version
166
166
  version: 10.1.1
167
167
  - !ruby/object:Gem::Dependency
168
168
  name: rr
169
169
  requirement: !ruby/object:Gem::Requirement
170
170
  requirements:
171
- - - ">="
171
+ - - ! '>='
172
172
  - !ruby/object:Gem::Version
173
173
  version: '0'
174
174
  type: :development
175
175
  prerelease: false
176
176
  version_requirements: !ruby/object:Gem::Requirement
177
177
  requirements:
178
- - - ">="
178
+ - - ! '>='
179
179
  - !ruby/object:Gem::Version
180
180
  version: '0'
181
181
  - !ruby/object:Gem::Dependency
182
182
  name: rspec
183
183
  requirement: !ruby/object:Gem::Requirement
184
184
  requirements:
185
- - - "~>"
185
+ - - ~>
186
186
  - !ruby/object:Gem::Version
187
187
  version: '2.0'
188
188
  type: :development
189
189
  prerelease: false
190
190
  version_requirements: !ruby/object:Gem::Requirement
191
191
  requirements:
192
- - - "~>"
192
+ - - ~>
193
193
  - !ruby/object:Gem::Version
194
194
  version: '2.0'
195
195
  description: Client library and command-line tool to interact with the Mortar service.
@@ -322,6 +322,7 @@ files:
322
322
  - spec/mortar/command/pigscripts_spec.rb
323
323
  - spec/mortar/command/projects_spec.rb
324
324
  - spec/mortar/command/s3_spec.rb
325
+ - spec/mortar/command/spark_spec.rb
325
326
  - spec/mortar/command/validate_spec.rb
326
327
  - spec/mortar/command/version_spec.rb
327
328
  - spec/mortar/command_spec.rb
@@ -351,19 +352,18 @@ require_paths:
351
352
  - lib
352
353
  required_ruby_version: !ruby/object:Gem::Requirement
353
354
  requirements:
354
- - - ">="
355
+ - - ! '>='
355
356
  - !ruby/object:Gem::Version
356
357
  version: 1.8.7
357
358
  required_rubygems_version: !ruby/object:Gem::Requirement
358
359
  requirements:
359
- - - ">="
360
+ - - ! '>='
360
361
  - !ruby/object:Gem::Version
361
362
  version: '0'
362
363
  requirements: []
363
364
  rubyforge_project:
364
- rubygems_version: 2.4.8
365
+ rubygems_version: 2.4.3
365
366
  signing_key:
366
367
  specification_version: 4
367
368
  summary: Client library and CLI to interact with the Mortar service.
368
369
  test_files: []
369
- has_rdoc: