browsery 0.1.0 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.ruby-version +1 -0
- data/bin/browsery +5 -0
- data/browsery.gemspec +8 -0
- data/lib/browsery.rb +22 -0
- data/lib/browsery/connector.rb +287 -0
- data/lib/browsery/console.rb +15 -0
- data/lib/browsery/init.rb +60 -0
- data/lib/browsery/logger.rb +12 -0
- data/lib/browsery/page_objects.rb +23 -0
- data/lib/browsery/page_objects/base.rb +266 -0
- data/lib/browsery/page_objects/element_container.rb +50 -0
- data/lib/browsery/page_objects/overlay/base.rb +85 -0
- data/lib/browsery/page_objects/widgets/base.rb +52 -0
- data/lib/browsery/parallel.rb +265 -0
- data/lib/browsery/runner.rb +111 -0
- data/lib/browsery/settings.rb +114 -0
- data/lib/browsery/test_case.rb +266 -0
- data/lib/browsery/test_cases.rb +7 -0
- data/lib/browsery/utils.rb +10 -0
- data/lib/browsery/utils/assertion_helper.rb +35 -0
- data/lib/browsery/utils/castable.rb +103 -0
- data/lib/browsery/utils/data_generator_helper.rb +145 -0
- data/lib/browsery/utils/loggable.rb +16 -0
- data/lib/browsery/utils/overlay_and_widget_helper.rb +78 -0
- data/lib/browsery/utils/page_object_helper.rb +263 -0
- data/lib/browsery/version.rb +1 -1
- data/lib/minitap/minitest5_browsery.rb +22 -0
- data/lib/minitest/autobot_settings_plugin.rb +83 -0
- data/lib/selenium/webdriver/common/element_browsery.rb +21 -0
- data/lib/tapout/custom_reporters/fancy_tap_reporter.rb +94 -0
- data/lib/yard/tagged_test_case_handler.rb +61 -0
- metadata +131 -5
data/lib/browsery/parallel.rb
@@ -0,0 +1,265 @@
+module Browsery
+  class Parallel
+
+    attr_reader :all_tests, :simultaneous_jobs
+
+    def initialize(simultaneous_jobs, all_tests)
+      @start_time = Time.now
+
+      @result_dir = 'logs/tap_results'
+
+      connector = Browsery.settings.connector
+      @on_sauce = true if connector.include? 'saucelabs'
+      @platform = connector.split(':')[2] || ''
+
+      @simultaneous_jobs = simultaneous_jobs
+      @simultaneous_jobs = 10 if run_on_mac? # saucelabs account limit for parallel is 10 for mac
+      @all_tests = all_tests
+
+      @pids = []
+      @static_run_command = "browsery -c #{Browsery.settings.connector} -e #{Browsery.settings.env}"
+      if Browsery.settings.rerun_failure
+        @static_run_command += " -R #{Browsery.settings.rerun_failure}"
+      end
+      tap_reporter_path = Browsery.gem_root.join('lib/tapout/custom_reporters/fancy_tap_reporter.rb')
+      @pipe_tap = "--tapy | tapout --no-color -r #{tap_reporter_path.to_s} fancytap"
+    end
+
+    # return true only if specified to run on mac in connector
+    # @return [boolean]
+    def run_on_mac?
+      @platform.include?('osx')
+    end
+
+    # remove all results files under @result_dir if there's any
+    def clean_result!
+      raise Exception, '@result_dir is not set' if @result_dir.nil?
+      unless Dir.glob("#{@result_dir}/*").empty?
+        FileUtils.rm_rf(Dir.glob("#{@result_dir}/*"))
+      end
+      puts "Cleaning result files.\n"
+    end
+
+    def remove_redundant_tap
+      ever_failed_tests_file = "#{@result_dir}/ever_failed_tests.json"
+      if File.file? ever_failed_tests_file
+        data_hash = JSON.parse(File.read(ever_failed_tests_file))
+        data_hash.keys.each do |test|
+          if test.start_with? 'test_'
+            tap_result_file = "#{@result_dir}/#{test}.t"
+            result_lines = IO.readlines(tap_result_file)
+            last_tap_start_index = 0
+            last_tap_end_index = result_lines.size - 1
+            result_lines.each_with_index do |l, index|
+              last_tap_start_index = index if l.delete!("\n") == '1..1'
+            end
+            File.open(tap_result_file, 'w') do |f|
+              f.puts result_lines[last_tap_start_index..last_tap_end_index]
+            end
+            puts "Processed #{tap_result_file}"
+          else
+            next
+          end
+        end
+      else
+        puts "==> File #{ever_failed_tests_file} doesn't exist - all tests passed!"
+      end
+    end
+
+    # Aggregate all individual test_*.t files
+    # replace them with one file - test_aggregated_result.tap
+    # so they will be considered as one test plan by tap result parser
+    def aggregate_tap_results
+      results_count = Dir.glob("#{@result_dir}/*.t").size
+      File.open("#{@result_dir}/test_aggregated_result.tap", 'a+') do |result_file|
+        result_stats = {
+          'pass' => 0,
+          'fail' => 0,
+          'errs' => 0,
+          'todo' => 0,
+          'omit' => 0
+        }
+        result_stats_line_start = ' # 1 tests:'
+        result_file.puts "1..#{results_count}"
+        file_count = 0
+        Dir.glob("#{@result_dir}/*.t") do |filename|
+          file_count += 1
+          File.open(filename, 'r') do |file|
+            breakpoint_line = 0
+            file.each_with_index do |line, index|
+              next if index == 0 || (breakpoint_line > 0 && index > breakpoint_line)
+              if line.start_with?(result_stats_line_start)
+                pass, fail, errs, todo, omit = line.match(/(\d+) pass, (\d+) fail, (\d+) errs, (\d+) todo, (\d+) omit/).captures
+                one_test_result = {
+                  'pass' => pass.to_i,
+                  'fail' => fail.to_i,
+                  'errs' => errs.to_i,
+                  'todo' => todo.to_i,
+                  'omit' => omit.to_i
+                }
+                result_stats = result_stats.merge(one_test_result) { |k, total, one| total + one }
+                breakpoint_line = index
+              elsif line.strip == '#'
+                next
+              else
+                if line.start_with?('ok 1') || line.start_with?('not ok 1')
+                  line_begin, line_end = line.split('1 -')
+                  result_file.puts [line_begin, line_end].join("#{file_count} -")
+                else
+                  result_file.puts line
+                end
+              end
+            end
+          end
+          File.delete(filename)
+        end
+        result_file.puts ' #'
+        result_file.puts " # #{results_count} tests: #{result_stats['pass']} pass, #{result_stats['fail']} fail, #{result_stats['errs']} errs, #{result_stats['todo']} todo, #{result_stats['omit']} omit"
+        result_file.puts " # [00:00:00.00 0.00t/s 00.0000s/t] Finished at: #{Time.now}"
+      end
+    end
+
+    def count_browsery_process
+      counting_process_output = IO.popen "ps -ef | grep 'bin/#{@static_run_command}' -c"
+      counting_process_output.readlines[0].to_i - 1 # minus grep process
+    end
+
+    # run multiple commands with logging to start multiple tests in parallel
+    # @param [Integer, Array]
+    # n = number of tests will be running in parallel
+    def run_in_parallel!
+      size = all_tests.size
+      if size <= simultaneous_jobs
+        run_test_set(all_tests)
+        puts "CAUTION! All #{size} tests are starting at the same time!"
+        puts "will not really run it since computer will die" if size > 30
+        sleep 20
+      else
+        first_test_set = all_tests[0, simultaneous_jobs]
+        all_to_run = all_tests[simultaneous_jobs..(all_tests.size - 1)]
+        run_test_set(first_test_set)
+        keep_running_full(all_to_run)
+      end
+
+      Process.waitall
+      puts "\nAll Complete! Started at #{@start_time} and finished at #{Time.now}\n"
+    end
+
+    # runs each test from a test set in a separate child process
+    def run_test_set(test_set)
+      test_set.each do |test|
+        run_command = "#{@static_run_command} -n #{test} #{@pipe_tap} > #{@result_dir}/#{test}.t"
+        pipe = IO.popen(run_command)
+        puts "Running #{test} #{pipe.pid}"
+      end
+    end
+
+    # recursively keep running #{simultaneous_jobs} number of tests in parallel
+    # exit when no test left to run
+    def keep_running_full(all_to_run)
+      running_subprocess_count = count_browsery_process - 1 # minus parent process
+      puts "WARNING: running_subprocess_count = #{running_subprocess_count}
+            is more than what it is supposed to run(#{simultaneous_jobs}),
+            notify browsery maintainers" if running_subprocess_count > simultaneous_jobs + 1
+      while running_subprocess_count >= simultaneous_jobs
+        sleep 5
+        running_subprocess_count = count_browsery_process - 1
+      end
+      to_run_count = simultaneous_jobs - running_subprocess_count
+      tests_to_run = all_to_run.slice!(0, to_run_count)
+
+      run_test_set(tests_to_run)
+
+      keep_running_full(all_to_run) if all_to_run.size > 0
+    end
+
+    # @deprecated Use more native wait/check of Process
+    def wait_for_pids(pids)
+      running_pids = pids # assume all pids are running at this moment
+      while running_pids.size > 1
+        sleep 5
+        puts "running_pids = #{running_pids}"
+        running_pids.each do |pid|
+          unless process_running?(pid)
+            puts "#{pid} is not running, removing it from pool"
+            running_pids.delete(pid)
+          end
+        end
+      end
+    end
+
+    # @deprecated Too time consuming and fragile, should use more native wait/check of Process
+    def wait_all_done_saucelabs
+      size = all_tests.size
+      job_statuses = saucelabs_last_n_statuses(size)
+      while job_statuses.include?('in progress')
+        puts "There are tests still running, waiting..."
+        sleep 20
+        job_statuses = saucelabs_last_n_statuses(size)
+      end
+    end
+
+    private
+
+    # call saucelabs REST API to get last #{limit} jobs' statuses
+    # possible job status: complete, error, in progress
+    def saucelabs_last_n_statuses(limit)
+      username = Browsery.settings.sauce_username
+      access_key = Browsery.settings.sauce_access_key
+
+      # call api to get most recent #{limit} jobs' ids
+      http_auth = "https://#{username}:#{access_key}@saucelabs.com/rest/v1/#{username}/jobs?limit=#{limit}"
+      response = get_response_with_retry(http_auth) # response was originally an array of hashs, but RestClient converts it to a string
+      # convert response back to array
+      response[0] = ''
+      response[response.length-1] = ''
+      array_of_hash = response.split(',')
+      id_array = Array.new
+      array_of_hash.each do |hash|
+        hash = hash.gsub(':', '=>')
+        hash = eval(hash)
+        id_array << hash['id'] # each hash contains key 'id' and value of id
+      end
+
+      # call api to get job statuses
+      statuses = Array.new
+      id_array.each do |id|
+        http_auth = "https://#{username}:#{access_key}@saucelabs.com/rest/v1/#{username}/jobs/#{id}"
+        response = get_response_with_retry(http_auth)
+        begin
+          # convert response back to hash
+          str = response.gsub(':', '=>')
+          # this is a good example why using eval is dangerous, the string has to contain only proper Ruby syntax, here it has 'null' instead of 'nil'
+          formatted_str = str.gsub('null', 'nil')
+          hash = eval(formatted_str)
+          statuses << hash['status']
+        rescue SyntaxError
+          puts "SyntaxError, response from saucelabs has syntax error"
+        end
+      end
+      return statuses
+    end
+
+    def get_response_with_retry(url)
+      retries = 5 # number of retries
+      begin
+        response = RestClient.get(url) # returns a String
+      rescue
+        puts "Failed at getting response from #{url} via RestClient \n Retrying..."
+        retries -= 1
+        retry if retries > 0
+        response = RestClient.get(url) # retry the last time, fail if it still throws exception
+      end
+    end
+
+    def process_running?(pid)
+      begin
+        Process.getpgid(pid)
+        true
+      rescue Errno::ESRCH
+        false
+      end
+    end
+
+  end
+end
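
How these pieces fit together is not documented in the diff itself; the sketch below is not part of the gem and uses made-up test names, but it shows one plausible way to drive Parallel with the methods added above, assuming Browsery.settings has already been populated.

    # Illustrative sketch only -- not shipped with browsery.
    tests  = %w(test_example_login test_example_search)   # hypothetical test names
    runner = Browsery::Parallel.new(5, tests)
    runner.clean_result!           # empty logs/tap_results before the run
    runner.run_in_parallel!        # forks one `browsery -n <test>` child per test, then waits
    runner.remove_redundant_tap    # keep only the last TAP plan for tests that were rerun
    runner.aggregate_tap_results   # merge all test_*.t files into test_aggregated_result.tap
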
data/lib/browsery/runner.rb
@@ -0,0 +1,111 @@
+module Browsery
+  class Runner
+
+    attr_accessor :options
+    @after_hooks = []
+    @@rerun_count = 0
+
+    def self.after_run(&blk)
+      @after_hooks << blk
+    end
+
+    def self.run!(args)
+      exit_code = self.run(args)
+      @after_hooks.reverse_each(&:call)
+      Kernel.exit(exit_code || false)
+    end
+
+    def self.run args = []
+      Minitest.load_plugins
+
+      @options = Minitest.process_args args
+
+      self.before_run
+
+      reporter = self.single_run
+
+      rerun_failure = @options[:rerun_failure]
+      if rerun_failure && !reporter.passed?
+        while @@rerun_count < rerun_failure && !reporter.passed?
+          reporter = self.single_run
+          @@rerun_count += 1
+        end
+      end
+
+      reporter.passed?
+    end
+
+    # Inialize a new reporter, run test
+    # Return reporter, which carrys test result
+    def self.single_run
+      reporter = Minitest::CompositeReporter.new
+      reporter << Minitest::SummaryReporter.new(@options[:io], @options)
+      reporter << Minitest::ProgressReporter.new(@options[:io], @options)
+
+      Minitest.reporter = reporter # this makes it available to plugins
+      Minitest.init_plugins @options
+      Minitest.reporter = nil # runnables shouldn't depend on the reporter, ever
+
+      reporter.start
+      Minitest.__run reporter, @options
+      Minitest.parallel_executor.shutdown
+      reporter.report
+
+      reporter
+    end
+
+    # before hook where you have parsed @options when loading tests
+    def self.before_run
+      tests_yml_full_path = Browsery.root.join('config/browsery', 'tests.yml').to_s
+      if File.exist? tests_yml_full_path
+        self.load_tests(tests_yml_full_path)
+      else
+        puts "Config file #{tests_yml_full_path} doesn't exist"
+        puts "browsery doesn't know where your tests are located and how they are structured"
+      end
+    end
+
+    # only load tests you need by specifying env option in command line
+    def self.load_tests(tests_yml_full_path)
+      tests_yml = YAML.load_file tests_yml_full_path
+
+      self.check_config(tests_yml)
+
+      tests_dir_relative_path = tests_yml['tests_dir']['relative_path']
+      multi_host_flag = tests_yml['tests_dir']['multi-host']
+      default_host = tests_yml['tests_dir']['default_host']
+      host = @options[:env].split(/_/)[0] rescue default_host
+
+      self.configure_load_path(tests_dir_relative_path)
+
+      # load page_objects.rb first
+      Dir.glob("#{tests_dir_relative_path}/#{multi_host_flag ? host+'/' : ''}*.rb") do |f|
+        f.sub!(/^#{tests_dir_relative_path}\//, '')
+        require f
+      end
+
+      # files under subdirectories shouldn't be loaded, eg. archive/
+      Dir.glob("#{tests_dir_relative_path}/#{multi_host_flag ? host+'/' : ''}test_cases/*.rb") do |f|
+        f.sub!(/^#{tests_dir_relative_path}\//, '')
+        require f
+      end
+    end
+
+    def self.check_config(tests_yml)
+      raise "relative_path must be provided in #{tests_yml}" unless tests_yml['tests_dir']['relative_path'].is_a? String
+      raise "multi-host must be provided in #{tests_yml}" unless [true, false].include?(tests_yml['tests_dir']['multi-host'])
+      raise "default_host must be provided in #{tests_yml}" unless tests_yml['tests_dir']['default_host'].is_a? String
+    end
+
+    def self.configure_load_path(tests_dir_relative_path)
+      tests_dir_full_path = Browsery.root.join(tests_dir_relative_path).to_s
+      if Dir.exist? tests_dir_full_path
+        $LOAD_PATH << tests_dir_full_path
+      else
+        puts "Tests directory #{tests_dir_full_path} doesn't exist"
+        puts "No test will run."
+      end
+    end
+
+  end
+end
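
The runner wires Minitest up by hand rather than calling Minitest.run, and run! terminates the process via Kernel.exit once the after-run hooks have fired. A minimal invocation might look like the sketch below; it is illustrative only, the -n filter value and the tests.yml values are hypothetical, but the keys are exactly the ones check_config validates.

    # Expected shape of config/browsery/tests.yml (values hypothetical):
    #
    #   tests_dir:
    #     relative_path: tests   # String; directory added to $LOAD_PATH
    #     multi-host: false      # true/false; whether tests are grouped per host directory
    #     default_host: www      # String; used when the env option cannot be parsed
    #
    Browsery::Runner.run!(['-n', 'test_example_login'])  # exits the process with pass/fail
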
data/lib/browsery/settings.rb
@@ -0,0 +1,114 @@
+module Browsery
+
+  # An object that holds runtime settings.
+  #
+  # Furthermore, Minitest doesn't provide any good way of passing a hash of
+  # options to each test.
+  #
+  # TODO: We're importing ActiveSupport's extensions to Hash, which means that
+  # we'll be amending the way Hash objects work; once AS updates themselves to
+  # ruby 2.0 refinements, let's move towards that.
+  class Settings
+
+    def initialize
+      @hsh = {}
+    end
+
+    def inspect
+      settings = self.class.public_instance_methods(false).sort.map(&:inspect).join(', ')
+      "#<Browsery::Settings #{settings}>"
+    end
+
+    def auto_finalize?
+      hsh.fetch(:auto_finalize, true)
+    end
+
+    def connector
+      hsh.fetch(:connector, :firefox).to_s
+    end
+
+    def env
+      # add a gitignored env file which stores a default env
+      # pass the default env in as default
+      hsh.fetch(:env, :rent_qa).to_s
+    end
+
+    def sauce_session_http_auth(driver)
+      session_id = driver.session_id
+      "https://#{sauce_username}:#{sauce_access_key}@saucelabs.com/rest/v1/#{sauce_username}/jobs/#{session_id}"
+    end
+
+    def sauce_username
+      sauce_user["user"]
+    end
+
+    def sauce_access_key
+      sauce_user["pass"]
+    end
+
+    def io
+      hsh[:io]
+    end
+
+    def merge!(other)
+      hsh.merge!(other.symbolize_keys)
+      self
+    end
+
+    # can be used as a flag no matter parallel option is used in command line or not
+    # can also be used to fetch the value if a valid value is specified
+    def parallel
+      if hsh[:parallel] == 0
+        return nil
+      else
+        hsh.fetch(:parallel).to_i
+      end
+    end
+
+    def raw_arguments
+      hsh.fetch(:args, nil).to_s
+    end
+
+    def reuse_driver?
+      hsh.fetch(:reuse_driver, false)
+    end
+
+    def rerun_failure
+      hsh.fetch(:rerun_failure)
+    end
+
+    def seed
+      hsh.fetch(:seed, nil).to_i
+    end
+
+    def tags
+      hsh[:tags] ||= []
+    end
+
+    def verbose?
+      verbosity_level > 0
+    end
+
+    def verbosity_level
+      hsh.fetch(:verbosity_level, 0).to_i
+    end
+
+    private
+    attr_reader :hsh
+
+    def sauce_user
+      overrides = connector.split(/:/)
+      file_name = overrides.shift
+      path = Browsery.root.join('config/browsery', 'connectors')
+      filepath = path.join("#{file_name}.yml")
+      raise ArgumentError, "Cannot load profile #{file_name.inspect} because #{filepath.inspect} does not exist" unless filepath.exist?
+
+      cfg = YAML.load(File.read(filepath))
+      cfg = Connector.resolve(cfg, overrides)
+      cfg.freeze
+      cfg["hub"]
+    end
+
+  end
+
+end