taf 0.3.6 → 0.3.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/main.rb +3 -5
- data/lib/report/junit_report.rb +7 -6
- data/lib/report/report.rb +4 -4
- data/lib/report/report_summary.rb +6 -7
- data/lib/utils/create_directories.rb +6 -7
- data/lib/version.rb +1 -1
- metadata +2 -2
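
Every functional change in this release is one refactor: the separate total_passes / total_failures / total_skipped arguments are bundled into a single total_metrics array that Main builds once and passes to both ReportSummary and JunitReport. A minimal, self-contained sketch of that pattern follows (the method body and the counts are illustrative only, not the gem's actual code):

    # Build the metrics array once, then let each reporter unpack it by position.
    def overall_test_summary(ts_start_time, ts_end_time, total_metrics)
      passes, failures, skipped = total_metrics
      puts "Duration: #{ts_end_time - ts_start_time}s"
      puts "Passed: #{passes}, Failed: #{failures}, Skipped: #{skipped}"
      puts "Total: #{total_metrics.sum}"
    end

    total_metrics = [12, 1, 3] # [passes, failures, skipped] (hypothetical counts)
    overall_test_summary(Time.now - 42, Time.now, total_metrics)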
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 436860d3248c2b1548a9409a70f6edafe44c1f3239c8ec03fb1991b69e6a45e5
+  data.tar.gz: 97ded2847979e394f7a01680d1274fea84d69aa65b4f46b4612c22c541cf96eb
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9a814d67bcbc4f17493282b383b4874a2bd6b94e09db6e6478a897ee477f825a3baebdbb14e6ca6630044721a71bd3c105d0aaecd0aafb93538da4820332de04
+  data.tar.gz: '04829e2953ad7ab526f8603ecec94f6373244843f1b353848b9eba45e24d53e431113b7b9059705506097ca364a049c1379b4d7ee007f39285c18d69317586bc'
data/lib/main.rb
CHANGED
@@ -18,15 +18,13 @@ module Main
 
     # process the test files to execute the tests
     total_passes, total_failures, total_skipped = TestEngine.process_testfiles
-
+    total_metrics = [total_passes, total_failures, total_skipped]
     # get the overall test suite end time
     ts_end_time = Report.current_time
 
     # output the overall test summary
-    ReportSummary.overall_test_summary(ts_start_time, ts_end_time,
-
-    JunitReport.test_summary_junit(ts_start_time, ts_end_time, total_passes,
-                                   total_failures, total_skipped)
+    ReportSummary.overall_test_summary(ts_start_time, ts_end_time, total_metrics)
+    JunitReport.test_summary_junit(ts_start_time, ts_end_time, total_metrics)
 
     # Exit status code.
     Process.exit(total_failures.zero? ? 0 : 1)
data/lib/report/junit_report.rb
CHANGED
@@ -29,17 +29,16 @@ module JunitReport
     }
   end
 
-  def self.test_summary_junit(ts_start_time, ts_end_time,
-                              total_failures, total_skipped)
+  def self.test_summary_junit(ts_start_time, ts_end_time, total_metrics)
     # output to XML file format for Junit for CI.
     builder = Nokogiri::XML::Builder.new(encoding: 'UTF-8') do |xml|
       testsuite_attrs = {
         'classname' => CMDLine.tests_folder.to_s,
         'name' => CMDLine.tests_folder.to_s,
-        'tests' =>
-        'failures' =>
+        'tests' => total_metrics[0].to_s,
+        'failures' => total_metrics[1].to_s,
         'timestamp' => ts_start_time.to_s,
-        'skipped' =>
+        'skipped' => total_metrics[2].to_s,
         'time' => TimeDifference.between(ts_end_time, ts_start_time)
                                 .in_seconds
       }
@@ -64,7 +63,9 @@ module JunitReport
 
     # the test suite summary is a XML report generated will be called
     # 'report_uuid.xml'
-
+    project_id = JsonParser.project_id.delete(' ')
+    xml_dir = File.join('Results', project_id)
+    ts_xml_file = "#{xml_dir}/report_#{SecureRandom.uuid}.xml"
 
     ts_summary_file_xml = File.open(ts_xml_file, 'w')
     ts_summary_file_xml.write builder.to_xml
data/lib/report/report.rb
CHANGED
@@ -40,11 +40,11 @@ module Report
   # print the Pass / Fail status of a test to the test results file
   def self.test_pass_fail(pass_fail, test_file_name, test_step_idx, metrics)
     if pass_fail == true
-
+      @current_test_fail = false
       metrics.stepPasses += 1
       MyLog.log.info "Test #{test_step_idx} has Passed ".green
     elsif pass_fail == false
-
+      @current_test_fail = true
       metrics.stepFailures += 1
       MyLog.log.info "Test #{test_step_idx} has FAILED ".red
       sc_file_name = Screenshot.save_screenshot(test_step_idx)
@@ -64,7 +64,7 @@ module Report
       $failtestStep_xml[test_file_name] ||= []
       $failtestStep_xml[test_file_name][test_step_idx] = failstep
     else
-
+      @current_test_fail = false
       metrics.stepSkipped += 1
       MyLog.log.info "Test #{test_step_idx} no checks performed ".blue
       skipstep = {
@@ -96,7 +96,7 @@ module Report
   # If a certain number of consecutive tests fail then throw an exception
   def self.check_failure_threshold(test_file_name)
     consecutive_fail_threshold = 5
-    if
+    if @current_test_fail
       @consecutive_test_fail += 1
     else
       @consecutive_test_fail = 0
data/lib/report/report_summary.rb
CHANGED
@@ -24,8 +24,7 @@ module ReportSummary
   end
 
   # output the overall test results summary
-  def self.overall_test_summary(ts_start_time, ts_end_time,
-                                total_failures, total_skipped)
+  def self.overall_test_summary(ts_start_time, ts_end_time, total_metrics)
     # output to the console
 
     MyLog.log.info 'Finished processing all test files ' \
@@ -44,11 +43,11 @@ module ReportSummary
     MyLog.log.info "Total Tests started at: #{ts_start_time}"
     MyLog.log.info "Total Tests finished at: #{ts_end_time}"
     MyLog.log.info "Total Tests duration: #{duration}"
-    MyLog.log.info "Total Tests Passed: #{
-    MyLog.log.info "Total Tests Failed: #{
-    MyLog.log.info "Total Tests Skipped: #{
-    total_tests = [
-
+    MyLog.log.info "Total Tests Passed: #{total_metrics[0]}".green
+    MyLog.log.info "Total Tests Failed: #{total_metrics[1]}".red
+    MyLog.log.info "Total Tests Skipped: #{total_metrics[2]}".blue
+    total_tests = [total_metrics[0], total_metrics[1],
+                   total_metrics[2]].sum
     MyLog.log.info "Total Tests: #{total_tests}\n"
   end
 end
data/lib/utils/create_directories.rb
CHANGED
@@ -21,28 +21,27 @@ module CreateDirectories
     # create top-level directory if it doesn't already exist:
     # Results/Project_id
     project_id = JsonParser.project_id.delete(' ')
-
+    project_dir = File.join('Results', project_id)
 
-    FileUtils.mkdir_p(
+    FileUtils.mkdir_p(project_dir)
 
     # Generate UUID
-
+    @run_uuid = SecureRandom.uuid
   end
 
   def self.construct_testspecdirs
     # create directories for each test spec for screenshots:
     # Results/Project_id/Run_ID_UUID
-
+    project_id = JsonParser.project_id.delete(' ')
+    screenshot_dir = File.join('Results', project_id, "Run_ID_#{@run_uuid}")
 
     abs_path_screenshot_dir = File.absolute_path(screenshot_dir)
-    # abs_path_run_no_dir = File.absolute_path(runNoDir)
     FileUtils.mkdir_p(abs_path_screenshot_dir)
     # if any issues then set error message and re-raise the exception
   rescue StandardError => e
     # construct the error message from custom text and the actual system error
     # message (converted to a string)
-    error_to_display = 'Error creating
-    ' opening the test results file : ' + e.to_s
+    error_to_display = 'Error creating directory:' + e.to_s
     raise error_to_display
   else
     # if no exception then return the screenshot file directory path
data/lib/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: taf
 version: !ruby/object:Gem::Version
-  version: 0.3.6
+  version: 0.3.7
 platform: ruby
 authors:
 - Andy Perrett
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2019-05-
+date: 2019-05-28 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler