watchdog-dashboard 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32) hide show
  1. checksums.yaml +7 -0
  2. data/MIT-LICENSE +20 -0
  3. data/README.md +123 -0
  4. data/Rakefile +8 -0
  5. data/app/assets/config/watchdog_dashboard_manifest.js +1 -0
  6. data/app/assets/stylesheets/watchdog/dashboard/application.css +15 -0
  7. data/app/controllers/concerns/flaky_stats.rb +217 -0
  8. data/app/controllers/concerns/metric_stats.rb +217 -0
  9. data/app/controllers/watchdog/dashboard/application_controller.rb +6 -0
  10. data/app/controllers/watchdog/dashboard/dashboard_controller.rb +71 -0
  11. data/app/controllers/watchdog/dashboard/metrics_controller.rb +37 -0
  12. data/app/helpers/watchdog/dashboard/application_helper.rb +6 -0
  13. data/app/helpers/watchdog/dashboard/dashboard_helper.rb +4 -0
  14. data/app/helpers/watchdog/dashboard/metrics_helper.rb +4 -0
  15. data/app/jobs/watchdog/dashboard/application_job.rb +6 -0
  16. data/app/mailers/watchdog/dashboard/application_mailer.rb +8 -0
  17. data/app/models/watchdog/dashboard/application_record.rb +7 -0
  18. data/app/models/watchdog/dashboard/metric.rb +5 -0
  19. data/app/views/layouts/watchdog/dashboard/application.html.erb +28 -0
  20. data/app/views/watchdog/dashboard/dashboard/_sidebar.html.erb +83 -0
  21. data/app/views/watchdog/dashboard/dashboard/flakies.html.erb +263 -0
  22. data/app/views/watchdog/dashboard/dashboard/historic.html.erb +495 -0
  23. data/app/views/watchdog/dashboard/dashboard/index.html.erb +219 -0
  24. data/app/views/watchdog/dashboard/dashboard/metrics.html.erb +263 -0
  25. data/config/initializers/assets.rb +1 -0
  26. data/config/routes.rb +10 -0
  27. data/db/migrate/20250331204354_create_watchdog_dashboard_metrics.rb +14 -0
  28. data/lib/tasks/watchdog/dashboard_tasks.rake +4 -0
  29. data/lib/watchdog/dashboard/engine.rb +7 -0
  30. data/lib/watchdog/dashboard/version.rb +5 -0
  31. data/lib/watchdog/dashboard.rb +19 -0
  32. metadata +92 -0
checksums.yaml ADDED
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA256:
3
+ metadata.gz: 65b4507650fdd7435efc95b76a25a8c33b4b3032c08517273155076913a6c5a7
4
+ data.tar.gz: 974cbf45cac6e1d356a75d0eae1605e2e87e21997373d807ae3d23a5bad7fe63
5
+ SHA512:
6
+ metadata.gz: 37bcb377cb845f5bca3ef10fd562984739a07d1ce54c73841a9293650f75ac43360e51b95e49e55490ec1075e69296fe6e2dc50426830e53b56e2d76371a19ef
7
+ data.tar.gz: '093f0d8c65cd4ac55c0ac7516e257e6327929bbd80f92cc4ad236b6319eb1d9dcd661f59f85532dea696a6d81ec8a5bad0308ca902b9bbc39c346f0adc703c3a'
data/MIT-LICENSE ADDED
@@ -0,0 +1,20 @@
1
+ Copyright AFornio
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining
4
+ a copy of this software and associated documentation files (the
5
+ "Software"), to deal in the Software without restriction, including
6
+ without limitation the rights to use, copy, modify, merge, publish,
7
+ distribute, sublicense, and/or sell copies of the Software, and to
8
+ permit persons to whom the Software is furnished to do so, subject to
9
+ the following conditions:
10
+
11
+ The above copyright notice and this permission notice shall be
12
+ included in all copies or substantial portions of the Software.
13
+
14
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.md ADDED
@@ -0,0 +1,123 @@
1
+ # Watchdog::Dashboard
2
+
3
+ Watchdog Dashboard is a Rails engine that provides a visual interface for monitoring the performance and reliability of your RSpec tests. This dashboard is part of the [RspecWatchdog](https://github.com/windmotion-io/rspec-watchdog) ecosystem but is specifically designed to integrate with Rails applications.
4
+
5
+ ![Watchdog Dashboard](dashboard_1.png "Watchdog Dashboard")
6
+
7
+ ## Features
8
+
9
+ - **Metrics Visualization**: Charts and tables to analyze your test execution times
10
+ - **Flaky Test Detection**: Identify tests that fail intermittently
11
+ - **Trend Tracking**: Monitor the health of your test suite over time
12
+ - **Comprehensive Statistics**: Visualize data such as total runs, failures, and average test times
13
+ - **Simple Integration**: Easy to set up in any existing Rails application
14
+
15
+ ## Screenshots
16
+
17
+ See the **Extra Images** section at the end of this README for screenshots of the dashboard in action.
18
+
19
+ ## Installation
20
+
21
+ ### Step 1: Add the gem to your Gemfile
22
+
23
+ ```ruby
24
+ gem 'watchdog-dashboard'
25
+ ```
26
+
27
+ Then run:
28
+
29
+ ```bash
30
+ bundle install
31
+ ```
32
+
33
+ ### Step 2: Install and run migrations
34
+
35
+ ```bash
36
+ bin/rails watchdog_dashboard:install:migrations
37
+ bin/rails db:migrate
38
+ ```
39
+
40
+ ### Step 3: Mount the engine in your routes
41
+
42
+ In `config/routes.rb`, add:
43
+
44
+ ```ruby
45
+ mount Watchdog::Dashboard::Engine => "/watchdog"
46
+ ```
47
+
48
+ This will make the dashboard available at `/watchdog` in your Rails app.
49
+
50
+ ### Step 4: Configuration
51
+
52
+ Create a configuration file in `config/initializers/watchdog_dashboard.rb`:
53
+
54
+ ```ruby
55
+ Watchdog::Dashboard.configure do |config|
56
+ config.watchdog_api_token = "your_secret_token" # Change this to a secure value
57
+ end
58
+ ```
59
+
60
+ ## Using with RspecWatchdog
61
+
62
+ To get the maximum benefit, you should use Watchdog Dashboard together with the [RspecWatchdog](https://github.com/windmotion-io/rspec-watchdog) gem, which handles data collection during your test execution.
63
+
64
+ Configure RspecWatchdog in your `spec/rails_helper.rb`:
65
+
66
+ ```ruby
67
+ require "rspec_watchdog"
68
+
69
+ RspecWatchdog.configure do |config|
70
+ config.show_logs = true
71
+ config.watchdog_api_url = "http://localhost:3000/watchdog/analytics"
72
+ config.watchdog_api_token = "your_secret_token" # Must match the dashboard token
73
+ end
74
+
75
+ RSpec.configure do |config|
76
+ config.add_formatter(:progress) # Default RSpec formatter
77
+ config.add_formatter(SlowSpecFormatter)
78
+ end
79
+ ```
80
+
81
+ **Important**: Make sure that the `watchdog_api_token` matches between RspecWatchdog and Watchdog::Dashboard.
82
+
83
+ ## Configuration Options
84
+
85
+ ### `watchdog_api_token`
86
+
87
+ This token is used to validate that requests sent to the dashboard API are legitimate.
88
+
89
+ - If you're running tests in a CI/CD environment (e.g., GitHub Actions or CircleCI), you can set this value as an environment variable.
90
+ - For development environments, you can use a constant value, but make sure not to include it in version control.
91
+
92
+ ## Navigating the Dashboard
93
+
94
+ Once configured, the dashboard will be available at `/watchdog` and will display:
95
+
96
+ - **Main Panel**: Summary of key metrics and general trends
97
+ - **Slow Tests**: List of tests that take longer to execute
98
+ - **Flaky Tests**: Identification of tests that fail intermittently
99
+ - **Execution History**: Tracking of all test runs
100
+ - **Settings**: Adjustments to customize the dashboard
101
+
102
+ ## CI/CD Integration
103
+
104
+ To make the most of Watchdog Dashboard in a CI/CD environment:
105
+
106
+ 1. Configure the token as a secure environment variable in your CI/CD platform
107
+ 2. Ensure your tests send data to the correct endpoint
108
+ 3. Use the dashboard to analyze trends after each CI run
109
+
110
+ ## Contributing
111
+
112
+ Contributions are welcome. If you have ideas, suggestions, or find a bug, please open an issue or submit a pull request on GitHub.
113
+
114
+ ## License
115
+
116
+ This engine is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
117
+
118
+ ## Extra Images
119
+
120
+ ![Watchdog Dashboard - Summary](dashboard_1.png "Watchdog Dashboard - Summary")
121
+ ![Watchdog Dashboard - Metrics Visualization](dashboard_2.png "Watchdog Dashboard - Metrics Visualization")
122
+ ![Watchdog Dashboard - Flaky Tests](dashboard_3.png "Watchdog Dashboard - Flaky Tests")
123
+ ![Watchdog Dashboard - Historic](dashboard_4.png "Watchdog Dashboard - Historic")
data/Rakefile ADDED
@@ -0,0 +1,8 @@
1
+ require "bundler/setup"
2
+
+ # Run the engine's rake tasks against the dummy Rails app used for testing.
3
+ APP_RAKEFILE = File.expand_path("test/dummy/Rakefile", __dir__)
4
+ load "rails/tasks/engine.rake"
5
+
+ # Adds the `rake stats` task covering engine directories.
6
+ load "rails/tasks/statistics.rake"
7
+
+ # Provides gem build/install/release tasks.
8
+ require "bundler/gem_tasks"
@@ -0,0 +1 @@
1
+ //= link_directory ../stylesheets/watchdog/dashboard .css
@@ -0,0 +1,15 @@
1
+ /*
2
+ * This is a manifest file that'll be compiled into application.css, which will include all the files
3
+ * listed below.
4
+ *
5
+ * Any CSS and SCSS file within this directory, lib/assets/stylesheets, vendor/assets/stylesheets,
6
+ * or any plugin's vendor/assets/stylesheets directory can be referenced here using a relative path.
7
+ *
8
+ * You're free to add application-wide styles to this file and they'll appear at the bottom of the
9
+ * compiled file so the styles you add here take precedence over styles defined in any other CSS/SCSS
10
+ * files in this directory. Styles in this file should be added after the last require_* statement.
11
+ * It is generally better to create a new file per style scope.
12
+ *
13
+ *= require_tree .
14
+ *= require_self
15
+ */
@@ -0,0 +1,217 @@
1
# Statistics helpers computed over flaky test metrics
# (the Watchdog::Dashboard::Metric.flaky scope). Mixed into dashboard
# controllers; mirrors MetricStats, which covers the unscoped metrics.
module FlakyStats
  extend ActiveSupport::Concern

  # Mean run time across all flaky metrics; nil when there are none.
  def flaky_average_time
    Watchdog::Dashboard::Metric.flaky.average(:run_time)
  end

  # Flaky metric with the smallest run_time, or nil when none exist.
  def flaky_fastest_test
    Watchdog::Dashboard::Metric.flaky.order(:run_time).first
  end

  # Flaky metric with the largest run_time, or nil when none exist.
  def flaky_slowest_test
    Watchdog::Dashboard::Metric.flaky.order(run_time: :desc).first
  end

  # 25th/50th/75th percentile examples ordered by run_time.
  # Returns [] when there are no flaky metrics — previously an empty table
  # produced a negative OFFSET (database error) and a nil dereference.
  def flaky_percentiles
    total = Watchdog::Dashboard::Metric.flaky.count
    return [] if total.zero?

    [0.25, 0.5, 0.75].filter_map do |p|
      index = [(total * p).round - 1, 0].max
      example = Watchdog::Dashboard::Metric.flaky.order(:run_time).offset(index).first
      next unless example

      {
        percentile: (p * 100).to_i,
        description: example.description,
        file_path: example.file_path,
        run_time: example.run_time
      }
    end
  end

  # Flaky metrics whose recorded status is "failed".
  def flaky_failed_tests
    Watchdog::Dashboard::Metric.flaky.where(status: "failed")
  end

  # Flaky metrics grouped (and ordered) by the spec file they belong to.
  def flaky_tests_grouped_by_file
    Watchdog::Dashboard::Metric.flaky.group(:file_path).order(:file_path)
  end

  # Flaky metrics slower than +threshold+ (seconds).
  def flaky_tests_that_took_longer_than(threshold)
    Watchdog::Dashboard::Metric.flaky.where("run_time > ?", threshold)
  end

  # Buckets every flaky metric into human-readable speed categories.
  # Returns the category => count hash used by the dashboard charts.
  def flaky_time_distribution_analysis
    categories = {
      "⚡ Ultra Fast (< 0.01s)" => 0,
      "🚀 Fast (0.01s - 0.1s)" => 0,
      "🏃 Normal (0.1s - 0.5s)" => 0,
      "🚶 Slow (0.5s - 1s)" => 0,
      "🐢 Very Slow (> 1s)" => 0
    }

    Watchdog::Dashboard::Metric.flaky.find_each do |ex|
      key =
        case ex.run_time
        when 0...0.01 then "⚡ Ultra Fast (< 0.01s)"
        when 0.01...0.1 then "🚀 Fast (0.01s - 0.1s)"
        when 0.1...0.5 then "🏃 Normal (0.1s - 0.5s)"
        when 0.5...1.0 then "🚶 Slow (0.5s - 1s)"
        else "🐢 Very Slow (> 1s)"
        end
      categories[key] += 1
    end

    categories
  end

  # Pass/fail/pending breakdown as percentages of all flaky metrics.
  # Guards against the NaN percentages the original produced on an empty set.
  def flaky_test_stability_analysis
    total_tests = Watchdog::Dashboard::Metric.flaky.count
    if total_tests.zero?
      return { total_tests: 0, passed_percentage: 0.0, failed_percentage: 0.0, pending_percentage: 0.0 }
    end

    passed = Watchdog::Dashboard::Metric.flaky.where(status: "passed").count
    failed = Watchdog::Dashboard::Metric.flaky.where(status: "failed").count
    pending = Watchdog::Dashboard::Metric.flaky.where(status: "pending").count

    {
      total_tests: total_tests,
      passed_percentage: (passed.to_f / total_tests * 100).round(2),
      failed_percentage: (failed.to_f / total_tests * 100).round(2),
      pending_percentage: (pending.to_f / total_tests * 100).round(2)
    }
  end

  # Mean, variance and standard deviation of flaky run times.
  # Returns zeroed stats when there are no rows (avoids 0/0 => NaN).
  def flaky_execution_time_variance
    run_times = Watchdog::Dashboard::Metric.flaky.pluck(:run_time)
    return { mean: 0.0, variance: 0.0, standard_deviation: 0.0 } if run_times.empty?

    mean = run_times.sum / run_times.size
    variance = run_times.sum { |time| (time - mean)**2 } / run_times.size

    {
      mean: mean,
      variance: variance,
      standard_deviation: Math.sqrt(variance)
    }
  end

  # The three fastest flaky examples, as plain hashes for rendering.
  def flaky_temporal_complexity_analysis
    Watchdog::Dashboard::Metric.flaky.order(:run_time).first(3).map do |ex|
      {
        description: ex.description,
        file_path: ex.file_path,
        execution_time: ex.run_time
      }
    end
  end

  # Files that contain more than one flaky test, with their average run time.
  def flaky_test_dependency_analysis
    file_dependencies = Watchdog::Dashboard::Metric.flaky.group(:file_path).having("count(*) > 1").count

    file_dependencies.map do |file, count|
      {
        file: file,
        number_of_tests: count,
        average_execution_time: Watchdog::Dashboard::Metric.flaky.where(file_path: file).average(:run_time)
      }
    end
  end

  # HISTORIC DATA

  # Histogram of flaky run times bucketed by +bin_size+ seconds.
  # +bin_size+ is coerced with Float() to guard the SQL interpolation below.
  def flaky_run_time_distribution(bin_size = 1.0)
    bin = Float(bin_size)
    Watchdog::Dashboard::Metric.flaky
      .select("FLOOR(run_time / #{bin}) as run_time_bin, COUNT(*) as test_count")
      .group("run_time_bin")
      .order("run_time_bin")
  end

  # Daily average flaky run time over the last +days+ days (includes today).
  def flaky_performance_trend(days = 30)
    Watchdog::Dashboard::Metric.flaky
      .select("DATE(created_at) as test_date, AVG(run_time) as average_run_time")
      .where(created_at: days.days.ago..Time.current.end_of_day)
      .group("DATE(created_at)")
      .order("DATE(created_at)")
      .map { |m| { test_date: m.test_date.to_s, average_run_time: m.average_run_time.to_f } }
  end

  # Daily count of flaky test executions over the last +days+ days.
  def flaky_test_count_trend(days = 30)
    Watchdog::Dashboard::Metric.flaky
      .select("DATE(created_at) as test_date, COUNT(*) as test_count")
      .where(created_at: days.days.ago..Time.current.end_of_day)
      .group("DATE(created_at)")
      .order("DATE(created_at)")
      .map { |m| { test_date: m.test_date.to_s, test_count: m.test_count } }
  end

  # Slowest flaky test per day, newest day first.
  # Bug fix: the correlated subquery referenced a nonexistent table
  # ("watchdog_dashboard_metric.flakys", an over-eager text substitution);
  # the scope's real table is watchdog_dashboard_metrics, as used by
  # the MetricStats twin of this method.
  def flaky_longest_tests_by_day(days = 30)
    Watchdog::Dashboard::Metric.flaky
      .select("DATE(created_at) as test_date, description, run_time")
      .where(created_at: days.days.ago..Time.current.end_of_day)
      .where("run_time = (SELECT MAX(run_time) FROM watchdog_dashboard_metrics AS m2 WHERE DATE(m2.created_at) = DATE(watchdog_dashboard_metrics.created_at))")
      .order("test_date DESC")
      .map { |m| { test_date: m.test_date.to_s, description: m.description, run_time: m.run_time.to_f } }
  end

  # Total time spent in flaky tests per day.
  def flaky_total_execution_time_by_day(days = 30)
    Watchdog::Dashboard::Metric.flaky
      .select("DATE(created_at) as test_date, SUM(run_time) as total_run_time")
      .where(created_at: days.days.ago..Time.current.end_of_day)
      .group("DATE(created_at)")
      .order("DATE(created_at)")
      .map { |m| { test_date: m.test_date.to_s, total_run_time: m.total_run_time } }
  end

  # Daily count of flaky tests slower than +threshold+ seconds.
  def flaky_tests_exceeding_time_threshold(threshold = 1.0, days = 30)
    Watchdog::Dashboard::Metric.flaky
      .select("DATE(created_at) as test_date, COUNT(*) as test_count")
      .where("run_time > ?", threshold)
      .where(created_at: days.days.ago..Time.current.end_of_day)
      .group("DATE(created_at)")
      .order("DATE(created_at)")
      .map { |m| { test_date: m.test_date.to_s, test_count: m.test_count } }
  end

  # Daily failure counts broken down per spec file.
  def flaky_failed_tests_trend_by_file(days = 30)
    Watchdog::Dashboard::Metric.flaky
      .select("DATE(created_at) as test_date, file_path, COUNT(*) as failed_count")
      .where(status: "failed", created_at: days.days.ago..Time.current.end_of_day)
      .group("DATE(created_at), file_path")
      .order("DATE(created_at), failed_count DESC")
      .map { |m| { test_date: m.test_date.to_s, file_path: m.file_path, failed_count: m.failed_count } }
  end

  # Average flaky run time per spec file, slowest first (NULL times excluded).
  def flaky_avg_execution_time_by_file(days = 30)
    Watchdog::Dashboard::Metric.flaky
      .select("file_path, AVG(run_time) as average_run_time")
      .where(created_at: days.days.ago..Time.current.end_of_day)
      .where("run_time IS NOT NULL")
      .group("file_path")
      .order("average_run_time DESC")
      .map { |m| { file_path: m.file_path, average_run_time: m.average_run_time.to_f.round(2) } }
  end

  # Daily per-status counts (passed/failed/skipped/pending/error) for flaky tests.
  def flaky_stability_trend(days = 30)
    Watchdog::Dashboard::Metric.flaky
      .select("DATE(created_at) as test_date,
               SUM(CASE WHEN status = 'passed' THEN 1 ELSE 0 END) as passed_count,
               SUM(CASE WHEN status = 'failed' THEN 1 ELSE 0 END) as failed_count,
               SUM(CASE WHEN status = 'skipped' THEN 1 ELSE 0 END) as skipped_count,
               SUM(CASE WHEN status = 'pending' THEN 1 ELSE 0 END) as pending_count,
               SUM(CASE WHEN status = 'error' THEN 1 ELSE 0 END) as error_count")
      .where(created_at: days.days.ago..Time.current.end_of_day)
      .group("DATE(created_at)") # group only by the calendar date
      .order("DATE(created_at)")
      .map { |m|
        {
          test_date: m.test_date.to_s,
          passed_count: m.passed_count,
          failed_count: m.failed_count,
          skipped_count: m.skipped_count,
          pending_count: m.pending_count,
          error_count: m.error_count
        }
      }
  end
end
@@ -0,0 +1,217 @@
1
# Statistics helpers computed over every recorded test metric
# (Watchdog::Dashboard::Metric). Mixed into dashboard controllers;
# FlakyStats provides the same analyses restricted to the flaky scope.
module MetricStats
  extend ActiveSupport::Concern

  # Mean run time across all metrics; nil when there are none.
  def average_time
    Watchdog::Dashboard::Metric.all.average(:run_time)
  end

  # Metric with the smallest run_time, or nil when none exist.
  def fastest_test
    Watchdog::Dashboard::Metric.order(:run_time).first
  end

  # Metric with the largest run_time, or nil when none exist.
  def slowest_test
    Watchdog::Dashboard::Metric.order(run_time: :desc).first
  end

  # 25th/50th/75th percentile examples ordered by run_time.
  # Returns [] when the table is empty — previously an empty table produced
  # a negative OFFSET (database error) and a nil dereference.
  def percentiles
    total = Watchdog::Dashboard::Metric.count
    return [] if total.zero?

    [0.25, 0.5, 0.75].filter_map do |p|
      index = [(total * p).round - 1, 0].max
      example = Watchdog::Dashboard::Metric.order(:run_time).offset(index).first
      next unless example

      {
        percentile: (p * 100).to_i,
        description: example.description,
        file_path: example.file_path,
        run_time: example.run_time
      }
    end
  end

  # Metrics whose recorded status is "failed".
  def failed_tests
    Watchdog::Dashboard::Metric.where(status: "failed")
  end

  # Metrics grouped (and ordered) by the spec file they belong to.
  def tests_grouped_by_file
    Watchdog::Dashboard::Metric.group(:file_path).order(:file_path)
  end

  # Metrics slower than +threshold+ (seconds).
  def tests_that_took_longer_than(threshold)
    Watchdog::Dashboard::Metric.where("run_time > ?", threshold)
  end

  # Buckets every metric into human-readable speed categories.
  # Returns the category => count hash used by the dashboard charts.
  def time_distribution_analysis
    categories = {
      "⚡ Ultra Fast (< 0.01s)" => 0,
      "🚀 Fast (0.01s - 0.1s)" => 0,
      "🏃 Normal (0.1s - 0.5s)" => 0,
      "🚶 Slow (0.5s - 1s)" => 0,
      "🐢 Very Slow (> 1s)" => 0
    }

    Watchdog::Dashboard::Metric.find_each do |ex|
      key =
        case ex.run_time
        when 0...0.01 then "⚡ Ultra Fast (< 0.01s)"
        when 0.01...0.1 then "🚀 Fast (0.01s - 0.1s)"
        when 0.1...0.5 then "🏃 Normal (0.1s - 0.5s)"
        when 0.5...1.0 then "🚶 Slow (0.5s - 1s)"
        else "🐢 Very Slow (> 1s)"
        end
      categories[key] += 1
    end

    categories
  end

  # Pass/fail/pending breakdown as percentages of all metrics.
  # Guards against the NaN percentages the original produced on an empty table.
  def test_stability_analysis
    total_tests = Watchdog::Dashboard::Metric.count
    if total_tests.zero?
      return { total_tests: 0, passed_percentage: 0.0, failed_percentage: 0.0, pending_percentage: 0.0 }
    end

    passed = Watchdog::Dashboard::Metric.where(status: "passed").count
    failed = Watchdog::Dashboard::Metric.where(status: "failed").count
    pending = Watchdog::Dashboard::Metric.where(status: "pending").count

    {
      total_tests: total_tests,
      passed_percentage: (passed.to_f / total_tests * 100).round(2),
      failed_percentage: (failed.to_f / total_tests * 100).round(2),
      pending_percentage: (pending.to_f / total_tests * 100).round(2)
    }
  end

  # Mean, variance and standard deviation of run times.
  # Returns zeroed stats when there are no rows (avoids 0/0 => NaN).
  def execution_time_variance
    run_times = Watchdog::Dashboard::Metric.pluck(:run_time)
    return { mean: 0.0, variance: 0.0, standard_deviation: 0.0 } if run_times.empty?

    mean = run_times.sum / run_times.size
    variance = run_times.sum { |time| (time - mean)**2 } / run_times.size

    {
      mean: mean,
      variance: variance,
      standard_deviation: Math.sqrt(variance)
    }
  end

  # The three fastest examples, as plain hashes for rendering.
  def temporal_complexity_analysis
    Watchdog::Dashboard::Metric.order(:run_time).first(3).map do |ex|
      {
        description: ex.description,
        file_path: ex.file_path,
        execution_time: ex.run_time
      }
    end
  end

  # Files that contain more than one recorded test, with average run time.
  def test_dependency_analysis
    file_dependencies = Watchdog::Dashboard::Metric.group(:file_path).having("count(*) > 1").count

    file_dependencies.map do |file, count|
      {
        file: file,
        number_of_tests: count,
        average_execution_time: Watchdog::Dashboard::Metric.where(file_path: file).average(:run_time)
      }
    end
  end

  # HISTORIC DATA

  # Histogram of run times bucketed by +bin_size+ seconds.
  # +bin_size+ is coerced with Float() to guard the SQL interpolation below.
  def run_time_distribution(bin_size = 1.0)
    bin = Float(bin_size)
    Watchdog::Dashboard::Metric
      .select("FLOOR(run_time / #{bin}) as run_time_bin, COUNT(*) as test_count")
      .group("run_time_bin")
      .order("run_time_bin")
  end

  # Daily average run time over the last +days+ days (includes today).
  def performance_trend(days = 30)
    Watchdog::Dashboard::Metric
      .select("DATE(created_at) as test_date, AVG(run_time) as average_run_time")
      .where(created_at: days.days.ago..Time.current.end_of_day)
      .group("DATE(created_at)")
      .order("DATE(created_at)")
      .map { |m| { test_date: m.test_date.to_s, average_run_time: m.average_run_time.to_f } }
  end

  # Daily count of test executions over the last +days+ days.
  def test_count_trend(days = 30)
    Watchdog::Dashboard::Metric
      .select("DATE(created_at) as test_date, COUNT(*) as test_count")
      .where(created_at: days.days.ago..Time.current.end_of_day)
      .group("DATE(created_at)")
      .order("DATE(created_at)")
      .map { |m| { test_date: m.test_date.to_s, test_count: m.test_count } }
  end

  # Slowest test per day, newest day first. Uses a correlated subquery
  # against the metrics table to pick each day's maximum run_time.
  def longest_tests_by_day(days = 30)
    Watchdog::Dashboard::Metric
      .select("DATE(created_at) as test_date, description, run_time")
      .where(created_at: days.days.ago..Time.current.end_of_day)
      .where("run_time = (SELECT MAX(run_time) FROM watchdog_dashboard_metrics AS m2 WHERE DATE(m2.created_at) = DATE(watchdog_dashboard_metrics.created_at))")
      .order("test_date DESC")
      .map { |m| { test_date: m.test_date.to_s, description: m.description, run_time: m.run_time.to_f } }
  end

  # Total time spent running tests per day.
  def total_execution_time_by_day(days = 30)
    Watchdog::Dashboard::Metric
      .select("DATE(created_at) as test_date, SUM(run_time) as total_run_time")
      .where(created_at: days.days.ago..Time.current.end_of_day)
      .group("DATE(created_at)")
      .order("DATE(created_at)")
      .map { |m| { test_date: m.test_date.to_s, total_run_time: m.total_run_time } }
  end

  # Daily count of tests slower than +threshold+ seconds.
  def tests_exceeding_time_threshold(threshold = 1.0, days = 30)
    Watchdog::Dashboard::Metric
      .select("DATE(created_at) as test_date, COUNT(*) as test_count")
      .where("run_time > ?", threshold)
      .where(created_at: days.days.ago..Time.current.end_of_day)
      .group("DATE(created_at)")
      .order("DATE(created_at)")
      .map { |m| { test_date: m.test_date.to_s, test_count: m.test_count } }
  end

  # Daily failure counts broken down per spec file.
  def failed_tests_trend_by_file(days = 30)
    Watchdog::Dashboard::Metric
      .select("DATE(created_at) as test_date, file_path, COUNT(*) as failed_count")
      .where(status: "failed", created_at: days.days.ago..Time.current.end_of_day)
      .group("DATE(created_at), file_path")
      .order("DATE(created_at), failed_count DESC")
      .map { |m| { test_date: m.test_date.to_s, file_path: m.file_path, failed_count: m.failed_count } }
  end

  # Average run time per spec file, slowest first (NULL times excluded).
  def avg_execution_time_by_file(days = 30)
    Watchdog::Dashboard::Metric
      .select("file_path, AVG(run_time) as average_run_time")
      .where(created_at: days.days.ago..Time.current.end_of_day)
      .where("run_time IS NOT NULL")
      .group("file_path")
      .order("average_run_time DESC")
      .map { |m| { file_path: m.file_path, average_run_time: m.average_run_time.to_f.round(2) } }
  end

  # Daily per-status counts (passed/failed/skipped/pending/error).
  def stability_trend(days = 30)
    Watchdog::Dashboard::Metric
      .select("DATE(created_at) as test_date,
               SUM(CASE WHEN status = 'passed' THEN 1 ELSE 0 END) as passed_count,
               SUM(CASE WHEN status = 'failed' THEN 1 ELSE 0 END) as failed_count,
               SUM(CASE WHEN status = 'skipped' THEN 1 ELSE 0 END) as skipped_count,
               SUM(CASE WHEN status = 'pending' THEN 1 ELSE 0 END) as pending_count,
               SUM(CASE WHEN status = 'error' THEN 1 ELSE 0 END) as error_count")
      .where(created_at: days.days.ago..Time.current.end_of_day)
      .group("DATE(created_at)") # group only by the calendar date
      .order("DATE(created_at)")
      .map { |m|
        {
          test_date: m.test_date.to_s,
          passed_count: m.passed_count,
          failed_count: m.failed_count,
          skipped_count: m.skipped_count,
          pending_count: m.pending_count,
          error_count: m.error_count
        }
      }
  end
end
@@ -0,0 +1,6 @@
1
+ module Watchdog
2
+ module Dashboard
+ # Base controller for all engine controllers. Inherits directly from
+ # ActionController::Base rather than the host application's
+ # ApplicationController, so host filters, layouts, and helpers do not
+ # leak into the dashboard.
3
+ class ApplicationController < ActionController::Base
4
+ end
5
+ end
6
+ end