funktor 0.5.0 → 0.6.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile.lock +26 -11
- data/funktor-testapp/Gemfile.lock +2 -2
- data/funktor-testapp/app/services/job_flood.rb +1 -1
- data/funktor-testapp/app/workers/single_thread_audit_worker.rb +3 -0
- data/funktor-testapp/funktor_config/environment.yml +2 -2
- data/funktor-testapp/funktor_config/function_definitions/default_queue_handler.yml +3 -1
- data/funktor-testapp/funktor_config/function_definitions/incoming_job_handler.yml +3 -1
- data/funktor-testapp/funktor_config/function_definitions/job_activator.yml +1 -2
- data/funktor-testapp/funktor_config/function_definitions/low_concurrency_queue_handler.yml +13 -0
- data/funktor-testapp/funktor_config/funktor.yml +25 -25
- data/funktor-testapp/funktor_config/iam_permissions/{single_thread_queue.yml → low_concurrency_queue.yml} +1 -1
- data/funktor-testapp/funktor_config/resources/cloudwatch_dashboard.yml +22 -17
- data/funktor-testapp/funktor_config/resources/default_queue.yml +2 -2
- data/funktor-testapp/funktor_config/resources/incoming_job_queue.yml +2 -2
- data/funktor-testapp/funktor_config/resources/jobs_table.yml +16 -4
- data/funktor-testapp/funktor_config/resources/low_concurrency_queue.yml +22 -0
- data/funktor-testapp/funktor_init.yml +16 -8
- data/funktor-testapp/lambda_event_handlers/{single_thread_queue_handler.rb → low_concurrency_queue_handler.rb} +0 -0
- data/funktor-testapp/serverless.yml +4 -3
- data/funktor.gemspec +3 -1
- data/lib/funktor/activity_tracker.rb +6 -2
- data/lib/funktor/cli/templates/funktor_config/function_definitions/incoming_job_handler.yml +3 -1
- data/lib/funktor/cli/templates/funktor_config/function_definitions/job_activator.yml +1 -2
- data/lib/funktor/cli/templates/funktor_config/function_definitions/work_queue_handler.yml +3 -1
- data/lib/funktor/cli/templates/funktor_config/funktor.yml +6 -6
- data/lib/funktor/cli/templates/funktor_config/resources/cloudwatch_dashboard.yml +3 -2
- data/lib/funktor/cli/templates/funktor_config/resources/incoming_job_queue.yml +2 -2
- data/lib/funktor/cli/templates/funktor_config/resources/jobs_table.yml +16 -4
- data/lib/funktor/cli/templates/funktor_config/resources/work_queue.yml +2 -2
- data/lib/funktor/cli/templates/funktor_init.yml.tt +14 -8
- data/lib/funktor/cli/templates/serverless.yml +1 -0
- data/lib/funktor/incoming_job_handler.rb +11 -15
- data/lib/funktor/job.rb +50 -5
- data/lib/funktor/job_activator.rb +52 -26
- data/lib/funktor/shard_utils.rb +6 -0
- data/lib/funktor/testing.rb +1 -0
- data/lib/funktor/version.rb +1 -1
- data/lib/funktor/web/application.rb +139 -0
- data/lib/funktor/web/views/index.erb +3 -0
- data/lib/funktor/web/views/layout.erb +58 -0
- data/lib/funktor/web/views/processing.erb +29 -0
- data/lib/funktor/web/views/queued.erb +29 -0
- data/lib/funktor/web/views/retries.erb +35 -0
- data/lib/funktor/web/views/scheduled.erb +26 -0
- data/lib/funktor/web/views/stats.erb +9 -0
- data/lib/funktor/web/views/table_stats_with_buttons.erb +11 -0
- data/lib/funktor/web.rb +1 -0
- data/lib/funktor/work_queue_handler.rb +41 -0
- data/lib/funktor/worker/funktor_options.rb +3 -1
- data/lib/funktor/worker.rb +8 -11
- data/lib/funktor.rb +16 -16
- metadata +46 -6
- data/funktor-testapp/funktor_config/function_definitions/single_thread_queue_handler.yml +0 -11
- data/funktor-testapp/funktor_config/resources/single_thread_queue.yml +0 -22
data/lib/funktor/cli/templates/serverless.yml CHANGED

```diff
@@ -26,6 +26,7 @@ provider:
   lambdaHashingVersion: 20201221
   environment: ${file(funktor_config/environment.yml)}
   versionFunctions: false # Reduces the amount of storage used since all Lambdas together are limited to 75GB
+  logRetentionInDays: 7
   iamRoleStatements:
 <%- all_iam_permissions.each do |iam_permission| -%>
     - ${file(<%= iam_permission %>)}
```
data/lib/funktor/incoming_job_handler.rb CHANGED

```diff
@@ -30,6 +30,11 @@ module Funktor
       # TODO : This number should be configurable via ENV var
       if job.delay < 60 # for now we're testing with just one minute * 5 # 5 minutes
         Funktor.logger.debug "pushing to work queue for delay = #{job.delay}"
+        # We push to the jobs table first because the work queue handler will expect to be able
+        # to update the stats of a record that's already in the table.
+        # TODO : For time sensitive jobs this is probably less than optimal. Can we update the
+        # work queue handler to be ok with a job that's not yet in the table?
+        push_to_jobs_table(job, "queued")
         push_to_work_queue(job)
         if job.is_retry?
           @tracker.track(:retryActivated, job)
@@ -38,7 +43,7 @@ module Funktor
         end
       else
         Funktor.logger.debug "pushing to jobs table for delay = #{job.delay}"
-        push_to_jobs_table(job)
+        push_to_jobs_table(job, nil)
         if job.is_retry?
           # do nothing for tracking
         else
@@ -49,18 +54,10 @@ module Funktor
       end
     end
 
-    def queue_for_job(job)
-      queue_name = job.queue || 'default'
-      queue_constant = "FUNKTOR_#{queue_name.underscore.upcase}_QUEUE"
-      Funktor.logger.debug "queue_constant = #{queue_constant}"
-      Funktor.logger.debug "ENV value = #{ENV[queue_constant]}"
-      ENV[queue_constant] || ENV['FUNKTOR_DEFAULT_QUEUE']
-    end
-
     def push_to_work_queue(job)
       Funktor.logger.debug "job = #{job.to_json}"
       sqs_client.send_message({
-        queue_url:
+        queue_url: job.work_queue_url,
         message_body: job.to_json,
         delay_seconds: job.delay
       })
@@ -70,16 +67,15 @@ module Funktor
       ENV['FUNKTOR_JOBS_TABLE']
     end
 
-    def push_to_jobs_table(job)
-      perform_at = (Time.now + job.delay).utc
+    def push_to_jobs_table(job, category = nil)
       resp = dynamodb_client.put_item({
         item: {
           payload: job.to_json,
           jobId: job.job_id,
-          performAt: perform_at.iso8601,
+          performAt: job.perform_at.iso8601,
           jobShard: job.shard,
-
-          category: job.is_retry? ? "retry" : "scheduled"
+          queueable: category.present? ? "false" : "true",
+          category: category || (job.is_retry? ? "retry" : "scheduled")
         },
         table_name: delayed_job_table
       })
```
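A note on the two new item attributes: `queueable` is stored as the string `"true"`/`"false"` rather than a boolean because it evidently serves as the partition key of the `performAtIndex` GSI (see the `jobs_table.yml +16 -4` entries above), and DynamoDB key attributes must be scalar string, number, or binary values. Writing every pending job under the single value `"true"` is what lets the activator fetch all due jobs in one query. A sketch of that query shape, not taken from the package (it assumes configured AWS credentials):

```ruby
# Sketch only: querying the performAtIndex the way the JobActivator below does.
# Table, index, and attribute names come from this diff; the rest is assumed.
require 'aws-sdk-dynamodb'

client = Aws::DynamoDB::Client.new

resp = client.query(
  table_name: ENV['FUNKTOR_JOBS_TABLE'],
  index_name: 'performAtIndex',
  key_condition_expression: 'queueable = :queueable AND performAt < :targetTime',
  expression_attribute_values: {
    ':queueable'  => 'true',                      # a string, not a boolean
    ':targetTime' => (Time.now + 60).utc.iso8601  # 60s lookahead, as below
  }
)
resp.items.each { |item| puts item['jobId'] }
```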
data/lib/funktor/job.rb CHANGED

```diff
@@ -1,5 +1,8 @@
+require_relative 'shard_utils'
+
 module Funktor
   class Job
+    include ShardUtils
     attr_accessor :job_string
     attr_accessor :job_data
     def initialize(job_string)
@@ -11,7 +14,15 @@ module Funktor
     end
 
     def queue
-      job_data["queue"]
+      job_data["queue"] || 'default'
+    end
+
+    def work_queue_url
+      queue_name = self.queue
+      queue_constant = "FUNKTOR_#{queue_name.underscore.upcase}_QUEUE"
+      Funktor.logger.debug "queue_constant = #{queue_constant}"
+      Funktor.logger.debug "ENV value = #{ENV[queue_constant]}"
+      ENV[queue_constant] || ENV['FUNKTOR_DEFAULT_QUEUE']
     end
 
     def worker_class_name
```
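Illustration of the ENV-based lookup above (not package code): the serverless templates define one URL variable per queue, with `FUNKTOR_DEFAULT_QUEUE` as the fallback. The env var values here are made up, and since `String#underscore` is an ActiveSupport-style inflection rather than core Ruby, a regex stand-in keeps the sketch dependency-free:

```ruby
# Hypothetical queue-name resolution, mirroring Job#work_queue_url above.
ENV['FUNKTOR_DEFAULT_QUEUE']         = 'https://sqs.us-east-1.amazonaws.com/123/default'
ENV['FUNKTOR_LOW_CONCURRENCY_QUEUE'] = 'https://sqs.us-east-1.amazonaws.com/123/low-concurrency'

def work_queue_url_for(queue_name)
  # "lowConcurrency" => "low_Concurrency" => "LOW_CONCURRENCY"
  underscored = queue_name.gsub(/([a-z\d])([A-Z])/, '\1_\2')
  ENV["FUNKTOR_#{underscored.upcase}_QUEUE"] || ENV['FUNKTOR_DEFAULT_QUEUE']
end

puts work_queue_url_for('lowConcurrency') # => .../low-concurrency
puts work_queue_url_for('imaginary')      # falls back to the default queue URL
```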
```diff
@@ -23,8 +34,7 @@ module Funktor
     end
 
     def shard
-
-      job_data["job_id"].hash % 64
+      calculate_shard(job_data["job_id"])
     end
 
     def worker_params
```
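Why this change matters: Ruby seeds `String#hash` per process, so two Lambda invocations could compute different shards for the same `job_id`. The new `ShardUtils` module (`+6` lines, not shown in this excerpt) presumably provides a stable mapping; a deterministic stand-in for illustration only, an assumption rather than the package's actual implementation:

```ruby
# CRC32 is process-independent, unlike String#hash, so the shard for a given
# job_id is the same in the incoming handler, the activator, and the web UI.
require 'zlib'

module ShardUtilsSketch
  def calculate_shard(job_id)
    Zlib.crc32(job_id) % 64 # same input => same shard, in every process
  end
end

include ShardUtilsSketch
puts calculate_shard('0044865e-e4c4-4e38-964b-dfb9813d8f9e') # stable across runs
```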
```diff
@@ -43,12 +53,43 @@ module Funktor
       job_data["retries"] = retries
     end
 
+    def perform_at
+      if job_data["perform_at"].present?
+        job_data["perform_at"].is_a?(Time) ? job_data["perform_at"] : Time.parse(job_data["perform_at"])
+      else
+        Time.now.utc
+      end
+    end
+
     def delay
-
+      delay = (perform_at - Time.now.utc).to_i
+      if delay < 0
+        delay = 0
+      end
+      return delay
     end
 
     def delay=(delay)
-      job_data["
+      job_data["perform_at"] = Time.now.utc + delay
+    end
+
+    def error_class
+      job_data["error_class"]
+    end
+
+    def error_message
+      job_data["error_message"]
+    end
+
+    def error_backtrace
+      job_data["error_backtrace"].present? ? Funktor.parse_json(job_data["error_backtrace"]) : []
+    end
+
+    def error=(error)
+      # TODO We should maybe compress this?
+      job_data["error_class"] = error.class.name
+      job_data["error_message"] = error.message
+      job_data["error_backtrace"] = Funktor.dump_json(error.backtrace)
     end
 
     def execute
```
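The net effect of this hunk: a job now carries an absolute `perform_at` timestamp instead of a stored relative delay, and `delay` is re-derived (and floored at zero) every time it is read, so an overdue job is dispatched immediately. A standalone sketch of that round-trip, not package code:

```ruby
# delay= stores an absolute timestamp...
job_data = {}
job_data["perform_at"] = Time.now.utc + 90

# ...and delay is recomputed, floored at zero, whenever it is read:
delay = (job_data["perform_at"] - Time.now.utc).to_i
delay = 0 if delay < 0
puts delay # => ~90 here; 0 once the scheduled moment has passed
```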
```diff
@@ -84,6 +125,10 @@ module Funktor
 
     def retry_queue_url
       worker_class&.custom_queue_url || ENV['FUNKTOR_INCOMING_JOB_QUEUE']
+    rescue NameError, TypeError
+      # In the web UI we may not have access to the worker classes.
+      # TODO : We should maybe handle this differently somehow? This just feels a bit icky...
+      ENV['FUNKTOR_INCOMING_JOB_QUEUE']
     end
   end
 end
```
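The rescue covers contexts like the web UI, where the worker class constant may never have been loaded: resolving a class by name raises `NameError` in that case, so the job falls back to the incoming job queue. A minimal sketch of the pattern, with hypothetical names rather than the gem's API:

```ruby
# Not package code: illustrates the NameError fallback used above.
ENV['FUNKTOR_INCOMING_JOB_QUEUE'] = 'https://sqs.us-east-1.amazonaws.com/123/incoming'

def retry_queue_url_for(worker_class_name)
  Object.const_get(worker_class_name).custom_queue_url # NameError if not loaded
rescue NameError
  ENV['FUNKTOR_INCOMING_JOB_QUEUE']
end

puts retry_queue_url_for('NotLoadedWorker') # => the incoming queue URL
```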
data/lib/funktor/job_activator.rb CHANGED

```diff
@@ -16,23 +16,25 @@ module Funktor
       @sqs_client ||= ::Aws::SQS::Client.new
     end
 
-    def active_job_queue
-      ENV['FUNKTOR_ACTIVE_JOB_QUEUE']
-    end
-
     def delayed_job_table
       ENV['FUNKTOR_JOBS_TABLE']
     end
 
     def jobs_to_activate
-
+      # TODO : The lookahead time here should be configurable.
+      # If this doesn't match the setting in the IncomingJobHandler some jobs
+      # might be activated and then immediately re-scheduled instead of being
+      # queued, which leads to kind of confusing stats for the "incoming" stat.
+      # (Come to think of it, the incoming stat is kind of confusing anyway since
+      # it reflects retries and scheduled job activations...)
+      target_time = (Time.now + 60).utc
       query_params = {
         expression_attribute_values: {
-          ":
+          ":queueable" => "true",
           ":targetTime" => target_time.iso8601
         },
-        key_condition_expression: "
-        projection_expression: "
+        key_condition_expression: "queueable = :queueable AND performAt < :targetTime",
+        projection_expression: "jobId, jobShard, category",
         table_name: delayed_job_table,
         index_name: "performAtIndex"
       }
```
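The `performAt < :targetTime` condition works on ISO8601 strings because DynamoDB compares string keys lexicographically, and zero-padded UTC ISO8601 timestamps sort lexicographically in the same order as chronologically; that is also why the code normalizes to `.utc` before calling `iso8601`. A quick demonstration in plain Ruby:

```ruby
# Lexicographic order of fixed-width UTC ISO8601 strings matches time order.
# (Mixing time zone offsets would break this, hence the .utc normalization.)
require 'time'

earlier = Time.utc(2021, 6, 30, 23, 59, 59).iso8601 # "2021-06-30T23:59:59Z"
later   = Time.utc(2021, 7,  1,  0,  0,  0).iso8601 # "2021-07-01T00:00:00Z"

puts earlier < later                         # => true (string comparison)
puts Time.parse(earlier) < Time.parse(later) # => true (same ordering)
```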
```diff
@@ -49,38 +51,62 @@ module Funktor
     end
 
     def handle_item(item)
-
-
-
-      delay = (Time.parse(item["performAt"]) - Time.now.utc).to_i
-      if delay < 0
-        delay = 0
-      end
+      job_shard = item["jobShard"]
+      job_id = item["jobId"]
+      current_category = item["category"]
       Funktor.logger.debug "jobShard = #{item['jobShard']}"
       Funktor.logger.debug "jobId = #{item['jobId']}"
-
-
-
+      Funktor.logger.debug "current_category = #{current_category}"
+      activate_job(job_shard, job_id, current_category)
+    end
+
+    def activate_job(job_shard, job_id, current_category, queue_immediately = false)
+      # First we conditionally update the item in Dynamo to be sure that another scheduler hasn't gotten
+      # to it, and if that works then send to SQS. This is basically how the Sidekiq scheduler works.
       response = dynamodb_client.update_item({
         key: {
-          "jobShard" =>
-          "jobId" =>
+          "jobShard" => job_shard,
+          "jobId" => job_id
+        },
+        update_expression: "SET category = :category, queueable = :queueable",
+        condition_expression: "category = :current_category",
+        expression_attribute_values: {
+          ":current_category" => current_category,
+          ":queueable" => "false",
+          ":category" => "queued"
         },
         table_name: delayed_job_table,
         return_values: "ALL_OLD"
       })
-      if response.attributes # this means the record was still there
+      if response.attributes # this means the record was still there in the state we expected
+        Funktor.logger.debug "response.attributes ====== "
+        Funktor.logger.debug response.attributes
+        job = Funktor::Job.new(response.attributes["payload"])
+        Funktor.logger.debug "we created a job from payload"
+        Funktor.logger.debug response.attributes["payload"]
+        Funktor.logger.debug "queueing to #{job.retry_queue_url}"
+        if queue_immediately
+          job.delay = 0
+        end
         sqs_client.send_message({
-
-
-
-          delay_seconds: delay
+          queue_url: job.retry_queue_url,
+          message_body: job.to_json
+          #delay_seconds: job.delay
         })
         if job.is_retry?
-
+          # We don't track here because we send stuff back to the incoming job queue and we track the
+          # :retryActivated event there.
+          # TODO - Once we're sure this is all working right we can delete the commented out line.
+          #@tracker.track(:retryActivated, job)
         else
           @tracker.track(:scheduledJobActivated, job)
         end
       end
+    rescue ::Aws::DynamoDB::Errors::ConditionalCheckFailedException => e
+      # This means that a different instance of the JobActivator (or someone doing stuff in the web UI)
+      # got to the job first.
+      Funktor.logger.debug "#{e.to_s} : #{e.message}"
+      Funktor.logger.debug e.backtrace.join("\n")
     end
 
     def call(event:, context:)
```
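The conditional update is the heart of the activator's concurrency story: flipping `category` to `"queued"` only while it still equals the value the caller read means that of two racing activators exactly one succeeds, and the loser gets `ConditionalCheckFailedException`. A pure-Ruby sketch of the claim pattern, with no AWS involved and illustrative names:

```ruby
# Simulates the compare-and-set that DynamoDB's condition_expression provides.
ConditionalCheckFailed = Class.new(StandardError)

def conditional_activate(item, current_category)
  raise ConditionalCheckFailed unless item[:category] == current_category
  item[:category]  = "queued"
  item[:queueable] = "false"
  :activated # only the winner reaches this point and sends the SQS message
end

job = { category: "scheduled", queueable: "true" }
p conditional_activate(job, "scheduled") # => :activated
begin
  conditional_activate(job, "scheduled") # the second activator loses
rescue ConditionalCheckFailed
  puts "another activator got to the job first"
end
```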
data/lib/funktor/testing.rb CHANGED
data/lib/funktor/version.rb CHANGED
data/lib/funktor/web/application.rb ADDED

```diff
@@ -0,0 +1,139 @@
+require 'sinatra/base'
+require 'aws-sdk-dynamodb'
+require_relative '../../funktor'
+require_relative '../../funktor/shard_utils'
+require_relative '../../funktor/activity_tracker'
+
+module Funktor
+  module Web
+    class Application < Sinatra::Base
+      include ShardUtils
+
+      get '/' do
+        erb :index, layout: :layout, locals: {
+          activity_data: get_activity_data
+        }
+      end
+
+      get '/scheduled' do
+        erb :scheduled, layout: :layout, locals: {
+          activity_data: get_activity_data,
+          jobs: get_jobs('scheduled')
+        }
+      end
+
+      get '/retries' do
+        erb :retries, layout: :layout, locals: {
+          activity_data: get_activity_data,
+          jobs: get_jobs('retry')
+        }
+      end
+
+      get '/queued' do
+        erb :queued, layout: :layout, locals: {
+          activity_data: get_activity_data,
+          jobs: get_jobs('queued')
+        }
+      end
+
+      get '/processing' do
+        erb :processing, layout: :layout, locals: {
+          activity_data: get_activity_data,
+          jobs: get_jobs('processing')
+        }
+      end
+
+      post '/update_jobs' do
+        job_ids = params[:job_id]
+        if job_ids.is_a?(String)
+          job_ids = [job_ids]
+        end
+        job_ids ||= []
+        puts "params[:submit] = #{params[:submit]}"
+        puts "job_ids = #{job_ids}"
+        puts "params[:source] = #{params[:source]}"
+        if params[:submit] == "Delete Selected Jobs"
+          delete_jobs(job_ids, params[:source])
+        elsif params[:submit] == "Queue Selected Jobs"
+          queue_jobs(job_ids, params[:source])
+        end
+        redirect request.referrer
+      end
+
+      def get_jobs(category)
+        "Jobs of type #{category}"
+        query_params = {
+          expression_attribute_values: {
+            ":category" => category
+          },
+          key_condition_expression: "category = :category",
+          projection_expression: "payload, performAt, jobId, jobShard",
+          table_name: ENV['FUNKTOR_JOBS_TABLE'],
+          index_name: "categoryIndex"
+        }
+        resp = dynamodb_client.query(query_params)
+        @items = resp.items
+        @jobs = @items.map{ |item| Funktor::Job.new(item["payload"]) }
+        return @jobs
+      end
+
+      def get_activity_data
+        query_params = {
+          expression_attribute_values: {
+            ":category" => "stat"
+          },
+          key_condition_expression: "category = :category",
+          projection_expression: "statName, stat_value",
+          table_name: ENV['FUNKTOR_ACTIVITY_TABLE']
+        }
+        resp = dynamodb_client.query(query_params)
+        @activity_stats = {}
+        resp.items.each do |item|
+          @activity_stats[item["statName"]] = item["stat_value"].to_i
+        end
+        return @activity_stats
+      end
+
+      def queue_jobs(job_ids, source)
+        job_activator = Funktor::JobActivator.new
+        job_ids.each do |job_id|
+          job_shard = calculate_shard(job_id)
+          job_activator.activate_job(job_shard, job_id, source, true)
+        end
+      end
+
+      def delete_jobs(job_ids, source)
+        @tracker = Funktor::ActivityTracker.new
+        job_ids.each do |job_id|
+          delete_single_job(job_id, source)
+        end
+      end
+
+      def delete_single_job(job_id, source)
+        response = dynamodb_client.delete_item({
+          key: {
+            "jobShard" => calculate_shard(job_id),
+            "jobId" => job_id
+          },
+          table_name: ENV['FUNKTOR_JOBS_TABLE'],
+          return_values: "ALL_OLD"
+        })
+        if response.attributes # this means the record was still there
+          if source == "scheduled"
+            @tracker.track(:scheduledJobDeleted, nil)
+          elsif source == "retry"
+            @tracker.track(:retryDeleted, nil)
+          end
+        end
+      end
+
+      def dynamodb_client
+        @dynamodb_client ||= ::Aws::DynamoDB::Client.new
+      end
+
+      # start the server if ruby file executed directly
+      run! if app_file == $0
+    end
+  end
+end
```
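Because `Funktor::Web::Application` subclasses `Sinatra::Base` it is an ordinary Rack app, so one plausible way to run the dashboard locally is a hand-written `config.ru` (this file is an assumption, not shipped in the diff above; it also needs AWS credentials plus `FUNKTOR_JOBS_TABLE` and `FUNKTOR_ACTIVITY_TABLE` in the environment):

```ruby
# config.ru -- hypothetical, not part of the gem. The new data/lib/funktor/web.rb
# (+1 line) suggests `require 'funktor/web'` is the intended entry point.
require 'funktor/web'

run Funktor::Web::Application
```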
data/lib/funktor/web/views/layout.erb ADDED

```diff
@@ -0,0 +1,58 @@
+<html>
+  <head>
+    <link rel="stylesheet" href="https://unpkg.com/@picocss/pico@latest/css/pico.min.css">
+    <style type="text/css" media="screen">
+      /* Green Light scheme (Default) */
+      /* Can be forced with data-theme="light" */
+      [data-theme="light"],
+      :root:not([data-theme="dark"]) {
+        --primary: #43a047;
+        --primary-hover: #388e3c;
+        --primary-focus: rgba(67, 160, 71, 0.125);
+        --primary-inverse: #FFF;
+      }
+
+      /* Green Dark scheme (Auto) */
+      /* Automatically enabled if user has Dark mode enabled */
+      @media only screen and (prefers-color-scheme: dark) {
+        :root:not([data-theme="light"]) {
+          --primary: #43a047;
+          --primary-hover: #4caf50;
+          --primary-focus: rgba(67, 160, 71, 0.25);
+          --primary-inverse: #FFF;
+        }
+      }
+
+      /* Green Dark scheme (Forced) */
+      /* Enabled if forced with data-theme="dark" */
+      [data-theme="dark"] {
+        --primary: #43a047;
+        --primary-hover: #4caf50;
+        --primary-focus: rgba(67, 160, 71, 0.25);
+        --primary-inverse: #FFF;
+      }
+
+      /* Green (Common styles) */
+      :root {
+        --form-element-active-border-color: var(--primary);
+        --form-element-focus-color: var(--primary-focus);
+        --switch-color: var(--primary-inverse);
+        --switch-checked-background-color: var(--primary);
+      }
+
+      /* custom stuff for the funktor dashboard */
+      table.header h5{
+        margin-bottom: 0;
+      }
+      body > main{
+        padding: 0;
+      }
+    </style>
+  </head>
+  <body>
+    <%= erb :stats %>
+    <main class="container-fluid">
+      <%= yield %>
+    </main>
+  </body>
+</html>
```