kubernetes-deploy 0.22.0 → 0.23.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/.rubocop.yml +8 -0
- data/CHANGELOG.md +16 -0
- data/README.md +32 -0
- data/exe/kubernetes-deploy +2 -15
- data/exe/kubernetes-render +32 -0
- data/kubernetes-deploy.gemspec +5 -3
- data/lib/kubernetes-deploy.rb +5 -3
- data/lib/kubernetes-deploy/cluster_resource_discovery.rb +34 -0
- data/lib/kubernetes-deploy/container_logs.rb +25 -13
- data/lib/kubernetes-deploy/deploy_task.rb +68 -50
- data/lib/kubernetes-deploy/errors.rb +1 -0
- data/lib/kubernetes-deploy/formatted_logger.rb +16 -2
- data/lib/kubernetes-deploy/kubeclient_builder/google_friendly_config.rb +4 -6
- data/lib/kubernetes-deploy/kubectl.rb +20 -9
- data/lib/kubernetes-deploy/kubernetes_resource.rb +5 -6
- data/lib/kubernetes-deploy/kubernetes_resource/cloudsql.rb +3 -4
- data/lib/kubernetes-deploy/kubernetes_resource/daemon_set.rb +4 -5
- data/lib/kubernetes-deploy/kubernetes_resource/deployment.rb +7 -8
- data/lib/kubernetes-deploy/kubernetes_resource/memcached.rb +4 -5
- data/lib/kubernetes-deploy/kubernetes_resource/pod.rb +7 -5
- data/lib/kubernetes-deploy/kubernetes_resource/pod_set_base.rb +12 -6
- data/lib/kubernetes-deploy/kubernetes_resource/redis.rb +5 -6
- data/lib/kubernetes-deploy/kubernetes_resource/replica_set.rb +23 -5
- data/lib/kubernetes-deploy/kubernetes_resource/role.rb +22 -0
- data/lib/kubernetes-deploy/kubernetes_resource/service.rb +8 -4
- data/lib/kubernetes-deploy/kubernetes_resource/stateful_set.rb +2 -3
- data/lib/kubernetes-deploy/oj.rb +4 -0
- data/lib/kubernetes-deploy/options_helper.rb +27 -0
- data/lib/kubernetes-deploy/remote_logs.rb +10 -4
- data/lib/kubernetes-deploy/render_task.rb +119 -0
- data/lib/kubernetes-deploy/renderer.rb +1 -1
- data/lib/kubernetes-deploy/resource_cache.rb +64 -0
- data/lib/kubernetes-deploy/resource_watcher.rb +27 -6
- data/lib/kubernetes-deploy/restart_task.rb +5 -6
- data/lib/kubernetes-deploy/runner_task.rb +6 -10
- data/lib/kubernetes-deploy/statsd.rb +60 -7
- data/lib/kubernetes-deploy/template_discovery.rb +15 -0
- data/lib/kubernetes-deploy/version.rb +1 -1
- data/pull_request_template.md +8 -0
- metadata +47 -5
- data/lib/kubernetes-deploy/resource_discovery.rb +0 -19
- data/lib/kubernetes-deploy/sync_mediator.rb +0 -80
```diff
--- a/data/lib/kubernetes-deploy/kubernetes_resource/stateful_set.rb
+++ b/data/lib/kubernetes-deploy/kubernetes_resource/stateful_set.rb
@@ -6,10 +6,9 @@ module KubernetesDeploy
     ONDELETE = 'OnDelete'
     attr_reader :pods
 
-
-    def sync(mediator)
+    def sync(cache)
       super
-      @pods = exists? ? find_pods(mediator) : []
+      @pods = exists? ? find_pods(cache) : []
     end
 
     def status
```
```diff
--- /dev/null
+++ b/data/lib/kubernetes-deploy/options_helper.rb
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+module KubernetesDeploy
+  module OptionsHelper
+    def self.default_and_check_template_dir(template_dir)
+      if !template_dir && ENV.key?("ENVIRONMENT")
+        template_dir = "config/deploy/#{ENV['ENVIRONMENT']}"
+      end
+
+      if !template_dir || template_dir.empty?
+        puts "Template directory is unknown. " \
+          "Either specify --template-dir argument or set $ENVIRONMENT to use config/deploy/$ENVIRONMENT " \
+          + "as a default path."
+        exit 1
+      end
+
+      template_dir
+    end
+
+    def self.revision_from_environment
+      ENV.fetch('REVISION') do
+        puts "ENV['REVISION'] is missing. Please specify the commit SHA"
+        exit 1
+      end
+    end
+  end
+end
```
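The two helpers above centralize argument handling for the executables. A minimal sketch of how a CLI entry point such as exe/kubernetes-deploy or the new exe/kubernetes-render might call them; the `template_dir_arg` variable and the use of `ARGV` are illustrative, not the gem's actual option parsing.

```ruby
require 'kubernetes-deploy/options_helper'

template_dir_arg = ARGV.first # e.g. the value of a --template-dir flag, nil when omitted
template_dir = KubernetesDeploy::OptionsHelper.default_and_check_template_dir(template_dir_arg)
revision = KubernetesDeploy::OptionsHelper.revision_from_environment

puts "Rendering templates from #{template_dir} at revision #{revision}"
```

Both helpers print a message and `exit 1` when their input is missing, so callers can use the return values directly.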
```diff
--- a/data/lib/kubernetes-deploy/remote_logs.rb
+++ b/data/lib/kubernetes-deploy/remote_logs.rb
@@ -5,11 +5,17 @@ module KubernetesDeploy
   class RemoteLogs
     attr_reader :container_logs
 
-    def initialize(logger:, parent_id:, container_names:)
+    def initialize(logger:, parent_id:, container_names:, namespace:, context:)
       @logger = logger
       @parent_id = parent_id
       @container_logs = container_names.map do |n|
-        ContainerLogs.new(logger: logger, container_name: n, parent_id: parent_id)
+        ContainerLogs.new(
+          logger: logger,
+          container_name: n,
+          parent_id: parent_id,
+          namespace: namespace,
+          context: context
+        )
       end
     end
 
@@ -17,8 +23,8 @@ module KubernetesDeploy
       @container_logs.all?(&:empty?)
     end
 
-    def sync
-      @container_logs.each
+    def sync
+      @container_logs.each(&:sync)
     end
 
     def print_latest
```
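RemoteLogs now takes the namespace and context and hands them to each ContainerLogs instance, presumably so the log fetching no longer depends on the removed SyncMediator. A minimal construction sketch; the logger, the `parent_id` value, and the container names are placeholders.

```ruby
require 'logger'

logs = KubernetesDeploy::RemoteLogs.new(
  logger: Logger.new($stderr),
  parent_id: "pod/my-task-runner",   # illustrative identifier
  container_names: %w(task-runner),
  namespace: "my-namespace",
  context: "my-context"
)
logs.sync          # each ContainerLogs fetches its own logs
logs.print_latest
```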
```diff
--- /dev/null
+++ b/data/lib/kubernetes-deploy/render_task.rb
@@ -0,0 +1,119 @@
+# frozen_string_literal: true
+require 'tempfile'
+
+require 'kubernetes-deploy/renderer'
+require 'kubernetes-deploy/template_discovery'
+
+module KubernetesDeploy
+  class RenderTask
+    def initialize(logger:, current_sha:, template_dir:, bindings:)
+      @logger = logger
+      @template_dir = template_dir
+      @renderer = KubernetesDeploy::Renderer.new(
+        current_sha: current_sha,
+        bindings: bindings,
+        template_dir: @template_dir,
+        logger: @logger,
+      )
+    end
+
+    def run(*args)
+      run!(*args)
+      true
+    rescue KubernetesDeploy::FatalDeploymentError
+      false
+    end
+
+    def run!(stream, only_filenames = [])
+      @logger.reset
+      @logger.phase_heading("Initializing render task")
+
+      filenames = if only_filenames.empty?
+        TemplateDiscovery.new(@template_dir).templates
+      else
+        only_filenames
+      end
+
+      validate_configuration(filenames)
+      render_filenames(stream, filenames)
+
+      @logger.summary.add_action("Successfully rendered #{filenames.size} template(s)")
+      @logger.print_summary(:success)
+    rescue KubernetesDeploy::FatalDeploymentError
+      @logger.print_summary(:failure)
+      raise
+    end
+
+    private
+
+    def render_filenames(stream, filenames)
+      exceptions = []
+      @logger.phase_heading("Rendering template(s)")
+
+      filenames.each do |filename|
+        begin
+          render_filename(filename, stream)
+        rescue KubernetesDeploy::InvalidTemplateError => exception
+          exceptions << exception
+          log_invalid_template(exception)
+        end
+      end
+
+      unless exceptions.empty?
+        raise exceptions[0]
+      end
+    end
+
+    def render_filename(filename, stream)
+      @logger.info("Rendering #{File.basename(filename)} ...")
+      file_content = File.read(File.join(@template_dir, filename))
+      rendered_content = @renderer.render_template(filename, file_content)
+      YAML.load_stream(rendered_content, "<rendered> #{filename}") do |doc|
+        stream.puts YAML.dump(doc)
+      end
+      @logger.info("Rendered #{File.basename(filename)}")
+    rescue Psych::SyntaxError => exception
+      raise InvalidTemplateError.new("Template is not valid YAML. #{exception.message}", filename: filename)
+    end
+
+    def validate_configuration(filenames)
+      @logger.info("Validating configuration")
+      errors = []
+
+      if filenames.empty?
+        errors << "no templates found in template dir #{@template_dir}"
+      end
+
+      absolute_template_dir = File.expand_path(@template_dir)
+
+      filenames.each do |filename|
+        absolute_file = File.expand_path(File.join(@template_dir, filename))
+        if !File.exist?(absolute_file)
+          errors << "Filename \"#{absolute_file}\" could not be found"
+        elsif !File.file?(absolute_file)
+          errors << "Filename \"#{absolute_file}\" is not a file"
+        elsif !absolute_file.start_with?(absolute_template_dir)
+          errors << "Filename \"#{absolute_file}\" is outside the template directory," \
+            " which was resolved as #{absolute_template_dir}"
+        end
+      end
+
+      unless errors.empty?
+        @logger.summary.add_action("Configuration invalid")
+        @logger.summary.add_paragraph(errors.map { |err| "- #{err}" }.join("\n"))
+        raise KubernetesDeploy::TaskConfigurationError, "Configuration invalid: #{errors.join(', ')}"
+      end
+    end
+
+    def log_invalid_template(exception)
+      @logger.error("Failed to render #{exception.filename}")
+
+      debug_msg = ColorizedString.new("Invalid template: #{exception.filename}\n").red
+      debug_msg += "> Error message:\n#{FormattedLogger.indent_four(exception.to_s)}"
+      if exception.content
+        debug_msg += "\n> Template content:\n#{FormattedLogger.indent_four(exception.content)}"
+      end
+      @logger.summary.add_paragraph(debug_msg)
+    end
+  end
+end
```
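RenderTask backs the new exe/kubernetes-render executable: it renders the ERB templates and writes the resulting YAML documents to a stream. A small usage sketch based on the constructor and `run` signatures above; the logger construction, template path, and bindings hash are assumptions.

```ruby
require 'kubernetes-deploy'

# `logger` must provide phase_heading, summary, print_summary, etc. (i.e. a
# KubernetesDeploy::FormattedLogger); the nil namespace/context arguments here are an assumption.
logger = KubernetesDeploy::FormattedLogger.build(nil, nil)

task = KubernetesDeploy::RenderTask.new(
  logger: logger,
  current_sha: ENV['REVISION'],
  template_dir: "config/deploy/production", # assumed location
  bindings: { "replica_count" => "3" }      # illustrative ERB bindings
)

# Render every template in the directory (or only the filenames given) to STDOUT.
success = task.run($stdout, ARGV)
exit(1) unless success
```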
```diff
--- a/data/lib/kubernetes-deploy/renderer.rb
+++ b/data/lib/kubernetes-deploy/renderer.rb
@@ -52,7 +52,7 @@ module KubernetesDeploy
       template = File.read(partial_path)
       expanded_template = ERB.new(template, nil, '-').result(erb_binding)
 
-      docs = Psych.parse_stream(expanded_template)
+      docs = Psych.parse_stream(expanded_template, partial_path)
       # If the partial contains multiple documents or has an explicit document header,
       # we know it cannot validly be indented in the parent, so return it immediately.
       return expanded_template unless docs.children.one? && docs.children.first.implicit
```
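The one-line change above passes the partial's path as Psych's filename argument, so a YAML syntax error inside a partial is reported against that file instead of an anonymous string. A small illustration of the effect, using a made-up partial path and assuming the positional filename argument accepted by the Psych versions this gem targets:

```ruby
require 'psych'

begin
  Psych.parse_stream("key: [unclosed", "partials/annotated_pod.yml.erb")
rescue Psych::SyntaxError => e
  # The message now names the partial, e.g.
  # "(partials/annotated_pod.yml.erb): did not find expected ... at line 1 ..."
  puts e.message
end
```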
```diff
--- /dev/null
+++ b/data/lib/kubernetes-deploy/resource_cache.rb
@@ -0,0 +1,64 @@
+# frozen_string_literal: true
+
+require 'concurrent/hash'
+
+module KubernetesDeploy
+  class ResourceCache
+    def initialize(namespace, context, logger)
+      @namespace = namespace
+      @context = context
+      @logger = logger
+
+      @kind_fetcher_locks = Concurrent::Hash.new { |hash, key| hash[key] = Mutex.new }
+      @data = Concurrent::Hash.new
+      @kubectl = Kubectl.new(namespace: @namespace, context: @context, logger: @logger, log_failure_by_default: false)
+    end
+
+    def get_instance(kind, resource_name, raise_if_not_found: false)
+      instance = use_or_populate_cache(kind).fetch(resource_name, {})
+      if instance.blank? && raise_if_not_found
+        raise KubernetesDeploy::Kubectl::ResourceNotFoundError, "Resource does not exist (used cache for kind #{kind})"
+      end
+      instance
+    rescue KubectlError
+      {}
+    end
+
+    def get_all(kind, selector = nil)
+      instances = use_or_populate_cache(kind).values
+      return instances unless selector
+
+      instances.select do |r|
+        labels = r.dig("metadata", "labels") || {}
+        labels >= selector
+      end
+    rescue KubectlError
+      []
+    end
+
+    private
+
+    def statsd_tags
+      { namespace: @namespace, context: @context }
+    end
+
+    def use_or_populate_cache(kind)
+      @kind_fetcher_locks[kind].synchronize do
+        return @data[kind] if @data.key?(kind)
+        @data[kind] = fetch_by_kind(kind)
+      end
+    end
+
+    def fetch_by_kind(kind)
+      raw_json, _, st = @kubectl.run("get", kind, "--chunk-size=0", "--output=json", attempts: 5)
+      raise KubectlError unless st.success?
+
+      instances = {}
+      JSON.parse(raw_json)["items"].each do |resource|
+        resource_name = resource.dig("metadata", "name")
+        instances[resource_name] = resource
+      end
+      instances
+    end
+  end
+end
```
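ResourceCache replaces the old SyncMediator: each kind is fetched from the cluster at most once per sync pass (guarded by a per-kind mutex), and individual resources are then read from that snapshot. A minimal usage sketch; the namespace, context, logger, and resource names are placeholders.

```ruby
require 'logger'

cache = KubernetesDeploy::ResourceCache.new("my-namespace", "my-context", Logger.new($stderr))

# The first lookup for a kind shells out once to `kubectl get <kind> --output=json`;
# subsequent lookups for the same kind reuse the cached snapshot.
web_deployment = cache.get_instance("Deployment", "web")   # {} if not found (or on kubectl error)
web_pods = cache.get_all("Pod", "app" => "web")            # label selector matched client-side
```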
```diff
--- a/data/lib/kubernetes-deploy/resource_watcher.rb
+++ b/data/lib/kubernetes-deploy/resource_watcher.rb
@@ -1,8 +1,10 @@
 # frozen_string_literal: true
 module KubernetesDeploy
   class ResourceWatcher
-
-
+    extend KubernetesDeploy::StatsD::MeasureMethods
+
+    def initialize(resources:, logger:, context:, namespace:,
+                   deploy_started_at: Time.now.utc, operation_name: "deploy", timeout: nil, sha: nil)
       unless resources.is_a?(Enumerable)
         raise ArgumentError, <<~MSG
           ResourceWatcher expects Enumerable collection, got `#{resources.class}` instead
@@ -10,10 +12,12 @@ module KubernetesDeploy
       end
       @resources = resources
       @logger = logger
-      @
+      @namespace = namespace
+      @context = context
       @deploy_started_at = deploy_started_at
       @operation_name = operation_name
       @timeout = timeout
+      @sha = sha
     end
 
     def run(delay_sync: 3.seconds, reminder_interval: 30.seconds, record_summary: true)
@@ -24,8 +28,7 @@ module KubernetesDeploy
         report_and_give_up(remainder) if global_timeout?(monitoring_started)
         sleep_until_next_sync(delay_sync)
 
-
-        remainder.each(&:after_sync)
+        sync_resources(remainder)
 
         new_successes, remainder = remainder.partition(&:deploy_succeeded?)
         new_failures, remainder = remainder.partition(&:deploy_failed?)
@@ -45,6 +48,21 @@ module KubernetesDeploy
 
     private
 
+    def sync_resources(resources)
+      cache = ResourceCache.new(@namespace, @context, @logger)
+      KubernetesDeploy::Concurrency.split_across_threads(resources) { |r| r.sync(cache) }
+      resources.each(&:after_sync)
+    end
+    measure_method(:sync_resources, "sync.duration")
+
+    def statsd_tags
+      {
+        namespace: @namespace,
+        context: @context,
+        sha: @sha
+      }
+    end
+
     def global_timeout?(started_at)
       @timeout && (Time.now.utc - started_at > @timeout)
     end
@@ -118,9 +136,12 @@ module KubernetesDeploy
           "failed to #{@operation_name} #{failures.length} #{'resource'.pluralize(failures.length)}"
         )
       end
+
+      kubectl = Kubectl.new(namespace: @namespace, context: @context, logger: @logger, log_failure_by_default: false)
       KubernetesDeploy::Concurrency.split_across_threads(failed_resources + global_timeouts) do |r|
-        r.sync_debug_info(
+        r.sync_debug_info(kubectl)
       end
+
      failed_resources.each { |r| @logger.summary.add_paragraph(r.debug_message) }
      global_timeouts.each { |r| @logger.summary.add_paragraph(r.debug_message(:gave_up, timeout: @timeout)) }
    end
```
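ResourceWatcher now builds the ResourceCache itself, so callers pass the namespace and context (plus an optional sha used in the StatsD tags) instead of a SyncMediator. A sketch of the new call shape, mirroring how RestartTask and RunnerTask invoke it in the hunks below; the logger construction and the concrete values are assumptions.

```ruby
# Assumed FormattedLogger builder arguments; `resources` stands for an Enumerable
# of already-built KubernetesResource instances.
logger = KubernetesDeploy::FormattedLogger.build("my-namespace", "my-context")

watcher = KubernetesDeploy::ResourceWatcher.new(
  resources: resources,
  logger: logger,
  namespace: "my-namespace",
  context: "my-context",
  operation_name: "deploy",
  timeout: 300,            # overall watch timeout in seconds
  sha: ENV['REVISION']
)
watcher.run(record_summary: true)
```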
```diff
--- a/data/lib/kubernetes-deploy/restart_task.rb
+++ b/data/lib/kubernetes-deploy/restart_task.rb
@@ -24,7 +24,6 @@ module KubernetesDeploy
       @context = context
       @namespace = namespace
       @logger = logger
-      @sync_mediator = SyncMediator.new(namespace: @namespace, context: @context, logger: @logger)
       @max_watch_seconds = max_watch_seconds
     end
 
@@ -50,22 +49,22 @@ module KubernetesDeploy
 
       @logger.phase_heading("Waiting for rollout")
       resources = build_watchables(deployments, start)
-      ResourceWatcher.new(resources: resources,
-
+      ResourceWatcher.new(resources: resources, logger: @logger, operation_name: "restart",
+                          timeout: @max_watch_seconds, namespace: @namespace, context: @context).run
       failed_resources = resources.reject(&:deploy_succeeded?)
       success = failed_resources.empty?
       if !success && failed_resources.all?(&:deploy_timed_out?)
         raise DeploymentTimeoutError
       end
       raise FatalDeploymentError unless success
-
+      StatsD.distribution('restart.duration', StatsD.duration(start), tags: tags('success', deployments))
       @logger.print_summary(:success)
     rescue DeploymentTimeoutError
-
+      StatsD.distribution('restart.duration', StatsD.duration(start), tags: tags('timeout', deployments))
       @logger.print_summary(:timed_out)
       raise
     rescue FatalDeploymentError => error
-
+      StatsD.distribution('restart.duration', StatsD.duration(start), tags: tags('failure', deployments))
       @logger.summary.add_action(error.message) if error.message != error.class.to_s
       @logger.print_summary(:failure)
       raise
```
```diff
--- a/data/lib/kubernetes-deploy/runner_task.rb
+++ b/data/lib/kubernetes-deploy/runner_task.rb
@@ -8,7 +8,6 @@ module KubernetesDeploy
   class RunnerTask
     include KubeclientBuilder
 
-    class TaskConfigurationError < FatalDeploymentError; end
     class TaskTemplateMissingError < TaskConfigurationError; end
 
     attr_reader :pod_name
@@ -45,14 +44,14 @@ module KubernetesDeploy
       else
         record_status_once(pod)
       end
-
+      StatsD.distribution('task_runner.duration', StatsD.duration(start), tags: statsd_tags('success'))
       @logger.print_summary(:success)
     rescue DeploymentTimeoutError
-
+      StatsD.distribution('task_runner.duration', StatsD.duration(start), tags: statsd_tags('timeout'))
       @logger.print_summary(:timed_out)
       raise
     rescue FatalDeploymentError
-
+      StatsD.distribution('task_runner.duration', StatsD.duration(start), tags: statsd_tags('failure'))
       @logger.print_summary(:failure)
       raise
     end
@@ -87,14 +86,15 @@ module KubernetesDeploy
 
     def watch_pod(pod)
       rw = ResourceWatcher.new(resources: [pod], logger: @logger, timeout: @max_watch_seconds,
-
+        operation_name: "run", namespace: @namespace, context: @context)
       rw.run(delay_sync: 1, reminder_interval: 30.seconds)
       raise DeploymentTimeoutError if pod.deploy_timed_out?
       raise FatalDeploymentError if pod.deploy_failed?
     end
 
     def record_status_once(pod)
-
+      cache = ResourceCache.new(@namespace, @context, @logger)
+      pod.sync(cache)
       warning = <<~STRING
         #{ColorizedString.new('Result verification is disabled for this task.').yellow}
         The following status was observed immediately after pod creation:
@@ -197,10 +197,6 @@ module KubernetesDeploy
       @kubectl ||= Kubectl.new(namespace: @namespace, context: @context, logger: @logger, log_failure_by_default: true)
     end
 
-    def sync_mediator
-      @sync_mediator ||= SyncMediator.new(namespace: @namespace, context: @context, logger: @logger)
-    end
-
     def kubeclient
       @kubeclient ||= build_v1_kubeclient(@context)
     end
```
```diff
--- a/data/lib/kubernetes-deploy/statsd.rb
+++ b/data/lib/kubernetes-deploy/statsd.rb
@@ -4,23 +4,76 @@ require 'logger'
 
 module KubernetesDeploy
   class StatsD
+    extend ::StatsD
+
+    PREFIX = "KubernetesDeploy"
+
     def self.duration(start_time)
       (Time.now.utc - start_time).round(1)
     end
 
     def self.build
-      ::StatsD.default_sample_rate = 1.0
-      ::StatsD.prefix = "KubernetesDeploy"
-
       if ENV['STATSD_DEV'].present?
-
+        self.backend = ::StatsD::Instrument::Backends::LoggerBackend.new(Logger.new($stderr))
       elsif ENV['STATSD_ADDR'].present?
         statsd_impl = ENV['STATSD_IMPLEMENTATION'].present? ? ENV['STATSD_IMPLEMENTATION'] : "datadog"
-
+        self.backend = ::StatsD::Instrument::Backends::UDPBackend.new(ENV['STATSD_ADDR'], statsd_impl)
       else
-
+        self.backend = ::StatsD::Instrument::Backends::NullBackend.new
+      end
+    end
+
+    # It is not sufficient to set the prefix field on the KubernetesDeploy::StatsD singleton itself, since its value
+    # is overridden in the underlying calls to the ::StatsD library, hence the need to pass it in as a custom prefix
+    # via the metric_options hash. This is done since KubernetesDeploy may be included as a library and should not
+    # change the global StatsD configuration of the importing application.
+    def self.increment(key, value = 1, **metric_options)
+      metric_options[:prefix] = PREFIX
+      super
+    end
+
+    def self.distribution(key, value = nil, **metric_options, &block)
+      metric_options[:prefix] = PREFIX
+      super
+    end
+
+    module MeasureMethods
+      def measure_method(method_name, metric = nil)
+        unless method_defined?(method_name) || private_method_defined?(method_name)
+          raise NotImplementedError, "Cannot instrument undefined method #{method_name}"
+        end
+
+        unless const_defined?("InstrumentationProxy")
+          const_set("InstrumentationProxy", Module.new)
+          should_prepend = true
+        end
+
+        metric ||= "#{method_name}.duration"
+        self::InstrumentationProxy.send(:define_method, method_name) do |*args, &block|
+          begin
+            start_time = Time.now.utc
+            super(*args, &block)
+          rescue
+            error = true
+            raise
+          ensure
+            dynamic_tags = send(:statsd_tags) if respond_to?(:statsd_tags, true)
+            dynamic_tags ||= {}
+            if error
+              dynamic_tags[:error] = error if dynamic_tags.is_a?(Hash)
+              dynamic_tags << "error:#{error}" if dynamic_tags.is_a?(Array)
+            end
+
+            StatsD.distribution(
+              metric,
+              KubernetesDeploy::StatsD.duration(start_time),
+              tags: dynamic_tags
+            )
+          end
+        end
+
+        prepend(self::InstrumentationProxy) if should_prepend
       end
-      ::StatsD.backend
     end
   end
 end
```
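MeasureMethods is the instrumentation hook used by ResourceWatcher above (`measure_method(:sync_resources, "sync.duration")`). It wraps the named method in a prepended module, times it, merges in the instance's `statsd_tags` (adding an `error` tag when the call raises), and emits a distribution under the `KubernetesDeploy` prefix. A self-contained sketch using an invented class; the class, method, and tag values are illustrative.

```ruby
class ExampleSweeper
  extend KubernetesDeploy::StatsD::MeasureMethods

  def sweep(names)
    names.each { |n| puts "sweeping #{n}" }
  end
  # Must be called after the method is defined; emits "KubernetesDeploy.sweep.duration".
  measure_method(:sweep, "sweep.duration")

  private

  # Looked up via respond_to?(:statsd_tags, true) and attached to the metric as tags.
  def statsd_tags
    { namespace: "my-namespace", context: "my-context" }
  end
end

ExampleSweeper.new.sweep(%w(a b))
```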