kubernetes_leader_election 0.2.0 → 0.3.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: c2bdfad9e4b793eee6f26f74d8de8194d949f3be60217485a8872128a2c55f3a
-  data.tar.gz: fe7bc82ccc2d62c47af0f5af51fdc1029906a827a4625b194a7a9ec58e658549
+  metadata.gz: 8249770ccbb46bf6eb75d9fd0e10200b9ab5e4c471b4f5bbccc9dfa7e82cb102
+  data.tar.gz: d587d3fb3292107e29a67d35da72d106b453942b0112369906142a4da23d85d5
 SHA512:
-  metadata.gz: 67dd25e229543178fdb51be20948f87ac7f4f0d1be0f12793bce57ef59e10aa4b5c5f433a2180afbda623ccd8ca96d8ffe9797ba3d3beebb3be3fd47abe6b8d3
-  data.tar.gz: d4071e90d7bc03ee28f01f94fb2678088be1cc79ba52dd2753c634580fa49fc6e685a1466f00c5259db8eba96229f075102bad548e41262d5c5147a96dce50fe
+  metadata.gz: c9bfbf38f35665c19401f23e26399378bdae1747fd934bd6875b626c1edfe22104a9198a96a38f4cf5dc880ebff252ee8f35e9139b4441be60c2f005efc6b0ab
+  data.tar.gz: d2b9afc8dab855511bf0b430b213a64a9438e4c5b4c425377d40fbfed8c492c190d9d52837397cef21343c6ca0c561a06aba68596ecd15b6fd7aa3168b69de96
@@ -1,4 +1,4 @@
 # frozen_string_literal: true
 class KubernetesLeaderElection
-  VERSION = "0.2.0"
+  VERSION = "0.3.0"
 end
@@ -9,12 +9,13 @@ class KubernetesLeaderElection
   FAILED_KUBERNETES_REQUEST =
     [Timeout::Error, OpenSSL::SSL::SSLError, Kubeclient::HttpError, SystemCallError, HTTP::ConnectionError].freeze
 
-  def initialize(name, kubeclient, logger:, statsd: nil, interval: 30)
+  def initialize(name, kubeclient, logger:, statsd: nil, interval: 30, retry_backoffs: [0.1, 0.5, 1, 2, 4])
     @name = name
     @kubeclient = kubeclient
     @statsd = statsd
     @logger = logger
     @interval = interval
+    @retry_backoffs = retry_backoffs
   end
 
   # not using `call` since we never want to be restarted
@@ -42,7 +43,7 @@ class KubernetesLeaderElection
 
   # show that we are alive or crash because we cannot reach the api (split-brain az)
   def signal_alive
-    with_retries(*FAILED_KUBERNETES_REQUEST, times: 3) do
+    with_retries(*FAILED_KUBERNETES_REQUEST) do
       patch = { spec: { renewTime: microtime } }
       reply = kubeclient.patch_entity(
         "leases", @name, patch, 'strategic-merge-patch', ENV.fetch("POD_NAMESPACE")
@@ -70,7 +71,7 @@ class KubernetesLeaderElection
     # retry request on regular api errors
     reraise = ->(e) { e.is_a?(Kubeclient::HttpError) && e.error_code == ALREADY_EXISTS_CODE }
 
-    with_retries(*FAILED_KUBERNETES_REQUEST, reraise: reraise, times: 3) do
+    with_retries(*FAILED_KUBERNETES_REQUEST, reraise: reraise) do
       kubeclient.create_entity(
         "Lease",
         "leases",
@@ -98,7 +99,7 @@ class KubernetesLeaderElection
   rescue Kubeclient::HttpError => e
     raise e unless e.error_code == ALREADY_EXISTS_CODE # lease already exists
 
-    lease = with_retries(*FAILED_KUBERNETES_REQUEST, times: 3) do
+    lease = with_retries(*FAILED_KUBERNETES_REQUEST) do
       kubeclient.get_entity("leases", @name, namespace)
     rescue Kubeclient::ResourceNotFoundError
       nil
@@ -114,7 +115,7 @@ class KubernetesLeaderElection
       # this is still a race-condition since we could be deleting the newly succeeded leader
       # see https://github.com/kubernetes/kubernetes/issues/20572
       @logger.info message: "deleting stale lease"
-      with_retries(*FAILED_KUBERNETES_REQUEST, times: 3) do
+      with_retries(*FAILED_KUBERNETES_REQUEST) do
         kubeclient.delete_entity("leases", @name, namespace)
       end
       false # leader is dead, do not assume leadership here to avoid race condition
@@ -123,14 +124,14 @@ class KubernetesLeaderElection
     end
   end
 
-  def with_retries(*errors, times:, reraise: nil, backoff: [0.1, 0.5, 1])
+  def with_retries(*errors, times: @retry_backoffs.size, reraise: nil)
     yield
   rescue *errors => e
     retries ||= -1
     retries += 1
     raise if retries >= times || reraise&.call(e)
     @logger.warn message: "Retryable error", type: e.class.to_s, retries: times - retries
-    sleep backoff[retries] || backoff.last
+    sleep @retry_backoffs[retries] || @retry_backoffs.last
     retry
   end
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: kubernetes_leader_election
 version: !ruby/object:Gem::Version
-  version: 0.2.0
+  version: 0.3.0
 platform: ruby
 authors:
 - Michael Grosser
-autorequire:
+autorequire:
 bindir: bin
 cert_chain: []
-date: 2022-01-19 00:00:00.000000000 Z
+date: 2024-02-24 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: kubeclient
@@ -24,7 +24,7 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
-description:
+description:
 email: michael@grosser.it
 executables: []
 extensions: []
@@ -37,7 +37,7 @@ homepage: https://github.com/grosser/kubernetes_leader_election
 licenses:
 - MIT
 metadata: {}
-post_install_message:
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -45,7 +45,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: 2.6.0
+      version: 2.7.0
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
@@ -53,7 +53,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
       version: '0'
 requirements: []
 rubygems_version: 3.1.6
-signing_key:
+signing_key:
 specification_version: 4
 summary: Elect a kubernetes leader using leases for ruby
 test_files: []
  test_files: []