network_resiliency 0.3.0 → 0.3.1

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: fb3d39476429afedf4ec8532e509dd25d0c121e21fa59f36b69bad40a0985dc6
4
- data.tar.gz: 2ef67d94b01e34a758ae52117e1e9ac5eb9fa8901841d5bcb9ef7c1cc3011f6f
3
+ metadata.gz: a9a1103993c635cbda0e6a3ba3ef523c8179680353fa67b93842f0bdfa44b197
4
+ data.tar.gz: aeb11a089950492bc6099933c137ccf79c27447a18ce6335e2a46c9982201591
5
5
  SHA512:
6
- metadata.gz: b3c19d756696ab7d4d0a11e13d6d99c70a27bd2414774e71eb31147e08685e74da0a4f5b56d15260cbbc0faf9e954b1976db307f116a47208697e83d2ff3f7ef
7
- data.tar.gz: 28a018c2ad35c15e01309da7a4f0368a67f4c21aacaba8f81fbc1fb2b41435fea368b98ec84274fca03e406416f9fa6eb403e0426adb046ba9a8ea1ae826bd83
6
+ metadata.gz: 7ea2cedcf4e6044299111e69c211a92a3d7df5305e66b4d6be83d32b949425daee581af33142f73f4d618ec178bd10e19ea0599175420838f57b0b61f157a8f5
7
+ data.tar.gz: 772d5749b23a32e6be7542c10d18794f1905fd64dde7da5b702bba4b4fb6ceb62fa3b971ce120c550db2fbc89ccec11670e3029ee0649ec7c21266597b0cb637
data/CHANGELOG.md CHANGED
@@ -1,3 +1,12 @@
1
+ ### v0.3.1 (2023-11-02)
2
+ - sync thread safety
3
+ - order of magnitude stats
4
+ - stats engine
5
+ - improve stats aggregation
6
+ - lua v1
7
+ - thread safety
8
+ - postgres adapter
9
+
1
10
  ### v0.3.0 (2023-10-04)
2
11
  - mysql adapter
3
12
  - faraday version constraint
data/Gemfile.lock CHANGED
@@ -1,18 +1,21 @@
1
1
  PATH
2
2
  remote: .
3
3
  specs:
4
- network_resiliency (0.3.0)
4
+ network_resiliency (0.3.1)
5
5
 
6
6
  GEM
7
7
  remote: https://rubygems.org/
8
8
  specs:
9
9
  byebug (11.1.3)
10
- ddtrace (1.14.0)
11
- debase-ruby_core_source (= 3.2.1)
12
- libdatadog (~> 3.0.0.1.0)
13
- libddwaf (~> 1.9.0.0.0)
10
+ datadog-ci (0.2.0)
14
11
  msgpack
15
- debase-ruby_core_source (3.2.1)
12
+ ddtrace (1.15.0)
13
+ datadog-ci (~> 0.2.0)
14
+ debase-ruby_core_source (= 3.2.2)
15
+ libdatadog (~> 5.0.0.1.0)
16
+ libddwaf (~> 1.14.0.0.0)
17
+ msgpack
18
+ debase-ruby_core_source (3.2.2)
16
19
  diff-lcs (1.5.0)
17
20
  docile (1.4.0)
18
21
  dogstatsd-ruby (4.8.3)
@@ -39,13 +42,14 @@ GEM
39
42
  faraday-patron (1.0.0)
40
43
  faraday-rack (1.0.0)
41
44
  faraday-retry (1.0.3)
42
- ffi (1.15.5)
43
- libdatadog (3.0.0.1.0)
44
- libddwaf (1.9.0.0.1)
45
+ ffi (1.16.3)
46
+ libdatadog (5.0.0.1.0)
47
+ libddwaf (1.14.0.0.0)
45
48
  ffi (~> 1.0)
46
49
  msgpack (1.7.2)
47
50
  multipart-post (2.3.0)
48
51
  mysql2 (0.5.5)
52
+ pg (1.5.4)
49
53
  rack (3.0.8)
50
54
  rack-test (2.1.0)
51
55
  rack (>= 1.3)
@@ -82,6 +86,7 @@ DEPENDENCIES
82
86
  faraday-rack
83
87
  mysql2 (>= 0.5)
84
88
  network_resiliency!
89
+ pg (~> 1.1)
85
90
  rack
86
91
  rack-test
87
92
  redis (~> 4)
@@ -89,4 +94,4 @@ DEPENDENCIES
89
94
  simplecov
90
95
 
91
96
  BUNDLED WITH
92
- 2.4.19
97
+ 2.4.20
data/README.md CHANGED
@@ -3,13 +3,22 @@ NetworkResiliency
3
3
  ![Gem](https://img.shields.io/gem/dt/network_resiliency?style=plastic)
4
4
  [![codecov](https://codecov.io/gh/dpep/network_resiliency_rb/branch/main/graph/badge.svg)](https://codecov.io/gh/dpep/network_resiliency_rb)
5
5
 
6
- Making networks more resilient to errors.
7
-
8
- Resiliency: the ability to recover from adversity or adjust to change.
6
+ Making network requests more resilient to errors.
7
+ - fewer errors, by retrying
8
+ - less time, by setting granular timeouts
9
9
 
10
10
 
11
11
  ```ruby
12
12
  require "network_resiliency"
13
+
14
+ NetworkResiliency.configure do |conf|
15
+ conf.statsd = Datadog::Statsd.new
16
+
17
+ # patch Redis instances
18
+ conf.patch :redis
19
+ end
20
+
21
+ Redis.new.connect
13
22
  ```
14
23
 
15
24
 
@@ -0,0 +1,47 @@
1
+ gem "pg", "~> 1.1"
2
+ require "pg"
3
+
4
+ module NetworkResiliency
5
+ module Adapter
6
+ module Postgres
7
+ extend self
8
+
9
+ def patch
10
+ return if patched?
11
+
12
+ PG::Connection.singleton_class.prepend(Instrumentation)
13
+ end
14
+
15
+ def patched?
16
+ PG::Connection.singleton_class.ancestors.include?(Instrumentation)
17
+ end
18
+
19
+ module Instrumentation
20
+ def connect_start(opts)
21
+ host = opts[:host].split(",")[0] if opts[:host]
22
+
23
+ return super unless NetworkResiliency.enabled?(:postgres)
24
+
25
+ begin
26
+ ts = -NetworkResiliency.timestamp
27
+
28
+ super
29
+ rescue PG::Error => e
30
+ # capture error
31
+ raise
32
+ ensure
33
+ ts += NetworkResiliency.timestamp
34
+
35
+ NetworkResiliency.record(
36
+ adapter: "postgres",
37
+ action: "connect",
38
+ destination: host,
39
+ error: e&.class,
40
+ duration: ts,
41
+ )
42
+ end
43
+ end
44
+ end
45
+ end
46
+ end
47
+ end
@@ -0,0 +1,9 @@
1
+ module NetworkResiliency
2
+ module Refinements
3
+ refine Numeric do
4
+ def order_of_magnitude
5
+ self == 0 ? 0 : 10 ** Math.log10(self).ceil
6
+ end
7
+ end
8
+ end
9
+ end
@@ -2,20 +2,33 @@ module NetworkResiliency
2
2
  class Stats
3
3
  attr_reader :n, :avg
4
4
 
5
- def self.from(n, avg, sq_dist)
6
- new.tap do |instance|
7
- instance.instance_eval do
8
- @n = n
9
- @avg = avg
10
- @sq_dist = sq_dist
5
+ class << self
6
+ def from(n:, avg:, sq_dist:)
7
+ new.tap do |instance|
8
+ instance.instance_eval do
9
+ @n = n.to_i
10
+ @avg = avg.to_f
11
+ @sq_dist = sq_dist.to_f
12
+ end
11
13
  end
12
14
  end
15
+
16
+ private
17
+
18
+ def synchronize(fn_name)
19
+ make_private = private_method_defined?(fn_name)
20
+ fn = instance_method(fn_name)
21
+
22
+ define_method(fn_name) do |*args|
23
+ @lock.synchronize { fn.bind(self).call(*args) }
24
+ end
25
+ private fn_name if make_private
26
+ end
13
27
  end
14
28
 
15
29
  def initialize(values = [])
16
- @n = 0
17
- @avg = 0.0
18
- @sq_dist = 0.0 # sum of squared distance from mean
30
+ @lock = Thread::Mutex.new
31
+ reset
19
32
 
20
33
  values.each {|x| update(x) }
21
34
  end
@@ -34,7 +47,7 @@ module NetworkResiliency
34
47
  end
35
48
 
36
49
  def variance(sample: false)
37
- @sq_dist / (sample ? (@n - 1) : @n)
50
+ @n == 0 ? 0 : @sq_dist / (sample ? (@n - 1) : @n)
38
51
  end
39
52
 
40
53
  def stdev
@@ -46,24 +59,161 @@ module NetworkResiliency
46
59
  end
47
60
  alias_method :+, :merge
48
61
 
49
- def merge!(other)
62
+ synchronize def merge!(other)
50
63
  raise ArgumentError unless other.is_a?(self.class)
51
64
 
52
- prev_n = n
53
- @n += other.n
65
+ if @n == 0
66
+ @n = other.n
67
+ @avg = other.avg
68
+ @sq_dist = other.sq_dist
69
+ elsif other.n > 0
70
+ prev_n = @n
71
+ @n += other.n
54
72
 
55
- delta = other.avg - avg
56
- @avg += delta * other.n / n
73
+ delta = other.avg - avg
74
+ @avg += delta * other.n / @n
57
75
 
58
- @sq_dist += other.instance_variable_get(:@sq_dist)
59
- @sq_dist += (delta ** 2) * prev_n * other.n / n
76
+ @sq_dist += other.sq_dist
77
+ @sq_dist += (delta ** 2) * prev_n * other.n / @n
78
+ end
60
79
 
61
80
  self
62
81
  end
63
82
 
83
+ def ==(other)
84
+ return false unless other.is_a?(self.class)
85
+
86
+ @n == other.n &&
87
+ @avg == other.avg &&
88
+ @sq_dist == other.sq_dist
89
+ end
90
+
91
+ synchronize def reset
92
+ @n = 0
93
+ @avg = 0.0
94
+ @sq_dist = 0.0 # sum of squared distance from mean
95
+ end
96
+
97
+ MIN_SAMPLE_SIZE = 1000
98
+ MAX_WINDOW_LENGTH = 1000
99
+ STATS_TTL = 24 * 60 * 60 # 1 day
100
+ CACHE_TTL = 60 # seconds
101
+
102
+ LUA_SCRIPT = <<~LUA
103
+ local results = {}
104
+
105
+ for i = 0, #KEYS / 2 - 1 do
106
+ local state_key = KEYS[i * 2 + 1]
107
+ local cache_key = KEYS[i * 2 + 2]
108
+
109
+ local n = tonumber(ARGV[i * 3 + 1])
110
+ local avg = ARGV[i * 3 + 2]
111
+ local sq_dist = math.floor(ARGV[i * 3 + 3])
112
+
113
+ if n > 0 then
114
+ -- save new data
115
+ local window_len = redis.call(
116
+ 'LPUSH',
117
+ state_key,
118
+ string.format('%d|%f|%d', n, avg, sq_dist)
119
+ )
120
+ redis.call('EXPIRE', state_key, #{STATS_TTL})
121
+
122
+ if window_len > #{MAX_WINDOW_LENGTH} then
123
+ -- trim stats to window length
124
+ redis.call('LTRIM', state_key, 0, #{MAX_WINDOW_LENGTH - 1})
125
+ end
126
+ end
127
+
128
+ -- retrieve aggregated stats
129
+
130
+ local cached_stats = redis.call('GET', cache_key)
131
+ if cached_stats then
132
+ -- use cached stats
133
+ n, avg, sq_dist = string.match(cached_stats, "(%d+)|([%d.]+)|(%d+)")
134
+ n = tonumber(n)
135
+ else
136
+ -- calculate aggregated stats
137
+ n = 0
138
+ avg = 0.0
139
+ sq_dist = 0
140
+
141
+ local stats = redis.call('LRANGE', state_key, 0, -1)
142
+ for _, entry in ipairs(stats) do
143
+ local other_n, other_avg, other_sq_dist = string.match(entry, "(%d+)|([%d.]+)|(%d+)")
144
+ other_n = tonumber(other_n)
145
+ other_avg = tonumber(other_avg) + 0.0
146
+ other_sq_dist = tonumber(other_sq_dist)
147
+
148
+ local prev_n = n
149
+ n = n + other_n
150
+
151
+ local delta = other_avg - avg
152
+ avg = avg + delta * other_n / n
153
+
154
+ sq_dist = sq_dist + other_sq_dist
155
+ sq_dist = sq_dist + (delta ^ 2) * prev_n * other_n / n
156
+ end
157
+ end
158
+
159
+ -- update cache
160
+ if n >= #{MIN_SAMPLE_SIZE} then
161
+ cached_stats = string.format('%d|%f|%d', n, avg, sq_dist)
162
+ redis.call('SET', cache_key, cached_stats, 'EX', #{CACHE_TTL})
163
+ end
164
+
165
+ -- accumulate results
166
+ table.insert(results, n)
167
+ table.insert(results, tostring(avg))
168
+ table.insert(results, sq_dist)
169
+ end
170
+
171
+ return results
172
+ LUA
173
+
174
+ def sync(redis, key)
175
+ self.class.sync(redis, key => self)[key]
176
+ end
177
+
178
+ def self.sync(redis, **data)
179
+ keys = []
180
+ args = []
181
+
182
+ data.each do |key, stats|
183
+ keys += [
184
+ "network_resiliency:stats:#{key}",
185
+ "network_resiliency:stats:cache:#{key}",
186
+ ]
187
+
188
+ args += [ stats.n, stats.avg, stats.send(:sq_dist) ]
189
+ end
190
+
191
+ res = redis.eval(LUA_SCRIPT, keys, args)
192
+ data.keys.zip(res.each_slice(3)).map do |key, stats|
193
+ n, avg, sq_dist = *stats
194
+
195
+ [ key, Stats.from(n: n, avg: avg, sq_dist: sq_dist) ]
196
+ end.to_h
197
+ end
198
+
199
+ def self.fetch(redis, keys)
200
+ data = Array(keys).map { |k| [ k, new ] }.to_h
201
+ res = sync(redis, **data)
202
+
203
+ keys.is_a?(Array) ? res : res[keys]
204
+ end
205
+
206
+ def to_s
207
+ "#<#{self.class.name}:#{object_id} n=#{n} avg=#{avg} sq_dist=#{sq_dist}>"
208
+ end
209
+
210
+ protected
211
+
212
+ attr_reader :sq_dist
213
+
64
214
  private
65
215
 
66
- def update(value)
216
+ synchronize def update(value)
67
217
  raise ArgumentError unless value.is_a?(Numeric)
68
218
 
69
219
  @n += 1
@@ -72,19 +222,6 @@ module NetworkResiliency
72
222
  @avg += (value - @avg) / @n
73
223
 
74
224
  @sq_dist += (value - prev_avg) * (value - @avg)
75
- # @sq_dist += (sq_dist - @sq_dist) / @n
76
-
77
- # for x, w in data_weight_pairs:
78
- # w_sum = w_sum + w
79
- # mean_old = mean
80
- # mean = mean_old + (w / w_sum) * (x - mean_old)
81
- # S = S + w * (x - mean_old) * (x - mean)
82
-
83
- # count += 1
84
- # delta = newValue - mean
85
- # mean += delta / count
86
- # delta2 = newValue - mean
87
- # M2 += delta * delta2
88
225
  end
89
226
  end
90
227
  end
@@ -0,0 +1,112 @@
1
+ module NetworkResiliency
2
+ module StatsEngine
3
+ extend self
4
+
5
+ LOCK = Thread::Mutex.new
6
+ STATS = {}
7
+ SYNC_LIMIT = 100
8
+
9
+ def add(key, value)
10
+ local, _ = synchronize do
11
+ STATS[key] ||= [ Stats.new, Stats.new ]
12
+ end
13
+
14
+ local << value
15
+ end
16
+
17
+ def get(key)
18
+ local, remote = synchronize do
19
+ STATS[key] ||= [ Stats.new, Stats.new ]
20
+ end
21
+
22
+ local + remote
23
+ end
24
+
25
+ def reset
26
+ synchronize { STATS.clear }
27
+ end
28
+
29
+ def sync(redis)
30
+ dirty_keys = {}
31
+
32
+ # select data to be synced
33
+ data = synchronize do
34
+ # ensure sync is not run concurrently
35
+ return [] if @syncing
36
+ @syncing = Thread.current
37
+
38
+ dirty_keys = STATS.map do |key, (local, remote)|
39
+ # skip if no new local stats and remote already synced
40
+ next if local.n == 0 && remote.n > 0
41
+
42
+ [ key, local.n ]
43
+ end.compact.to_h
44
+
45
+ # select keys to sync, prioritizing most used
46
+ keys = dirty_keys.sort_by do |key, weight|
47
+ -weight
48
+ end.take(SYNC_LIMIT).map(&:first)
49
+
50
+ # update stats for keys being synced
51
+ keys.map do |key|
52
+ local, remote = STATS[key]
53
+
54
+ remote << local # update remote stats until sync completes
55
+ STATS[key][0] = Stats.new # reset local stats
56
+
57
+ [ key, local ]
58
+ end.to_h
59
+ end
60
+
61
+ NetworkResiliency.statsd&.distribution(
62
+ "network_resiliency.sync.keys",
63
+ data.size,
64
+ tags: {
65
+ empty: data.empty?,
66
+ truncated: data.size < dirty_keys.size,
67
+ }.select { |_, v| v },
68
+ )
69
+
70
+ NetworkResiliency.statsd&.distribution(
71
+ "network_resiliency.sync.keys.dirty",
72
+ dirty_keys.select { |_, n| n > 0 }.count,
73
+ )
74
+
75
+ return [] if data.empty?
76
+
77
+ # sync data to redis
78
+ remote_stats = if NetworkResiliency.statsd
79
+ NetworkResiliency.statsd&.time("network_resiliency.sync") do
80
+ Stats.sync(redis, **data)
81
+ end
82
+ else
83
+ Stats.sync(redis, **data)
84
+ end
85
+
86
+ # integrate new remote stats
87
+ synchronize do
88
+ remote_stats.each do |key, stats|
89
+ local, remote = STATS[key]
90
+
91
+ remote.reset
92
+ remote << stats
93
+ end
94
+ end
95
+
96
+ remote_stats.keys
97
+ ensure
98
+ # release sync lock
99
+ @syncing = nil if @syncing == Thread.current
100
+ end
101
+
102
+ def syncing?
103
+ !!@syncing
104
+ end
105
+
106
+ private
107
+
108
+ def synchronize
109
+ LOCK.synchronize { yield }
110
+ end
111
+ end
112
+ end
@@ -1,3 +1,3 @@
1
1
  module NetworkResiliency
2
- VERSION = "0.3.0"
2
+ VERSION = "0.3.1"
3
3
  end
@@ -1,19 +1,27 @@
1
+ require "network_resiliency/refinements"
2
+ require "network_resiliency/stats"
3
+ require "network_resiliency/stats_engine"
1
4
  require "network_resiliency/version"
2
5
 
6
+ using NetworkResiliency::Refinements
7
+
3
8
  module NetworkResiliency
4
9
  module Adapter
5
10
  autoload :HTTP, "network_resiliency/adapter/http"
6
11
  autoload :Faraday, "network_resiliency/adapter/faraday"
7
12
  autoload :Redis, "network_resiliency/adapter/redis"
8
13
  autoload :Mysql, "network_resiliency/adapter/mysql"
14
+ autoload :Postgres, "network_resiliency/adapter/postgres"
9
15
  end
10
16
 
11
17
  extend self
12
18
 
13
- attr_accessor :statsd
19
+ attr_accessor :statsd, :redis
14
20
 
15
21
  def configure
16
- yield self
22
+ yield self if block_given?
23
+
24
+ start_syncing if redis
17
25
  end
18
26
 
19
27
  def patch(*adapters)
@@ -25,6 +33,8 @@ module NetworkResiliency
25
33
  Adapter::Redis.patch
26
34
  when :mysql
27
35
  Adapter::Mysql.patch
36
+ when :postgres
37
+ Adapter::Postgres.patch
28
38
  else
29
39
  raise NotImplementedError
30
40
  end
@@ -32,6 +42,7 @@ module NetworkResiliency
32
42
  end
33
43
 
34
44
  def enabled?(adapter)
45
+ return thread_state["enabled"] if thread_state.key?("enabled")
35
46
  return true if @enabled.nil?
36
47
 
37
48
  if @enabled.is_a?(Proc)
@@ -55,20 +66,20 @@ module NetworkResiliency
55
66
 
56
67
  def enable!
57
68
  original = @enabled
58
- @enabled = true
69
+ thread_state["enabled"] = true
59
70
 
60
71
  yield if block_given?
61
72
  ensure
62
- @enabled = original if block_given?
73
+ thread_state.delete("enabled") if block_given?
63
74
  end
64
75
 
65
76
  def disable!
66
77
  original = @enabled
67
- @enabled = false
78
+ thread_state["enabled"] = false
68
79
 
69
80
  yield if block_given?
70
81
  ensure
71
- @enabled = original if block_given?
82
+ thread_state.delete("enabled") if block_given?
72
83
  end
73
84
 
74
85
  def timestamp
@@ -93,9 +104,56 @@ module NetworkResiliency
93
104
  error: error,
94
105
  }.compact,
95
106
  )
107
+
108
+ NetworkResiliency.statsd&.distribution(
109
+ "network_resiliency.#{action}.magnitude",
110
+ duration.order_of_magnitude,
111
+ tags: {
112
+ adapter: adapter,
113
+ destination: destination,
114
+ error: error,
115
+ }.compact,
116
+ )
117
+
118
+ key = [ adapter, action, destination ].join(":")
119
+ StatsEngine.add(key, duration)
120
+ rescue => e
121
+ NetworkResiliency.statsd&.increment(
122
+ "network_resiliency.error",
123
+ tags: {
124
+ type: e.class,
125
+ },
126
+ )
127
+
128
+ warn "[ERROR] NetworkResiliency: #{e.class}: #{e.message}"
96
129
  end
97
130
 
98
131
  def reset
99
132
  @enabled = nil
133
+ Thread.current["network_resiliency"] = nil
134
+ StatsEngine.reset
135
+ @sync_worker.kill if @sync_worker
136
+ end
137
+
138
+ private
139
+
140
+ def thread_state
141
+ Thread.current["network_resiliency"] ||= {}
142
+ end
143
+
144
+ def start_syncing
145
+ @sync_worker.kill if @sync_worker
146
+
147
+ raise "Redis not configured" unless redis
148
+
149
+ @sync_worker = Thread.new do
150
+ while true do
151
+ StatsEngine.sync(redis)
152
+
153
+ sleep(3)
154
+ end
155
+ rescue Interrupt
156
+ # goodbye
157
+ end
100
158
  end
101
159
  end
@@ -19,6 +19,7 @@ Gem::Specification.new do |s|
19
19
  s.add_development_dependency "faraday", "~> 1"
20
20
  s.add_development_dependency "faraday-rack"
21
21
  s.add_development_dependency "mysql2", ">= 0.5"
22
+ s.add_development_dependency "pg", "~> 1.1"
22
23
  s.add_development_dependency "rack"
23
24
  s.add_development_dependency "rack-test"
24
25
  s.add_development_dependency "redis", "~> 4"
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: network_resiliency
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.3.0
4
+ version: 0.3.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Daniel Pepper
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2023-10-04 00:00:00.000000000 Z
11
+ date: 2023-11-02 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: byebug
@@ -94,6 +94,20 @@ dependencies:
94
94
  - - ">="
95
95
  - !ruby/object:Gem::Version
96
96
  version: '0.5'
97
+ - !ruby/object:Gem::Dependency
98
+ name: pg
99
+ requirement: !ruby/object:Gem::Requirement
100
+ requirements:
101
+ - - "~>"
102
+ - !ruby/object:Gem::Version
103
+ version: '1.1'
104
+ type: :development
105
+ prerelease: false
106
+ version_requirements: !ruby/object:Gem::Requirement
107
+ requirements:
108
+ - - "~>"
109
+ - !ruby/object:Gem::Version
110
+ version: '1.1'
97
111
  - !ruby/object:Gem::Dependency
98
112
  name: rack
99
113
  requirement: !ruby/object:Gem::Requirement
@@ -179,8 +193,11 @@ files:
179
193
  - lib/network_resiliency/adapter/faraday.rb
180
194
  - lib/network_resiliency/adapter/http.rb
181
195
  - lib/network_resiliency/adapter/mysql.rb
196
+ - lib/network_resiliency/adapter/postgres.rb
182
197
  - lib/network_resiliency/adapter/redis.rb
198
+ - lib/network_resiliency/refinements.rb
183
199
  - lib/network_resiliency/stats.rb
200
+ - lib/network_resiliency/stats_engine.rb
184
201
  - lib/network_resiliency/version.rb
185
202
  - network_resiliency.gemspec
186
203
  homepage: https://github.com/dpep/network_resiliency_rb
@@ -202,7 +219,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
202
219
  - !ruby/object:Gem::Version
203
220
  version: '0'
204
221
  requirements: []
205
- rubygems_version: 3.3.7
222
+ rubygems_version: 3.2.33
206
223
  signing_key:
207
224
  specification_version: 4
208
225
  summary: NetworkResiliency