apisonator 3.0.1 → 3.3.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: d679cf1a4c4c0a5aac65b32fdfe2680621b404f07ec3079c2c58ecccd72bf7aa
- data.tar.gz: 03aa0a028f26da2a168cc5c75a8303413cc8a839a21b2b18345ea03087c90dee
+ metadata.gz: a8e132216edb5cd3b44967acab917d4ca4f50c692c22fb5b81c7ffff9c5d598b
+ data.tar.gz: e2e31c92b450398dbcd3a3dcc3d31ed39b2d530048de23aa06b95c4930809249
  SHA512:
- metadata.gz: 0ea513d0c1ec6728e7a0ee6159645a98854aca8247f01aecfe8c38459efa03fb4a37fe72a822acb894ec8a9d0afea5b5d48cf9f1182864dce63ea7386d8460a9
- data.tar.gz: 5cc2a09921f3c2cf7afc16d673a77fc701cda7377e5ff9a063585d541c5363f29c9593dfb2f5fc100fc4d8e11d17812a6522344df269d5bb0a63d53d67e6d4ea
+ metadata.gz: 288b9c506a32caf81e2b3bcbf06b347e1f39f32a295a61d146ebc9b7f5953458f27114c3eceb08ef00da6293487d02f49b5c7688af16f87c0aa0ff4c70cfa48f
+ data.tar.gz: 025f8fcf1edb087de5e97ad43ecca61e25c97f061f0002b75d210a1052fa76355c5ea8a34515018d41b21428791d255341e2f36d2c178872ae549d3f97f2e52e
data/CHANGELOG.md CHANGED
@@ -2,6 +2,80 @@
 
  Notable changes to Apisonator will be tracked in this document.
 
+ ## 3.3.0 - 2021-02-09
+
+ ### Added
+
+ - Rake task to delete stats keys set to 0 in the DB left there because of [this
+ issue](https://github.com/3scale/apisonator/pull/247)
+ ([#250](https://github.com/3scale/apisonator/pull/250)).
+
+ ### Fixed
+
+ - Made the worker more reliable when configured in async mode. Now it handles
+ connection errors better
+ ([#253](https://github.com/3scale/apisonator/pull/253)),
+ ([#254](https://github.com/3scale/apisonator/pull/254)), and
+ ([#255](https://github.com/3scale/apisonator/pull/255)).
+
+ ### Changed
+
+ - Updated async-redis to v0.5.1
+ ([#251](https://github.com/3scale/apisonator/pull/251)).
+
+ ## 3.2.1 - 2021-01-22
+
+ ### Fixed
+
+ - Reports of 0 hits no longer generate unnecessary stats keys in Redis
+ ([#247](https://github.com/3scale/apisonator/pull/247)).
+
+ ## 3.2.0 - 2021-01-19
+
+ ### Added
+
+ - New endpoint in the internal API to get the provider key for a given (token,
+ service_id) pair ([#243](https://github.com/3scale/apisonator/pull/243)).
+
+ ### Changed
+
+ - The config file used when running in a Docker image now parses "1" and "true"
+ (case-insensitive) as true
+ ([#245](https://github.com/3scale/apisonator/pull/245)).
+
+ ### Fixed
+
+ - Fixed some metrics of the internal API that were not being counted
+ correctly ([#244](https://github.com/3scale/apisonator/pull/244)).
+
+
+ ## 3.1.0 - 2020-10-14
+
+ ### Added
+
+ - Prometheus metrics for the internal API
+ ([#236](https://github.com/3scale/apisonator/pull/236)).
+ - Docs with a detailed explanation about how counter updates are performed
+ ([#239](https://github.com/3scale/apisonator/pull/239)).
+
+ ### Changed
+
+ - NotifyJobs are run only when the service ID is explicitly defined
+ ([#238](https://github.com/3scale/apisonator/pull/238)).
+
+ ### Fixed
+
+ - Fixed corner case that raised "TransactionTimestampNotWithinRange" in notify
+ jobs ([#235](https://github.com/3scale/apisonator/pull/235)).
+
+
+ ## 3.0.1.1 - 2020-07-28
+
+ ### Changed
+
+ - Updated json gem to v2.3.1
+ ([#232](https://github.com/3scale/apisonator/pull/232)).
+
  ## 3.0.1 - 2020-07-14
 
  ### Fixed
data/Gemfile.base CHANGED
@@ -53,13 +53,14 @@ gem 'rake', '~> 13.0'
  gem 'builder', '= 3.2.3'
  # Use a patched resque to allow reusing their Airbrake Failure class
  gem 'resque', git: 'https://github.com/3scale/resque', branch: '3scale'
+ gem 'redis-namespace', '~>1.8.0'
  gem 'rack', '~> 2.1.4'
  gem 'sinatra', '~> 2.0.3'
  gem 'sinatra-contrib', '~> 2.0.3'
  # Optional external error logging services
  gem 'bugsnag', '~> 6', require: nil
  gem 'yabeda-prometheus', '~> 0.5.0'
- gem 'async-redis', '~> 0.5'
+ gem 'async-redis', '~> 0.5.1'
  gem 'falcon', '~> 0.35'
 
  # Use a patched redis-rb that fixes an issue when trying to connect with
data/Gemfile.lock CHANGED
@@ -35,7 +35,7 @@ GIT
  PATH
  remote: .
  specs:
- apisonator (3.0.1)
+ apisonator (3.3.0)
 
  GEM
  remote: https://rubygems.org/
@@ -70,7 +70,7 @@ GEM
  async (~> 1.14)
  async-pool (0.2.0)
  async (~> 1.8)
- async-redis (0.5.0)
+ async-redis (0.5.1)
  async (~> 1.8)
  async-io (~> 1.10)
  async-pool (~> 0.2)
@@ -119,7 +119,7 @@ GEM
  i18n (1.8.2)
  concurrent-ruby (~> 1.0)
  jmespath (1.3.1)
- json (2.1.0)
+ json (2.3.1)
  license_finder (5.9.2)
  bundler
  rubyzip
@@ -142,7 +142,7 @@ GEM
  net-scp (1.2.1)
  net-ssh (>= 2.6.5)
  net-ssh (4.2.0)
- nio4r (2.5.2)
+ nio4r (2.5.4)
  nokogiri (1.10.9)
  mini_portile2 (~> 2.4.0)
  parslet (1.8.2)
@@ -178,7 +178,7 @@ GEM
  rack-test (0.8.2)
  rack (>= 1.0, < 3)
  rake (13.0.1)
- redis-namespace (1.6.0)
+ redis-namespace (1.8.0)
  redis (>= 3.0.4)
  resque_spec (0.17.0)
  resque (>= 1.19.0)
@@ -241,7 +241,7 @@ GEM
  thread_safe (0.3.6)
  tilt (2.0.8)
  timecop (0.9.1)
- timers (4.3.0)
+ timers (4.3.2)
  toml (0.2.0)
  parslet (~> 1.8.0)
  tzinfo (1.2.7)
@@ -267,7 +267,7 @@ PLATFORMS
  DEPENDENCIES
  airbrake (= 4.3.1)
  apisonator!
- async-redis (~> 0.5)
+ async-redis (~> 0.5.1)
  async-rspec
  aws-sdk (= 2.4.2)
  benchmark-ips (~> 2.7.2)
@@ -291,6 +291,7 @@ DEPENDENCIES
  rack-test (~> 0.8.2)
  rake (~> 13.0)
  redis!
+ redis-namespace (~> 1.8.0)
  resque!
  resque_spec (~> 0.17.0)
  resque_unit (~> 0.4.4)!
data/Gemfile.on_prem.lock CHANGED
@@ -35,7 +35,7 @@ GIT
  PATH
  remote: .
  specs:
- apisonator (3.0.1)
+ apisonator (3.3.0)
 
  GEM
  remote: https://rubygems.org/
@@ -67,7 +67,7 @@ GEM
  async (~> 1.14)
  async-pool (0.2.0)
  async (~> 1.8)
- async-redis (0.5.0)
+ async-redis (0.5.1)
  async (~> 1.8)
  async-io (~> 1.10)
  async-pool (~> 0.2)
@@ -108,7 +108,7 @@ GEM
  hiredis (0.6.3)
  i18n (1.8.2)
  concurrent-ruby (~> 1.0)
- json (2.1.0)
+ json (2.3.1)
  license_finder (5.9.2)
  bundler
  rubyzip
@@ -131,7 +131,7 @@ GEM
  net-scp (1.2.1)
  net-ssh (>= 2.6.5)
  net-ssh (4.2.0)
- nio4r (2.5.2)
+ nio4r (2.5.4)
  nokogiri (1.10.9)
  mini_portile2 (~> 2.4.0)
  parslet (1.8.2)
@@ -166,7 +166,7 @@ GEM
  rack-test (0.8.2)
  rack (>= 1.0, < 3)
  rake (13.0.1)
- redis-namespace (1.6.0)
+ redis-namespace (1.8.0)
  redis (>= 3.0.4)
  resque_spec (0.17.0)
  resque (>= 1.19.0)
@@ -227,7 +227,7 @@ GEM
  thread_safe (0.3.6)
  tilt (2.0.8)
  timecop (0.9.1)
- timers (4.3.0)
+ timers (4.3.2)
  toml (0.2.0)
  parslet (~> 1.8.0)
  tzinfo (1.2.7)
@@ -250,7 +250,7 @@ PLATFORMS
 
  DEPENDENCIES
  apisonator!
- async-redis (~> 0.5)
+ async-redis (~> 0.5.1)
  async-rspec
  benchmark-ips (~> 2.7.2)
  bugsnag (~> 6)
@@ -272,6 +272,7 @@ DEPENDENCIES
  rack-test (~> 0.8.2)
  rake (~> 13.0)
  redis!
+ redis-namespace (~> 1.8.0)
  resque!
  resque_spec (~> 0.17.0)
  resque_unit (~> 0.4.4)!
data/Rakefile CHANGED
@@ -261,27 +261,49 @@ task :reschedule_failed_jobs do
  "Pending failed jobs: #{result[:failed_current]}."
  end
 
- desc 'Delete stats of services marked for deletion'
  namespace :stats do
+ desc 'Delete stats of services marked for deletion'
  task :cleanup, [:redis_urls, :log_deleted_keys] do |_, args|
- redis_urls = args[:redis_urls] && args[:redis_urls].split(' ')
+ redis_conns = redis_conns(args[:redis_urls])
 
- if redis_urls.nil? || redis_urls.empty?
+ if redis_conns.empty?
  puts 'No Redis URLs specified'
  exit(false)
  end
 
- redis_clients = redis_urls.map do |redis_url|
- parsed_uri = URI.parse(ThreeScale::Backend::Storage::Helpers.send(
- :to_redis_uri, redis_url)
- )
- Redis.new(host: parsed_uri.host, port: parsed_uri.port)
+ ThreeScale::Backend::Stats::Cleaner.delete!(
+ redis_conns, log_deleted_keys: logger_for_deleted_keys(args[:log_deleted_keys])
+ )
+ end
+
+ desc 'Delete stats keys set to 0'
+ task :delete_stats_keys_set_to_0, [:redis_urls, :log_deleted_keys] do |_, args|
+ redis_conns = redis_conns(args[:redis_urls])
+
+ if redis_conns.empty?
+ puts 'No Redis URLs specified'
+ exit(false)
  end
 
- log_deleted = args[:log_deleted_keys] == 'true' ? STDOUT : nil
+ ThreeScale::Backend::Stats::Cleaner.delete_stats_keys_set_to_0(
+ redis_conns, log_deleted_keys: logger_for_deleted_keys(args[:log_deleted_keys])
+ )
+ end
+ end
 
- ThreeScale::Backend::Stats::Cleaner.delete!(
- redis_clients, log_deleted_keys: log_deleted
+ def redis_conns(urls)
+ redis_urls = urls && urls.split(' ')
+
+ return [] if redis_urls.nil? || redis_urls.empty?
+
+ redis_urls.map do |redis_url|
+ parsed_uri = URI.parse(ThreeScale::Backend::Storage::Helpers.send(
+ :to_redis_uri, redis_url)
  )
+ Redis.new(host: parsed_uri.host, port: parsed_uri.port)
  end
  end
+
+ def logger_for_deleted_keys(arg_log_deleted_keys)
+ arg_log_deleted_keys == 'true' ? STDOUT : nil
+ end
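For reference, both stats tasks share the same argument convention: a space-separated list of Redis URLs, and an optional 'true' to echo every deleted key. A sketch of invoking the new task, with placeholder URLs:

```ruby
# From a shell (quote the brackets if your shell globs them):
#   bundle exec rake "stats:delete_stats_keys_set_to_0[redis://127.0.0.1:6379 redis://127.0.0.1:6380,true]"

# The programmatic equivalent via Rake's task API:
require 'rake'

Rake.application.load_rakefile # assumes we run from the project root
Rake::Task['stats:delete_stats_keys_set_to_0']
  .invoke('redis://127.0.0.1:6379 redis://127.0.0.1:6380', 'true')
```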
@@ -7,6 +7,14 @@ module ThreeScale
  ServiceToken.exists?(token, service_id) ? 200 : 404
  end
 
+ get '/:token/:service_id/provider_key' do |token, service_id|
+ if ServiceToken.exists?(token, service_id)
+ { status: :found, provider_key: Service.provider_key_for(service_id) }.to_json
+ else
+ respond_with_404('token/service combination not found'.freeze)
+ end
+ end
+
  post '/' do
  check_tokens_param!
 
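A sketch of how a client might call the new route. Only the `/:token/:service_id/provider_key` segment comes from the diff; the host, port, credentials, and the `/internal/service_tokens` mount prefix are assumptions that depend on how the internal API is deployed:

```ruby
require 'net/http'
require 'json'

# Hypothetical URL; adjust host, port, and prefix to your deployment.
uri = URI('http://localhost:3000/internal/service_tokens/some_token/42/provider_key')
res = Net::HTTP.get_response(uri)

case res
when Net::HTTPOK
  JSON.parse(res.body) # => {"status"=>"found", "provider_key"=>"..."}
else
  # 404: the body reports 'token/service combination not found'
end
```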
@@ -40,10 +40,8 @@ module ThreeScale
  private
 
  def self.first_traffic(service_id, application_id)
- key = Stats::Keys.applications_key_prefix(
- Stats::Keys.service_key_prefix(service_id)
- )
- if storage.sadd(key, encode_key(application_id))
+ if storage.sadd(Stats::Keys.set_of_apps_with_traffic(service_id),
+ encode_key(application_id))
  EventStorage.store(:first_traffic,
  { service_id: service_id,
  application_id: application_id,
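The refactor keeps the underlying trick intact: Redis SADD reports whether the member was newly added, so the :first_traffic event can fire at most once per application. A minimal sketch; the key name is a placeholder for whatever Stats::Keys.set_of_apps_with_traffic builds, and the boolean return assumes the redis-rb behavior pinned by this gem:

```ruby
require 'redis'

redis = Redis.new(host: '127.0.0.1', port: 6379)
apps_with_traffic = 'placeholder:apps_with_traffic:service_42'

redis.sadd(apps_with_traffic, 'app_1') # => true: first report ever, emit :first_traffic
redis.sadd(apps_with_traffic, 'app_1') # => false: already known, no event
```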
@@ -1,6 +1,7 @@
  require '3scale/backend/configuration/loader'
  require '3scale/backend/environment'
  require '3scale/backend/configurable'
+ require '3scale/backend/errors'
 
  module ThreeScale
  module Backend
@@ -77,9 +78,6 @@ module ThreeScale
  master_metrics = [:transactions, :transactions_authorize]
  config.master.metrics = Struct.new(*master_metrics).new
 
- # Default config
- config.master_service_id = 1
-
  # This setting controls whether the listener can create event buckets in
  # Redis. We do not want all the listeners creating buckets yet, as we do
  # not know exactly the rate at which we can send events to Kinesis
@@ -32,25 +32,6 @@ module ThreeScale
  DEFAULT_WAIT_BEFORE_FETCHING_MORE_JOBS
  end
 
- def pop_from_queue
- begin
- encoded_job = @redis.blpop(*@queues, timeout: @fetch_timeout)
- rescue Redis::BaseConnectionError, Errno::ECONNREFUSED, Errno::EPIPE => e
- raise RedisConnectionError.new(e.message)
- rescue Redis::CommandError => e
- # Redis::CommandError from redis-rb can be raised for multiple
- # reasons, so we need to check the error message to distinguish
- # connection errors from the rest.
- if e.message == 'ERR Connection timed out'.freeze
- raise RedisConnectionError.new(e.message)
- else
- raise e
- end
- end
-
- encoded_job
- end
-
  def fetch
  encoded_job = pop_from_queue
  return nil if encoded_job.nil? || encoded_job.empty?
@@ -99,10 +80,11 @@ module ThreeScale
 
  # Re-instantiate Redis instance. This is needed to recover from
  # Errno::EPIPE, not sure if there are others.
- @redis = ThreeScale::Backend::QueueStorage.connection(
- ThreeScale::Backend.environment,
- ThreeScale::Backend.configuration
+ @redis = Redis::Namespace.new(
+ WorkerAsync.const_get(:RESQUE_REDIS_NAMESPACE),
+ redis: QueueStorage.connection(Backend.environment, Backend.configuration)
  )
+
  # If there is a different kind of error, it's probably a
  # programming error. Like sending an invalid blpop command to
  # Redis. In that case, let the worker crash.
@@ -111,12 +93,36 @@ module ThreeScale
  end
  end
 
+ rescue Exception => e
+ Worker.logger.notify(e)
+ ensure
  job_queue.close
  end
 
  def shutdown
  @shutdown = true
  end
+
+ private
+
+ def pop_from_queue
+ begin
+ encoded_job = @redis.blpop(*@queues, timeout: @fetch_timeout)
+ rescue Redis::BaseConnectionError, Errno::ECONNREFUSED, Errno::EPIPE => e
+ raise RedisConnectionError.new(e.message)
+ rescue Redis::CommandError => e
+ # Redis::CommandError from redis-rb can be raised for multiple
+ # reasons, so we need to check the error message to distinguish
+ # connection errors from the rest.
+ if e.message == 'ERR Connection timed out'.freeze
+ raise RedisConnectionError.new(e.message)
+ else
+ raise e
+ end
+ end
+
+ encoded_job
+ end
  end
  end
  end
@@ -4,14 +4,36 @@ require 'rack'
  module ThreeScale
  module Backend
  class ListenerMetrics
- REQUEST_TYPES = {
+ AUTH_AND_REPORT_REQUEST_TYPES = {
  '/transactions/authorize.xml' => 'authorize',
  '/transactions/oauth_authorize.xml' => 'authorize_oauth',
  '/transactions/authrep.xml' => 'authrep',
  '/transactions/oauth_authrep.xml' => 'authrep_oauth',
  '/transactions.xml' => 'report'
  }
- private_constant :REQUEST_TYPES
+ private_constant :AUTH_AND_REPORT_REQUEST_TYPES
+
+ # Only the first match is taken into account, that's why for example,
+ # "/\/services\/.*\/stats/" needs to appear before "/\/services/"
+ INTERNAL_API_PATHS = [
+ [/\/services\/.*\/alert_limits/, 'alerts'.freeze],
+ [/\/services\/.*\/applications\/.*\/keys/, 'application_keys'.freeze],
+ [/\/services\/.*\/applications\/.*\/referrer_filters/, 'application_referrer_filters'.freeze],
+ [/\/services\/.*\/applications\/.*\/utilization/, 'utilization'.freeze],
+ [/\/services\/.*\/applications/, 'applications'.freeze],
+ [/\/services\/.*\/errors/, 'errors'.freeze],
+ [/\/events/, 'events'.freeze],
+ [/\/services\/.*\/metrics/, 'metrics'.freeze],
+ [/\/service_tokens/, 'service_tokens'.freeze],
+ [/\/services\/.*\/stats/, 'stats'.freeze],
+ [/\/services\/.*\/plans\/.*\/usagelimits/, 'usage_limits'.freeze],
+ [/\/services/, 'services'.freeze],
+ ].freeze
+ private_constant :INTERNAL_API_PATHS
+
+ # Most requests will be under 100ms, so use a higher granularity from there
+ TIME_BUCKETS = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.25, 0.5, 0.75, 1]
+ private_constant :TIME_BUCKETS
 
  class << self
  ERRORS_4XX_TO_TRACK = Set[403, 404, 409].freeze
@@ -27,9 +49,12 @@ module ThreeScale
  end
 
  def report_resp_code(path, resp_code)
- Yabeda.apisonator_listener.response_codes.increment(
+ req_type = req_type(path)
+ prometheus_group = prometheus_group(req_type)
+
+ Yabeda.send(prometheus_group).response_codes.increment(
  {
- request_type: REQUEST_TYPES[path],
+ request_type: req_type,
  resp_code: code_group(resp_code)
  },
  by: 1
@@ -37,8 +62,11 @@ module ThreeScale
  end
 
  def report_response_time(path, request_time)
- Yabeda.apisonator_listener.response_times.measure(
- { request_type: REQUEST_TYPES[path] },
+ req_type = req_type(path)
+ prometheus_group = prometheus_group(req_type)
+
+ Yabeda.send(prometheus_group).response_times.measure(
+ { request_type: req_type },
  request_time
  )
  end
@@ -69,8 +97,21 @@ module ThreeScale
  comment 'Response times'
  unit :seconds
  tags %i[request_type]
- # Most requests will be under 100ms, so use a higher granularity from there
- buckets [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.25, 0.5, 0.75, 1]
+ buckets TIME_BUCKETS
+ end
+ end
+
+ group :apisonator_listener_internal_api do
+ counter :response_codes do
+ comment 'Response codes'
+ tags %i[request_type resp_code]
+ end
+
+ histogram :response_times do
+ comment 'Response times'
+ unit :seconds
+ tags %i[request_type]
+ buckets TIME_BUCKETS
  end
  end
  end
@@ -93,6 +134,24 @@ module ThreeScale
  'unknown'.freeze
  end
  end
+
+ def req_type(path)
+ AUTH_AND_REPORT_REQUEST_TYPES[path] || internal_api_req_type(path)
+ end
+
+ def internal_api_req_type(path)
+ (_regex, type) = INTERNAL_API_PATHS.find { |(regex, _)| regex.match path }
+ type
+ end
+
+ # Returns the group as defined in .define_metrics
+ def prometheus_group(request_type)
+ if AUTH_AND_REPORT_REQUEST_TYPES.values.include? request_type
+ :apisonator_listener
+ else
+ :apisonator_listener_internal_api
+ end
+ end
  end
  end
  end
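Since Array#find returns the first matching pair, the ordering comment above is load-bearing: moving the broad /\/services/ entry up would misclassify every more specific path. A trimmed-down replica of the lookup:

```ruby
PATHS = [
  [/\/services\/.*\/stats/, 'stats'.freeze],
  [/\/services/, 'services'.freeze],
].freeze

def internal_api_req_type(path)
  # Destructure the first [regex, type] pair whose regex matches.
  (_regex, type) = PATHS.find { |(regex, _)| regex.match path }
  type
end

internal_api_req_type('/services/42/stats/usage') # => "stats"
internal_api_req_type('/services/42')             # => "services"
# Reversing the two entries would label both paths "services".
```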
@@ -45,6 +45,12 @@ module ThreeScale
  STATS_KEY_PREFIX = 'stats/'.freeze
  private_constant :STATS_KEY_PREFIX
 
+ REDIS_CONN_ERRORS = [Redis::BaseConnectionError, Errno::ECONNREFUSED, Errno::EPIPE].freeze
+ private_constant :REDIS_CONN_ERRORS
+
+ MAX_RETRIES_REDIS_ERRORS = 3
+ private_constant :MAX_RETRIES_REDIS_ERRORS
+
  class << self
  include Logging
  def mark_service_to_be_deleted(service_id)
@@ -77,37 +83,73 @@ module ThreeScale
  logger.info("Going to delete the stats keys for these services: #{services.to_a}")
 
  unless services.empty?
- delete_successful = true
- redis_conns.each do |redis_conn|
+ _ok, failed = redis_conns.partition do |redis_conn|
  begin
  delete_keys(redis_conn, services, log_deleted_keys)
- # If it's a connection error, mark as failed and continue
- # cleaning other shards. If it's another kind of error, it
- # could be a bug, so better re-raise.
- rescue Redis::BaseConnectionError, Errno::ECONNREFUSED, Errno::EPIPE => e
- logger.error("Error while deleting stats of server #{redis_conn}: #{e}")
- delete_successful = false
- rescue Redis::CommandError => e
- # Redis::CommandError from redis-rb can be raised for multiple
- # reasons, so we need to check the error message to distinguish
- # connection errors from the rest.
- if e.message == 'ERR Connection timed out'.freeze
- logger.error("Error while deleting stats of server #{redis_conn}: #{e}")
- delete_successful = false
- else
- raise e
- end
+ true
+ rescue => e
+ handle_redis_exception(e, redis_conn)
+ false
  end
  end
 
- remove_services_from_delete_set(services) if delete_successful
+ with_retries { remove_services_from_delete_set(services) } if failed.empty?
+
+ failed.each do |failed_conn|
+ logger.error("Error while deleting stats of server #{failed_conn}")
+ end
  end
 
  logger.info("Finished deleting the stats keys for these services: #{services.to_a}")
  end
 
+ # Deletes all the stats keys set to 0.
+ #
+ # Stats keys set to 0 are useless and occupy Redis memory
+ # unnecessarily. They were generated due to a bug in previous versions
+ # of Apisonator.
+ # Ref: https://github.com/3scale/apisonator/pull/247
+ #
+ # As the .delete function, this one also receives a collection of
+ # instantiated Redis clients and those need to connect to Redis
+ # servers directly.
+ #
+ # @param [Array] redis_conns Instantiated Redis clients.
+ # @param [IO] log_deleted_keys IO where to write the logs. Defaults to
+ # nil (logs nothing).
+ def delete_stats_keys_set_to_0(redis_conns, log_deleted_keys: nil)
+ _ok, failed = redis_conns.partition do |redis_conn|
+ begin
+ delete_stats_keys_with_val_0(redis_conn, log_deleted_keys)
+ true
+ rescue => e
+ handle_redis_exception(e, redis_conn)
+ false
+ end
+ end
+
+ failed.each do |failed_conn|
+ logger.error("Error while deleting stats of server #{failed_conn}")
+ end
+ end
+
  private
 
+ def handle_redis_exception(exception, redis_conn)
+ # If it's a connection error, do nothing so we can continue with
+ # other shards. If it's another kind of error, it could be caused by
+ # a bug, so better re-raise.
+
+ case exception
+ when *REDIS_CONN_ERRORS
+ # Do nothing.
+ when Redis::CommandError
+ raise exception if exception.message != 'ERR Connection timed out'.freeze
+ else
+ raise exception
+ end
+ end
+
  # Returns a set with the services included in the
  # SET_WITH_SERVICES_MARKED_FOR_DELETION Redis set.
  def services_to_delete
@@ -133,19 +175,21 @@ module ThreeScale
  cursor = 0
 
  loop do
- cursor, keys = redis_conn.scan(cursor, count: SCAN_SLICE)
+ with_retries do
+ cursor, keys = redis_conn.scan(cursor, count: SCAN_SLICE)
 
- to_delete = keys.select { |key| delete_key?(key, services) }
+ to_delete = keys.select { |key| delete_key?(key, services) }
 
- unless to_delete.empty?
- if log_deleted_keys
- values = redis_conn.mget(*(to_delete.to_a))
- to_delete.each_with_index do |k, i|
- log_deleted_keys.puts "#{k} #{values[i]}"
+ unless to_delete.empty?
+ if log_deleted_keys
+ values = redis_conn.mget(*(to_delete.to_a))
+ to_delete.each_with_index do |k, i|
+ log_deleted_keys.puts "#{k} #{values[i]}"
+ end
  end
- end
 
- redis_conn.del(to_delete)
+ redis_conn.del(to_delete)
+ end
  end
 
  break if cursor.to_i == 0
@@ -188,6 +232,43 @@ module ThreeScale
  # simply ignore those keys.
  nil
  end
+
+ def delete_stats_keys_with_val_0(redis_conn, log_deleted_keys)
+ cursor = 0
+
+ loop do
+ with_retries do
+ cursor, keys = redis_conn.scan(cursor, count: SCAN_SLICE)
+
+ stats_keys = keys.select { |k| is_stats_key?(k) }
+
+ unless stats_keys.empty?
+ values = redis_conn.mget(*stats_keys)
+ to_delete = stats_keys.zip(values).select { |_, v| v == '0'.freeze }.map(&:first)
+
+ unless to_delete.empty?
+ redis_conn.del(to_delete)
+ to_delete.each { |k| log_deleted_keys.puts k } if log_deleted_keys
+ end
+ end
+ end
+
+ break if cursor.to_i == 0
+
+ sleep(SLEEP_BETWEEN_SCANS)
+ end
+ end
+
+ def with_retries(max = MAX_RETRIES_REDIS_ERRORS)
+ retries = 0
+ begin
+ yield
+ rescue Exception => e
+ retries += 1
+ retry if retries < max
+ raise e
+ end
+ end
  end
  end
  end
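A standalone replica of the retry helper, to make the counting explicit: with the default of MAX_RETRIES_REDIS_ERRORS = 3, a persistently failing block runs three times in total before the error propagates.

```ruby
MAX_RETRIES_REDIS_ERRORS = 3

def with_retries(max = MAX_RETRIES_REDIS_ERRORS)
  retries = 0
  begin
    yield
  rescue Exception => e # mirrors the diff, which deliberately rescues Exception
    retries += 1
    retry if retries < max
    raise e
  end
end

attempts = 0
begin
  with_retries { attempts += 1; raise 'connection refused' }
rescue RuntimeError
  attempts # => 3: the first try plus two retries
end
```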
@@ -70,6 +70,12 @@ module ThreeScale
  key
  end
 
+ def set_of_apps_with_traffic(service_id)
+ Stats::Keys.applications_key_prefix(
+ Stats::Keys.service_key_prefix(service_id)
+ )
+ end
+
  # We want all the buckets to go to the same Redis shard.
  # The reason is that SUNION support in Twemproxy requires that the
  # supplied keys hash to the same server.
@@ -20,8 +20,14 @@ module ThreeScale
  def report(provider_key, service_id, transactions, context_info = {})
  service = Service.load_with_provider_key!(service_id, provider_key)
 
- report_enqueue(service.id, transactions, context_info)
- notify_report(provider_key, transactions.size)
+ # A usage of 0 does not affect rate-limits or stats, so we do not need
+ # to report it.
+ filtered_transactions = filter_usages_with_0(transactions.clone)
+
+ return if filtered_transactions.empty?
+
+ report_enqueue(service.id, filtered_transactions, context_info)
+ notify_report(provider_key, filtered_transactions.size)
  end
 
  def authorize(provider_key, params, context_info = {})
@@ -137,9 +143,17 @@ module ThreeScale
 
  usage = params[:usage]
 
- if (usage || params[:log]) && status.authorized?
+ filtered_usage = filter_metrics_without_inc(usage.clone) if usage
+
+ if ((filtered_usage && !filtered_usage.empty?) || params[:log]) && status.authorized?
  application_id = status.application.id
- report_enqueue(status.service_id, { 0 => {"app_id" => application_id, "usage" => usage, "log" => params[:log] } }, request: { extensions: request_info[:extensions] })
+
+ report_enqueue(
+ status.service_id,
+ { 0 => {"app_id" => application_id, "usage" => filtered_usage, "log" => params[:log] } },
+ request: { extensions: request_info[:extensions] }
+ )
+
  notify_authrep(provider_key, usage ? 1 : 0)
  else
  notify_authorize(provider_key)
@@ -182,6 +196,19 @@ module ThreeScale
  end
  end
 
+ def filter_usages_with_0(transactions)
+ # There are plenty of existing tests using both a string and a symbol
+ # when accessing the usage.
+ transactions.delete_if do |_idx, tx|
+ (usage = tx['usage'.freeze] || tx[:usage]) or next
+ filter_metrics_without_inc(usage).empty?
+ end
+ end
+
+ def filter_metrics_without_inc(usage)
+ usage.delete_if { |_metric, delta| delta.to_s == '0'.freeze }
+ end
+
  def storage
  Storage.instance
  end
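A worked example of the filtering above. delta.to_s == '0' catches integer 0 and string '0' alike, and a transaction whose usage filters down to nothing is dropped before any job is enqueued:

```ruby
def filter_metrics_without_inc(usage)
  usage.delete_if { |_metric, delta| delta.to_s == '0'.freeze }
end

filter_metrics_without_inc({ 'hits' => '0', 'searches' => 3 })
# => {"searches"=>3}

filter_metrics_without_inc({ 'hits' => 0 })
# => {}: the whole transaction is removed, so a report made only of
#    zeros enqueues nothing and sends no notification
```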
@@ -30,9 +30,13 @@ module ThreeScale
  end
 
  def notify(provider_key, usage)
- # batch several notifications together so that we can process just one
+ # We need the master service ID to report its metrics. If it's not
+ # set, we don't need to notify anything.
+ # Batch several notifications together so that we can process just one
  # job for a group of them.
- notify_batch(provider_key, usage)
+ unless configuration.master_service_id.to_s.empty?
+ notify_batch(provider_key, usage)
+ end
  end
 
  def notify_batch(provider_key, usage)
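With the hard-coded master_service_id default removed from the configuration (see the earlier configuration hunk), an unset value is nil, and the guard above silently disables master notifications; the check itself is plain Ruby:

```ruby
nil.to_s.empty? # => true  -> master_service_id unset, notify_batch is skipped
1.to_s.empty?   # => false -> NotifyJobs are batched and enqueued as before
```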
@@ -7,8 +7,6 @@ module ThreeScale
  extend Configurable
  @queue = :main
 
- InvalidMasterServiceId = Class.new(ThreeScale::Backend::Error)
-
  class << self
  def perform_logged(provider_key, usage, timestamp, _enqueue_time)
  application_id = Application.load_id_by_key(master_service_id, provider_key)
@@ -16,12 +14,42 @@ module ThreeScale
  if application_id && Application.exists?(master_service_id, application_id)
  master_metrics = Metric.load_all(master_service_id)
 
- ProcessJob.perform([{
- service_id: master_service_id,
- application_id: application_id,
- timestamp: timestamp,
- usage: master_metrics.process_usage(usage)
- }])
+ begin
+ ProcessJob.perform([{
+ service_id: master_service_id,
+ application_id: application_id,
+ timestamp: timestamp,
+ usage: master_metrics.process_usage(usage)
+ }])
+ rescue MetricInvalid => e
+ # This happens when the master account in Porta does not have
+ # the notify metrics defined (by default "transactions" and
+ # "transactions/authorize"). These metrics need to be created in
+ # Porta, Apisonator does not have a way to guarantee that
+ # they're defined.
+ # Notice that this rescue prevents the job from being retried.
+ # Apisonator can't know when the metrics will be created (if
+ # ever) so it's better to log the error rather than retrying
+ # these jobs for an undefined period of time.
+ Worker.logger.notify(e)
+ return [false, "#{e}"]
+ rescue TransactionTimestampNotWithinRange => e
+ # This is very unlikely to happen. The timestamps in a notify
+ # job are not set by users, they are set by the listeners. If
+ # this error happens it might mean that:
+ # a) The worker started processing this job way after the
+ # listener produced it. This can happen for example if we make
+ # some requests to a listener with no workers. The listeners
+ # will enqueue some notify jobs. If we start a worker hours
+ # later, we might see this error.
+ # b) There's some kind of clock skew issue.
+ # c) There's a bug.
+ #
+ # We can't raise here, because then, the job will be retried,
+ # but it's going to fail always if it has an old timestamp.
+ Worker.logger.notify(e)
+ return [false, "#{provider_key} #{application_id} #{e}"]
+ end
  end
  [true, "#{provider_key} #{application_id || '--'}"]
  end
@@ -29,15 +57,7 @@ module ThreeScale
  private
 
  def master_service_id
- value = configuration.master_service_id
-
- unless value
- raise InvalidMasterServiceId,
- "Can't find master service id. Make sure the \"master_service_id\" "\
- 'configuration value is set correctly'
- end
-
- value.to_s
+ configuration.master_service_id.to_s
  end
  end
  end
@@ -1,5 +1,5 @@
  module ThreeScale
  module Backend
- VERSION = '3.0.1'
+ VERSION = '3.3.0'
  end
  end
@@ -1,4 +1,5 @@
  require 'async'
+ require 'redis-namespace'
  require '3scale/backend/job_fetcher'
 
  module ThreeScale
@@ -10,6 +11,9 @@ module ThreeScale
  DEFAULT_MAX_CONCURRENT_JOBS = 20
  private_constant :DEFAULT_MAX_CONCURRENT_JOBS
 
+ RESQUE_REDIS_NAMESPACE = :resque
+ private_constant :RESQUE_REDIS_NAMESPACE
+
  def initialize(options = {})
  trap('TERM') { shutdown }
  trap('INT') { shutdown }
@@ -17,7 +21,7 @@ module ThreeScale
  @one_off = options[:one_off]
  @jobs = Queue.new # Thread-safe queue
 
- @job_fetcher = options[:job_fetcher] || JobFetcher.new
+ @job_fetcher = options[:job_fetcher] || JobFetcher.new(redis_client: redis_client)
 
  @max_concurrent_jobs = configuration.async_worker.max_concurrent_jobs ||
  DEFAULT_MAX_CONCURRENT_JOBS
@@ -64,6 +68,10 @@ module ThreeScale
  # unblocks when there are new jobs or when .close() is called
  job = @jobs.pop
 
+ # If job is nil, it means that the queue is closed. No more jobs are
+ # going to be pushed, so shutdown.
+ shutdown unless job
+
  break if @shutdown
 
  @reactor.async { perform(job) }
@@ -83,6 +91,19 @@ module ThreeScale
  Async { @job_fetcher.start(@jobs) }
  end
  end
+
+ # Returns a new Redis client with namespace "resque".
+ # In the async worker, the job fetcher runs in a separate thread, and we
+ # need to avoid sharing an already instantiated client like the one in
+ # Resque::Helpers initialized in lib/3scale/backend.rb (Resque.redis).
+ # Failing to do so, will raise errors because of fibers shared across
+ # threads.
+ def redis_client
+ Redis::Namespace.new(
+ RESQUE_REDIS_NAMESPACE,
+ redis: QueueStorage.connection(Backend.environment, Backend.configuration)
+ )
+ end
  end
  end
  end
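The redis-namespace gem wraps a plain client so that every key it touches is transparently prefixed, which is how this fresh per-thread client still sees the same "resque:*" keys Resque uses. A minimal sketch of the wrapping:

```ruby
require 'redis'
require 'redis-namespace'

plain = Redis.new(host: '127.0.0.1', port: 6379)
resque_redis = Redis::Namespace.new(:resque, redis: plain)

# The namespaced client prepends "resque:" to every key it touches:
resque_redis.lpush('queue:priority', '{"class":"SomeJob"}')
plain.lrange('resque:queue:priority', 0, -1) # => ["{\"class\":\"SomeJob\"}"]
```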
data/licenses.xml CHANGED
@@ -23,7 +23,7 @@
  </dependency>
  <dependency>
  <packageName>apisonator</packageName>
- <version>3.0.1</version>
+ <version>3.3.0</version>
  <licenses>
  <license>
  <name>Apache 2.0</name>
@@ -93,7 +93,7 @@
  </dependency>
  <dependency>
  <packageName>async-redis</packageName>
- <version>0.5.0</version>
+ <version>0.5.1</version>
  <licenses>
  <license>
  <name>MIT</name>
@@ -371,7 +371,7 @@
  </dependency>
  <dependency>
  <packageName>json</packageName>
- <version>2.1.0</version>
+ <version>2.3.1</version>
  <licenses>
  <license>
  <name>ruby</name>
@@ -525,7 +525,7 @@
  </dependency>
  <dependency>
  <packageName>nio4r</packageName>
- <version>2.5.2</version>
+ <version>2.5.4</version>
  <licenses>
  <license>
  <name>MIT</name>
@@ -769,7 +769,7 @@
  </dependency>
  <dependency>
  <packageName>redis-namespace</packageName>
- <version>1.6.0</version>
+ <version>1.8.0</version>
  <licenses>
  <license>
  <name>MIT</name>
@@ -1043,7 +1043,7 @@
  </dependency>
  <dependency>
  <packageName>timers</packageName>
- <version>4.3.0</version>
+ <version>4.3.2</version>
  <licenses>
  <license>
  <name>MIT</name>
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: apisonator
  version: !ruby/object:Gem::Version
- version: 3.0.1
+ version: 3.3.0
  platform: ruby
  authors:
  - Adam Ciganek
@@ -16,7 +16,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2020-07-14 00:00:00.000000000 Z
+ date: 2021-02-09 00:00:00.000000000 Z
  dependencies: []
  description: This gem provides a daemon that handles authorization and reporting of
  web services managed by 3scale.