apisonator 3.1.0 → 3.3.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: edd1887f6aa71c17fb5279f98988c9a3b5e631e80bc1f4142b6619b5f5e24f48
4
- data.tar.gz: b6478d14e88a177480396a7d46d5f4e33cea683aea17303175ad18ae646e546f
3
+ metadata.gz: '09ccae8a2719bc3717d22dcdd78c5c996cbd4774f2528b91d77c74fbbe2e51b7'
4
+ data.tar.gz: 4a9f237a6d1b25f9cc763817030cbe562536a6d5ed89374326db2f99ea82971c
5
5
  SHA512:
6
- metadata.gz: 944b75385817abc4d2f8828c61b2301d5cdfbb6c1bb681b7a3165590f23ad1b6c80451fcf579b8f3cf0010fe5a95e65605998f5d8ec94f1ff4ce690556b3011a
7
- data.tar.gz: f3f44b1b2ebfe6c8fcbbf36b90dd44e5a7875d997a36b97288417472fa4156689508bfc95afdf196a7a168f768aa6e38521502b93078f128994959d184c1cd37
6
+ metadata.gz: 2818f8b3a97b701218c28ae93339650db57a0229181ee3573a24ffe215611747c1381e9f0e643eb9dc85b81d13d43336a08252a88cd63cd7fc78ed9a182c5c83
7
+ data.tar.gz: 7a9c2e389c239094e2c6f303749056d5a8ea2d216f5097e98e9a87ef5c3bcd4ea2d190c4b8277d659608f7c80f57d5d727bf65eb9330f4c08fb8777ab5a1ae81
data/CHANGELOG.md CHANGED
@@ -2,6 +2,67 @@
2
2
 
3
3
  Notable changes to Apisonator will be tracked in this document.
4
4
 
5
+ ## 3.3.1.1 - 2021-02-12
6
+
7
+ ### Changed
8
+
9
+ - Updated our Puma fork to v4.3.7
10
+ ([#261](https://github.com/3scale/apisonator/pull/261)).
11
+
12
+ ## 3.3.1 - 2021-02-11
13
+
14
+ ### Fixed
15
+
16
+ - Usages with `#0` (set to 0) no longer generate unnecessary stats keys in Redis
17
+ ([#258](https://github.com/3scale/apisonator/pull/258)).
18
+
19
+ ## 3.3.0 - 2021-02-09
20
+
21
+ ### Added
22
+
23
+ - Rake task to delete stats keys set to 0 in the DB left there because of [this
24
+ issue](https://github.com/3scale/apisonator/pull/247)
25
+ ([#250](https://github.com/3scale/apisonator/pull/250)).
26
+
27
+ ### Fixed
28
+
29
+ - Made the worker more reliable when configured in async mode. Now it handles
30
+ connection errors better
31
+ ([#253](https://github.com/3scale/apisonator/pull/253)),
32
+ ([#254](https://github.com/3scale/apisonator/pull/254)), and
33
+ ([#255](https://github.com/3scale/apisonator/pull/255)).
34
+
35
+ ### Changed
36
+
37
+ - Updated async-redis to v0.5.1
38
+ ([#251](https://github.com/3scale/apisonator/pull/251)).
39
+
40
+ ## 3.2.1 - 2021-01-22
41
+
42
+ ### Fixed
43
+
44
+ - Reports of 0 hits no longer generate unnecessary stats keys in Redis
45
+ ([#247](https://github.com/3scale/apisonator/pull/247)).
46
+
47
+ ## 3.2.0 - 2021-01-19
48
+
49
+ ### Added
50
+
51
+ - New endpoint in the internal API to get the provider key for a given (token,
52
+ service_id) pair ([#243](https://github.com/3scale/apisonator/pull/243)).
53
+
54
+ ### Changed
55
+
56
+ - The config file used when running in a Docker image now parses "1" and "true"
57
+ (case-insensitive) as true
58
+ ([#245](https://github.com/3scale/apisonator/pull/245)).
59
+
60
+ ### Fixed
61
+
62
+ - Fixed some metrics of the internal API that were not being counted
63
+ correctly ([#244](https://github.com/3scale/apisonator/pull/244)).
64
+
65
+
5
66
  ## 3.1.0 - 2020-10-14
6
67
 
7
68
  ### Added
data/Gemfile.base CHANGED
@@ -15,11 +15,15 @@ platform :ruby do
15
15
  end
16
16
 
17
17
  group :test do
18
+ # Newer versions of rack-test don't work well with rspec-api-documentation.
19
+ # See https://github.com/rack/rack-test/pull/223 &
20
+ # https://github.com/zipmark/rspec_api_documentation/issues/342
21
+ gem 'rack-test', '= 0.8.2'
22
+
18
23
  gem 'benchmark-ips', '~> 2.7.2'
19
24
  gem 'mocha', '~> 1.3'
20
25
  gem 'nokogiri', '~> 1.10.8'
21
26
  gem 'pkg-config', '~> 1.1.7'
22
- gem 'rack-test', '~> 0.8.2'
23
27
  gem 'resque_unit', '~> 0.4.4', source: 'https://rubygems.org'
24
28
  gem 'test-unit', '~> 3.2.6'
25
29
  gem 'resque_spec', '~> 0.17.0'
@@ -42,7 +46,7 @@ group :development, :test do
42
46
  end
43
47
 
44
48
  # Default server by platform
45
- gem 'puma', git: 'https://github.com/3scale/puma', ref: 'b034371406690d3e6c2a9301c4a48bd721f3efc3'
49
+ gem 'puma', git: 'https://github.com/3scale/puma', branch: '3scale-4.3.7'
46
50
  # gems required by the runner
47
51
  gem 'gli', '~> 2.16.1', require: nil
48
52
  # Workers
@@ -53,13 +57,14 @@ gem 'rake', '~> 13.0'
53
57
  gem 'builder', '= 3.2.3'
54
58
  # Use a patched resque to allow reusing their Airbrake Failure class
55
59
  gem 'resque', git: 'https://github.com/3scale/resque', branch: '3scale'
60
+ gem 'redis-namespace', '~>1.8.0'
56
61
  gem 'rack', '~> 2.1.4'
57
62
  gem 'sinatra', '~> 2.0.3'
58
63
  gem 'sinatra-contrib', '~> 2.0.3'
59
64
  # Optional external error logging services
60
65
  gem 'bugsnag', '~> 6', require: nil
61
66
  gem 'yabeda-prometheus', '~> 0.5.0'
62
- gem 'async-redis', '~> 0.5'
67
+ gem 'async-redis', '~> 0.5.1'
63
68
  gem 'falcon', '~> 0.35'
64
69
 
65
70
  # Use a patched redis-rb that fixes an issue when trying to connect with
data/Gemfile.lock CHANGED
@@ -1,9 +1,10 @@
1
1
  GIT
2
2
  remote: https://github.com/3scale/puma
3
- revision: b034371406690d3e6c2a9301c4a48bd721f3efc3
4
- ref: b034371406690d3e6c2a9301c4a48bd721f3efc3
3
+ revision: c0601d08695839b8ffd0f380e91c3b91c1e8b754
4
+ branch: 3scale-4.3.7
5
5
  specs:
6
- puma (2.15.3)
6
+ puma (4.3.7)
7
+ nio4r (~> 2.0)
7
8
 
8
9
  GIT
9
10
  remote: https://github.com/3scale/redis-rb
@@ -35,7 +36,7 @@ GIT
35
36
  PATH
36
37
  remote: .
37
38
  specs:
38
- apisonator (3.1.0)
39
+ apisonator (3.3.1.1)
39
40
 
40
41
  GEM
41
42
  remote: https://rubygems.org/
@@ -70,7 +71,7 @@ GEM
70
71
  async (~> 1.14)
71
72
  async-pool (0.2.0)
72
73
  async (~> 1.8)
73
- async-redis (0.5.0)
74
+ async-redis (0.5.1)
74
75
  async (~> 1.8)
75
76
  async-io (~> 1.10)
76
77
  async-pool (~> 0.2)
@@ -142,7 +143,7 @@ GEM
142
143
  net-scp (1.2.1)
143
144
  net-ssh (>= 2.6.5)
144
145
  net-ssh (4.2.0)
145
- nio4r (2.5.2)
146
+ nio4r (2.5.4)
146
147
  nokogiri (1.10.9)
147
148
  mini_portile2 (~> 2.4.0)
148
149
  parslet (1.8.2)
@@ -178,7 +179,7 @@ GEM
178
179
  rack-test (0.8.2)
179
180
  rack (>= 1.0, < 3)
180
181
  rake (13.0.1)
181
- redis-namespace (1.6.0)
182
+ redis-namespace (1.8.0)
182
183
  redis (>= 3.0.4)
183
184
  resque_spec (0.17.0)
184
185
  resque (>= 1.19.0)
@@ -241,7 +242,7 @@ GEM
241
242
  thread_safe (0.3.6)
242
243
  tilt (2.0.8)
243
244
  timecop (0.9.1)
244
- timers (4.3.0)
245
+ timers (4.3.2)
245
246
  toml (0.2.0)
246
247
  parslet (~> 1.8.0)
247
248
  tzinfo (1.2.7)
@@ -267,7 +268,7 @@ PLATFORMS
267
268
  DEPENDENCIES
268
269
  airbrake (= 4.3.1)
269
270
  apisonator!
270
- async-redis (~> 0.5)
271
+ async-redis (~> 0.5.1)
271
272
  async-rspec
272
273
  aws-sdk (= 2.4.2)
273
274
  benchmark-ips (~> 2.7.2)
@@ -288,9 +289,10 @@ DEPENDENCIES
288
289
  pry-doc (~> 0.11.1)
289
290
  puma!
290
291
  rack (~> 2.1.4)
291
- rack-test (~> 0.8.2)
292
+ rack-test (= 0.8.2)
292
293
  rake (~> 13.0)
293
294
  redis!
295
+ redis-namespace (~> 1.8.0)
294
296
  resque!
295
297
  resque_spec (~> 0.17.0)
296
298
  resque_unit (~> 0.4.4)!
data/Gemfile.on_prem.lock CHANGED
@@ -1,9 +1,10 @@
1
1
  GIT
2
2
  remote: https://github.com/3scale/puma
3
- revision: b034371406690d3e6c2a9301c4a48bd721f3efc3
4
- ref: b034371406690d3e6c2a9301c4a48bd721f3efc3
3
+ revision: c0601d08695839b8ffd0f380e91c3b91c1e8b754
4
+ branch: 3scale-4.3.7
5
5
  specs:
6
- puma (2.15.3)
6
+ puma (4.3.7)
7
+ nio4r (~> 2.0)
7
8
 
8
9
  GIT
9
10
  remote: https://github.com/3scale/redis-rb
@@ -35,7 +36,7 @@ GIT
35
36
  PATH
36
37
  remote: .
37
38
  specs:
38
- apisonator (3.1.0)
39
+ apisonator (3.3.1.1)
39
40
 
40
41
  GEM
41
42
  remote: https://rubygems.org/
@@ -67,7 +68,7 @@ GEM
67
68
  async (~> 1.14)
68
69
  async-pool (0.2.0)
69
70
  async (~> 1.8)
70
- async-redis (0.5.0)
71
+ async-redis (0.5.1)
71
72
  async (~> 1.8)
72
73
  async-io (~> 1.10)
73
74
  async-pool (~> 0.2)
@@ -131,7 +132,7 @@ GEM
131
132
  net-scp (1.2.1)
132
133
  net-ssh (>= 2.6.5)
133
134
  net-ssh (4.2.0)
134
- nio4r (2.5.2)
135
+ nio4r (2.5.4)
135
136
  nokogiri (1.10.9)
136
137
  mini_portile2 (~> 2.4.0)
137
138
  parslet (1.8.2)
@@ -166,7 +167,7 @@ GEM
166
167
  rack-test (0.8.2)
167
168
  rack (>= 1.0, < 3)
168
169
  rake (13.0.1)
169
- redis-namespace (1.6.0)
170
+ redis-namespace (1.8.0)
170
171
  redis (>= 3.0.4)
171
172
  resque_spec (0.17.0)
172
173
  resque (>= 1.19.0)
@@ -227,7 +228,7 @@ GEM
227
228
  thread_safe (0.3.6)
228
229
  tilt (2.0.8)
229
230
  timecop (0.9.1)
230
- timers (4.3.0)
231
+ timers (4.3.2)
231
232
  toml (0.2.0)
232
233
  parslet (~> 1.8.0)
233
234
  tzinfo (1.2.7)
@@ -250,7 +251,7 @@ PLATFORMS
250
251
 
251
252
  DEPENDENCIES
252
253
  apisonator!
253
- async-redis (~> 0.5)
254
+ async-redis (~> 0.5.1)
254
255
  async-rspec
255
256
  benchmark-ips (~> 2.7.2)
256
257
  bugsnag (~> 6)
@@ -269,9 +270,10 @@ DEPENDENCIES
269
270
  pry-doc (~> 0.11.1)
270
271
  puma!
271
272
  rack (~> 2.1.4)
272
- rack-test (~> 0.8.2)
273
+ rack-test (= 0.8.2)
273
274
  rake (~> 13.0)
274
275
  redis!
276
+ redis-namespace (~> 1.8.0)
275
277
  resque!
276
278
  resque_spec (~> 0.17.0)
277
279
  resque_unit (~> 0.4.4)!
data/Rakefile CHANGED
@@ -261,27 +261,49 @@ task :reschedule_failed_jobs do
261
261
  "Pending failed jobs: #{result[:failed_current]}."
262
262
  end
263
263
 
264
- desc 'Delete stats of services marked for deletion'
265
264
  namespace :stats do
265
+ desc 'Delete stats of services marked for deletion'
266
266
  task :cleanup, [:redis_urls, :log_deleted_keys] do |_, args|
267
- redis_urls = args[:redis_urls] && args[:redis_urls].split(' ')
267
+ redis_conns = redis_conns(args[:redis_urls])
268
268
 
269
- if redis_urls.nil? || redis_urls.empty?
269
+ if redis_conns.empty?
270
270
  puts 'No Redis URLs specified'
271
271
  exit(false)
272
272
  end
273
273
 
274
- redis_clients = redis_urls.map do |redis_url|
275
- parsed_uri = URI.parse(ThreeScale::Backend::Storage::Helpers.send(
276
- :to_redis_uri, redis_url)
277
- )
278
- Redis.new(host: parsed_uri.host, port: parsed_uri.port)
274
+ ThreeScale::Backend::Stats::Cleaner.delete!(
275
+ redis_conns, log_deleted_keys: logger_for_deleted_keys(args[:log_deleted_keys])
276
+ )
277
+ end
278
+
279
+ desc 'Delete stats keys set to 0'
280
+ task :delete_stats_keys_set_to_0, [:redis_urls, :log_deleted_keys] do |_, args|
281
+ redis_conns = redis_conns(args[:redis_urls])
282
+
283
+ if redis_conns.empty?
284
+ puts 'No Redis URLs specified'
285
+ exit(false)
279
286
  end
280
287
 
281
- log_deleted = args[:log_deleted_keys] == 'true' ? STDOUT : nil
288
+ ThreeScale::Backend::Stats::Cleaner.delete_stats_keys_set_to_0(
289
+ redis_conns, log_deleted_keys: logger_for_deleted_keys(args[:log_deleted_keys])
290
+ )
291
+ end
292
+ end
282
293
 
283
- ThreeScale::Backend::Stats::Cleaner.delete!(
284
- redis_clients, log_deleted_keys: log_deleted
294
+ def redis_conns(urls)
295
+ redis_urls = urls && urls.split(' ')
296
+
297
+ return [] if redis_urls.nil? || redis_urls.empty?
298
+
299
+ redis_urls.map do |redis_url|
300
+ parsed_uri = URI.parse(ThreeScale::Backend::Storage::Helpers.send(
301
+ :to_redis_uri, redis_url)
285
302
  )
303
+ Redis.new(host: parsed_uri.host, port: parsed_uri.port)
286
304
  end
287
305
  end
306
+
307
+ def logger_for_deleted_keys(arg_log_deleted_keys)
308
+ arg_log_deleted_keys == 'true' ? STDOUT : nil
309
+ end
@@ -7,6 +7,14 @@ module ThreeScale
7
7
  ServiceToken.exists?(token, service_id) ? 200 : 404
8
8
  end
9
9
 
10
+ get '/:token/:service_id/provider_key' do |token, service_id|
11
+ if ServiceToken.exists?(token, service_id)
12
+ { status: :found, provider_key: Service.provider_key_for(service_id) }.to_json
13
+ else
14
+ respond_with_404('token/service combination not found'.freeze)
15
+ end
16
+ end
17
+
10
18
  post '/' do
11
19
  check_tokens_param!
12
20
 
@@ -6,32 +6,13 @@ module ThreeScale
6
6
  respond_with_404('service not found') unless Service.exists?(params[:service_id])
7
7
  end
8
8
 
9
- # This is very slow and needs to be disabled until the performance
10
- # issues are solved. In the meanwhile, the job will just return OK.
11
- =begin
12
- delete '' do |service_id|
13
- delete_stats_job_attrs = api_params Stats::DeleteJobDef
14
- delete_stats_job_attrs[:service_id] = service_id
15
- delete_stats_job_attrs[:from] = delete_stats_job_attrs[:from].to_i
16
- delete_stats_job_attrs[:to] = delete_stats_job_attrs[:to].to_i
17
- begin
18
- Stats::DeleteJobDef.new(delete_stats_job_attrs).run_async
19
- rescue DeleteServiceStatsValidationError => e
20
- [400, headers, { status: :error, error: e.message }.to_json]
21
- else
22
- { status: :to_be_deleted }.to_json
23
- end
24
- =end
25
-
26
- # This is an alternative to the above. It just adds the service to a
27
- # Redis set to marked is as "to be deleted".
28
- # Later a script can read that set and actually delete the keys.
29
- # Read the docs of the Stats::Cleaner class for more details.
9
+ # This adds the service to a Redis set to mark it as "to be deleted".
10
+ # Later a script can read that set and actually delete the keys. Read
11
+ # the docs of the Stats::Cleaner class for more details.
30
12
  #
31
- # Notice that this method ignores the "from" and "to" parameters. When
32
- # system calls this method, they're always interested in deleting all
33
- # the keys. They were just passing "from" and "to" to make the
34
- # implementation of the option above easier.
13
+ # Notice that this method ignores the "from" and "to" parameters used in
14
+ # previous versions. When system calls this method, they're always
15
+ # interested in deleting all the keys.
35
16
  delete '' do |service_id|
36
17
  Stats::Cleaner.mark_service_to_be_deleted(service_id)
37
18
  { status: :to_be_deleted }.to_json
@@ -40,10 +40,8 @@ module ThreeScale
40
40
  private
41
41
 
42
42
  def self.first_traffic(service_id, application_id)
43
- key = Stats::Keys.applications_key_prefix(
44
- Stats::Keys.service_key_prefix(service_id)
45
- )
46
- if storage.sadd(key, encode_key(application_id))
43
+ if storage.sadd(Stats::Keys.set_of_apps_with_traffic(service_id),
44
+ encode_key(application_id))
47
45
  EventStorage.store(:first_traffic,
48
46
  { service_id: service_id,
49
47
  application_id: application_id,
@@ -32,8 +32,6 @@ module ThreeScale
32
32
 
33
33
  CONFIG_DELETE_STATS_BATCH_SIZE = 50
34
34
  private_constant :CONFIG_DELETE_STATS_BATCH_SIZE
35
- CONFIG_DELETE_STATS_PARTITION_BATCH_SIZE = 1000
36
- private_constant :CONFIG_DELETE_STATS_PARTITION_BATCH_SIZE
37
35
 
38
36
  @configuration = Configuration::Loader.new
39
37
 
@@ -54,7 +52,7 @@ module ThreeScale
54
52
  config.add_section(:analytics_redis, :server,
55
53
  :connect_timeout, :read_timeout, :write_timeout)
56
54
  config.add_section(:hoptoad, :service, :api_key)
57
- config.add_section(:stats, :bucket_size, :delete_batch_size, :delete_partition_batch_size)
55
+ config.add_section(:stats, :bucket_size, :delete_batch_size)
58
56
  config.add_section(:redshift, :host, :port, :dbname, :user, :password)
59
57
  config.add_section(:statsd, :host, :port)
60
58
  config.add_section(:internal_api, :user, :password)
@@ -125,9 +123,6 @@ module ThreeScale
125
123
  config.stats.delete_batch_size = parse_int(config.stats.delete_batch_size,
126
124
  CONFIG_DELETE_STATS_BATCH_SIZE)
127
125
 
128
- config.stats.delete_partition_batch_size = parse_int(config.stats.delete_partition_batch_size,
129
- CONFIG_DELETE_STATS_PARTITION_BATCH_SIZE)
130
-
131
126
  # often we don't have a log_file setting - generate it here from
132
127
  # the log_path setting.
133
128
  log_file = config.log_file
@@ -292,12 +292,6 @@ module ThreeScale
292
292
  end
293
293
  end
294
294
 
295
- class DeleteServiceStatsValidationError < Error
296
- def initialize(service_id, msg)
297
- super "Delete stats job context validation error. Service: #{service_id}. Error: #{msg}"
298
- end
299
- end
300
-
301
295
  class EndUsersNoLongerSupported < BadRequest
302
296
  def initialize
303
297
  super 'End-users are no longer supported, do not specify the user_id parameter'.freeze
@@ -32,25 +32,6 @@ module ThreeScale
32
32
  DEFAULT_WAIT_BEFORE_FETCHING_MORE_JOBS
33
33
  end
34
34
 
35
- def pop_from_queue
36
- begin
37
- encoded_job = @redis.blpop(*@queues, timeout: @fetch_timeout)
38
- rescue Redis::BaseConnectionError, Errno::ECONNREFUSED, Errno::EPIPE => e
39
- raise RedisConnectionError.new(e.message)
40
- rescue Redis::CommandError => e
41
- # Redis::CommandError from redis-rb can be raised for multiple
42
- # reasons, so we need to check the error message to distinguish
43
- # connection errors from the rest.
44
- if e.message == 'ERR Connection timed out'.freeze
45
- raise RedisConnectionError.new(e.message)
46
- else
47
- raise e
48
- end
49
- end
50
-
51
- encoded_job
52
- end
53
-
54
35
  def fetch
55
36
  encoded_job = pop_from_queue
56
37
  return nil if encoded_job.nil? || encoded_job.empty?
@@ -99,10 +80,11 @@ module ThreeScale
99
80
 
100
81
  # Re-instantiate Redis instance. This is needed to recover from
101
82
  # Errno::EPIPE, not sure if there are others.
102
- @redis = ThreeScale::Backend::QueueStorage.connection(
103
- ThreeScale::Backend.environment,
104
- ThreeScale::Backend.configuration
83
+ @redis = Redis::Namespace.new(
84
+ WorkerAsync.const_get(:RESQUE_REDIS_NAMESPACE),
85
+ redis: QueueStorage.connection(Backend.environment, Backend.configuration)
105
86
  )
87
+
106
88
  # If there is a different kind of error, it's probably a
107
89
  # programming error. Like sending an invalid blpop command to
108
90
  # Redis. In that case, let the worker crash.
@@ -111,12 +93,36 @@ module ThreeScale
111
93
  end
112
94
  end
113
95
 
96
+ rescue Exception => e
97
+ Worker.logger.notify(e)
98
+ ensure
114
99
  job_queue.close
115
100
  end
116
101
 
117
102
  def shutdown
118
103
  @shutdown = true
119
104
  end
105
+
106
+ private
107
+
108
+ def pop_from_queue
109
+ begin
110
+ encoded_job = @redis.blpop(*@queues, timeout: @fetch_timeout)
111
+ rescue Redis::BaseConnectionError, Errno::ECONNREFUSED, Errno::EPIPE => e
112
+ raise RedisConnectionError.new(e.message)
113
+ rescue Redis::CommandError => e
114
+ # Redis::CommandError from redis-rb can be raised for multiple
115
+ # reasons, so we need to check the error message to distinguish
116
+ # connection errors from the rest.
117
+ if e.message == 'ERR Connection timed out'.freeze
118
+ raise RedisConnectionError.new(e.message)
119
+ else
120
+ raise e
121
+ end
122
+ end
123
+
124
+ encoded_job
125
+ end
120
126
  end
121
127
  end
122
128
  end
@@ -13,19 +13,21 @@ module ThreeScale
13
13
  }
14
14
  private_constant :AUTH_AND_REPORT_REQUEST_TYPES
15
15
 
16
+ # Only the first match is taken into account, that's why for example,
17
+ # "/\/services\/.*\/stats/" needs to appear before "/\/services/"
16
18
  INTERNAL_API_PATHS = [
17
- [/\/services\/.*\/alert_limits/, 'alerts'],
18
- [/\/services\/.*\/applications\/.*\/keys/, 'application_keys'],
19
- [/\/services\/.*\/applications\/.*\/referrer_filters/, 'application_referrer_filters'],
20
- [/\/services\/.*\/applications/, 'applications'],
21
- [/\/services\/.*\/errors/, 'errors'],
22
- [/\/events/, 'events'],
23
- [/\/services\/.*\/metrics/, 'metrics'],
24
- [/\/service_tokens/, 'service_tokens'],
25
- [/\/services/, 'services'],
26
- [/\/services\/.*\/stats/, 'stats'],
27
- [/\/services\/.*\/plans\/.*\/usagelimits/, 'usage_limits'],
28
- [/\/services\/.*\/applications\/.*\/utilization/, 'utilization'],
19
+ [/\/services\/.*\/alert_limits/, 'alerts'.freeze],
20
+ [/\/services\/.*\/applications\/.*\/keys/, 'application_keys'.freeze],
21
+ [/\/services\/.*\/applications\/.*\/referrer_filters/, 'application_referrer_filters'.freeze],
22
+ [/\/services\/.*\/applications\/.*\/utilization/, 'utilization'.freeze],
23
+ [/\/services\/.*\/applications/, 'applications'.freeze],
24
+ [/\/services\/.*\/errors/, 'errors'.freeze],
25
+ [/\/events/, 'events'.freeze],
26
+ [/\/services\/.*\/metrics/, 'metrics'.freeze],
27
+ [/\/service_tokens/, 'service_tokens'.freeze],
28
+ [/\/services\/.*\/stats/, 'stats'.freeze],
29
+ [/\/services\/.*\/plans\/.*\/usagelimits/, 'usage_limits'.freeze],
30
+ [/\/services/, 'services'.freeze],
29
31
  ].freeze
30
32
  private_constant :INTERNAL_API_PATHS
31
33
 
@@ -1,8 +1,4 @@
1
1
  require '3scale/backend/stats/codes_commons'
2
2
  require '3scale/backend/stats/period_commons'
3
3
  require '3scale/backend/stats/aggregator'
4
- require '3scale/backend/stats/delete_job_def'
5
- require '3scale/backend/stats/key_generator'
6
- require '3scale/backend/stats/partition_generator_job'
7
- require '3scale/backend/stats/partition_eraser_job'
8
4
  require '3scale/backend/stats/cleaner'
@@ -20,7 +20,14 @@ module ThreeScale
20
20
  key = counter_key(prefix_key, granularity.new(timestamp))
21
21
  expire_time = Stats::PeriodCommons.expire_time_for_granularity(granularity)
22
22
 
23
- store_key(cmd, key, value, expire_time)
23
+ # We don't need to store stats keys set to 0. It wastes Redis
24
+ # memory because for rate-limiting and stats, a key set to 0
25
+ # is equivalent to a key that does not exist.
26
+ if cmd == :set && value == 0
27
+ storage.del(key)
28
+ else
29
+ store_key(cmd, key, value, expire_time)
30
+ end
24
31
 
25
32
  unless Stats::PeriodCommons::EXCLUDED_FOR_BUCKETS.include?(granularity)
26
33
  keys_for_bucket << key
@@ -45,6 +45,12 @@ module ThreeScale
45
45
  STATS_KEY_PREFIX = 'stats/'.freeze
46
46
  private_constant :STATS_KEY_PREFIX
47
47
 
48
+ REDIS_CONN_ERRORS = [Redis::BaseConnectionError, Errno::ECONNREFUSED, Errno::EPIPE].freeze
49
+ private_constant :REDIS_CONN_ERRORS
50
+
51
+ MAX_RETRIES_REDIS_ERRORS = 3
52
+ private_constant :MAX_RETRIES_REDIS_ERRORS
53
+
48
54
  class << self
49
55
  include Logging
50
56
  def mark_service_to_be_deleted(service_id)
@@ -77,37 +83,73 @@ module ThreeScale
77
83
  logger.info("Going to delete the stats keys for these services: #{services.to_a}")
78
84
 
79
85
  unless services.empty?
80
- delete_successful = true
81
- redis_conns.each do |redis_conn|
86
+ _ok, failed = redis_conns.partition do |redis_conn|
82
87
  begin
83
88
  delete_keys(redis_conn, services, log_deleted_keys)
84
- # If it's a connection error, mark as failed and continue
85
- # cleaning other shards. If it's another kind of error, it
86
- # could be a bug, so better re-raise.
87
- rescue Redis::BaseConnectionError, Errno::ECONNREFUSED, Errno::EPIPE => e
88
- logger.error("Error while deleting stats of server #{redis_conn}: #{e}")
89
- delete_successful = false
90
- rescue Redis::CommandError => e
91
- # Redis::CommandError from redis-rb can be raised for multiple
92
- # reasons, so we need to check the error message to distinguish
93
- # connection errors from the rest.
94
- if e.message == 'ERR Connection timed out'.freeze
95
- logger.error("Error while deleting stats of server #{redis_conn}: #{e}")
96
- delete_successful = false
97
- else
98
- raise e
99
- end
89
+ true
90
+ rescue => e
91
+ handle_redis_exception(e, redis_conn)
92
+ false
100
93
  end
101
94
  end
102
95
 
103
- remove_services_from_delete_set(services) if delete_successful
96
+ with_retries { remove_services_from_delete_set(services) } if failed.empty?
97
+
98
+ failed.each do |failed_conn|
99
+ logger.error("Error while deleting stats of server #{failed_conn}")
100
+ end
104
101
  end
105
102
 
106
103
  logger.info("Finished deleting the stats keys for these services: #{services.to_a}")
107
104
  end
108
105
 
106
+ # Deletes all the stats keys set to 0.
107
+ #
108
+ # Stats keys set to 0 are useless and occupy Redis memory
109
+ # unnecessarily. They were generated due to a bug in previous versions
110
+ # of Apisonator.
111
+ # Ref: https://github.com/3scale/apisonator/pull/247
112
+ #
113
+ # As the .delete function, this one also receives a collection of
114
+ # instantiated Redis clients and those need to connect to Redis
115
+ # servers directly.
116
+ #
117
+ # @param [Array] redis_conns Instantiated Redis clients.
118
+ # @param [IO] log_deleted_keys IO where to write the logs. Defaults to
119
+ # nil (logs nothing).
120
+ def delete_stats_keys_set_to_0(redis_conns, log_deleted_keys: nil)
121
+ _ok, failed = redis_conns.partition do |redis_conn|
122
+ begin
123
+ delete_stats_keys_with_val_0(redis_conn, log_deleted_keys)
124
+ true
125
+ rescue => e
126
+ handle_redis_exception(e, redis_conn)
127
+ false
128
+ end
129
+ end
130
+
131
+ failed.each do |failed_conn|
132
+ logger.error("Error while deleting stats of server #{failed_conn}")
133
+ end
134
+ end
135
+
109
136
  private
110
137
 
138
+ def handle_redis_exception(exception, redis_conn)
139
+ # If it's a connection error, do nothing so we can continue with
140
+ # other shards. If it's another kind of error, it could be caused by
141
+ # a bug, so better re-raise.
142
+
143
+ case exception
144
+ when *REDIS_CONN_ERRORS
145
+ # Do nothing.
146
+ when Redis::CommandError
147
+ raise exception if exception.message != 'ERR Connection timed out'.freeze
148
+ else
149
+ raise exception
150
+ end
151
+ end
152
+
111
153
  # Returns a set with the services included in the
112
154
  # SET_WITH_SERVICES_MARKED_FOR_DELETION Redis set.
113
155
  def services_to_delete
@@ -133,19 +175,21 @@ module ThreeScale
133
175
  cursor = 0
134
176
 
135
177
  loop do
136
- cursor, keys = redis_conn.scan(cursor, count: SCAN_SLICE)
178
+ with_retries do
179
+ cursor, keys = redis_conn.scan(cursor, count: SCAN_SLICE)
137
180
 
138
- to_delete = keys.select { |key| delete_key?(key, services) }
181
+ to_delete = keys.select { |key| delete_key?(key, services) }
139
182
 
140
- unless to_delete.empty?
141
- if log_deleted_keys
142
- values = redis_conn.mget(*(to_delete.to_a))
143
- to_delete.each_with_index do |k, i|
144
- log_deleted_keys.puts "#{k} #{values[i]}"
183
+ unless to_delete.empty?
184
+ if log_deleted_keys
185
+ values = redis_conn.mget(*(to_delete.to_a))
186
+ to_delete.each_with_index do |k, i|
187
+ log_deleted_keys.puts "#{k} #{values[i]}"
188
+ end
145
189
  end
146
- end
147
190
 
148
- redis_conn.del(to_delete)
191
+ redis_conn.del(to_delete)
192
+ end
149
193
  end
150
194
 
151
195
  break if cursor.to_i == 0
@@ -188,6 +232,43 @@ module ThreeScale
188
232
  # simply ignore those keys.
189
233
  nil
190
234
  end
235
+
236
+ def delete_stats_keys_with_val_0(redis_conn, log_deleted_keys)
237
+ cursor = 0
238
+
239
+ loop do
240
+ with_retries do
241
+ cursor, keys = redis_conn.scan(cursor, count: SCAN_SLICE)
242
+
243
+ stats_keys = keys.select { |k| is_stats_key?(k) }
244
+
245
+ unless stats_keys.empty?
246
+ values = redis_conn.mget(*stats_keys)
247
+ to_delete = stats_keys.zip(values).select { |_, v| v == '0'.freeze }.map(&:first)
248
+
249
+ unless to_delete.empty?
250
+ redis_conn.del(to_delete)
251
+ to_delete.each { |k| log_deleted_keys.puts k } if log_deleted_keys
252
+ end
253
+ end
254
+ end
255
+
256
+ break if cursor.to_i == 0
257
+
258
+ sleep(SLEEP_BETWEEN_SCANS)
259
+ end
260
+ end
261
+
262
+ def with_retries(max = MAX_RETRIES_REDIS_ERRORS)
263
+ retries = 0
264
+ begin
265
+ yield
266
+ rescue Exception => e
267
+ retries += 1
268
+ retry if retries < max
269
+ raise e
270
+ end
271
+ end
191
272
  end
192
273
  end
193
274
  end
@@ -70,6 +70,12 @@ module ThreeScale
70
70
  key
71
71
  end
72
72
 
73
+ def set_of_apps_with_traffic(service_id)
74
+ Stats::Keys.applications_key_prefix(
75
+ Stats::Keys.service_key_prefix(service_id)
76
+ )
77
+ end
78
+
73
79
  # We want all the buckets to go to the same Redis shard.
74
80
  # The reason is that SUNION support in Twemproxy requires that the
75
81
  # supplied keys hash to the same server.
@@ -12,9 +12,6 @@ module ThreeScale
12
12
  GRANULARITY_EXPIRATION_TIME = { Period[:minute] => 180 }.freeze
13
13
  private_constant :GRANULARITY_EXPIRATION_TIME
14
14
 
15
- PERMANENT_SERVICE_GRANULARITIES = (SERVICE_GRANULARITIES - GRANULARITY_EXPIRATION_TIME.keys).freeze
16
- PERMANENT_EXPANDED_GRANULARITIES = (EXPANDED_GRANULARITIES - GRANULARITY_EXPIRATION_TIME.keys).freeze
17
-
18
15
  # We are not going to send metrics with granularity 'eternity' or
19
16
  # 'week' to Kinesis, so there is no point in storing them in Redis
20
17
  # buckets.
@@ -20,8 +20,14 @@ module ThreeScale
20
20
  def report(provider_key, service_id, transactions, context_info = {})
21
21
  service = Service.load_with_provider_key!(service_id, provider_key)
22
22
 
23
- report_enqueue(service.id, transactions, context_info)
24
- notify_report(provider_key, transactions.size)
23
+ # A usage of 0 does not affect rate-limits or stats, so we do not need
24
+ # to report it.
25
+ filtered_transactions = filter_usages_with_0(transactions.clone)
26
+
27
+ return if filtered_transactions.empty?
28
+
29
+ report_enqueue(service.id, filtered_transactions, context_info)
30
+ notify_report(provider_key, filtered_transactions.size)
25
31
  end
26
32
 
27
33
  def authorize(provider_key, params, context_info = {})
@@ -137,9 +143,17 @@ module ThreeScale
137
143
 
138
144
  usage = params[:usage]
139
145
 
140
- if (usage || params[:log]) && status.authorized?
146
+ filtered_usage = filter_metrics_without_inc(usage.clone) if usage
147
+
148
+ if ((filtered_usage && !filtered_usage.empty?) || params[:log]) && status.authorized?
141
149
  application_id = status.application.id
142
- report_enqueue(status.service_id, { 0 => {"app_id" => application_id, "usage" => usage, "log" => params[:log] } }, request: { extensions: request_info[:extensions] })
150
+
151
+ report_enqueue(
152
+ status.service_id,
153
+ { 0 => {"app_id" => application_id, "usage" => filtered_usage, "log" => params[:log] } },
154
+ request: { extensions: request_info[:extensions] }
155
+ )
156
+
143
157
  notify_authrep(provider_key, usage ? 1 : 0)
144
158
  else
145
159
  notify_authorize(provider_key)
@@ -182,6 +196,19 @@ module ThreeScale
182
196
  end
183
197
  end
184
198
 
199
+ def filter_usages_with_0(transactions)
200
+ # There are plenty of existing tests using both a string and a symbol
201
+ # when accessing the usage.
202
+ transactions.delete_if do |_idx, tx|
203
+ (usage = tx['usage'.freeze] || tx[:usage]) or next
204
+ filter_metrics_without_inc(usage).empty?
205
+ end
206
+ end
207
+
208
+ def filter_metrics_without_inc(usage)
209
+ usage.delete_if { |_metric, delta| delta.to_s == '0'.freeze }
210
+ end
211
+
185
212
  def storage
186
213
  Storage.instance
187
214
  end
@@ -1,5 +1,5 @@
1
1
  module ThreeScale
2
2
  module Backend
3
- VERSION = '3.1.0'
3
+ VERSION = '3.3.1.1'
4
4
  end
5
5
  end
@@ -1,4 +1,5 @@
1
1
  require 'async'
2
+ require 'redis-namespace'
2
3
  require '3scale/backend/job_fetcher'
3
4
 
4
5
  module ThreeScale
@@ -10,6 +11,9 @@ module ThreeScale
10
11
  DEFAULT_MAX_CONCURRENT_JOBS = 20
11
12
  private_constant :DEFAULT_MAX_CONCURRENT_JOBS
12
13
 
14
+ RESQUE_REDIS_NAMESPACE = :resque
15
+ private_constant :RESQUE_REDIS_NAMESPACE
16
+
13
17
  def initialize(options = {})
14
18
  trap('TERM') { shutdown }
15
19
  trap('INT') { shutdown }
@@ -17,7 +21,7 @@ module ThreeScale
17
21
  @one_off = options[:one_off]
18
22
  @jobs = Queue.new # Thread-safe queue
19
23
 
20
- @job_fetcher = options[:job_fetcher] || JobFetcher.new
24
+ @job_fetcher = options[:job_fetcher] || JobFetcher.new(redis_client: redis_client)
21
25
 
22
26
  @max_concurrent_jobs = configuration.async_worker.max_concurrent_jobs ||
23
27
  DEFAULT_MAX_CONCURRENT_JOBS
@@ -64,6 +68,10 @@ module ThreeScale
64
68
  # unblocks when there are new jobs or when .close() is called
65
69
  job = @jobs.pop
66
70
 
71
+ # If job is nil, it means that the queue is closed. No more jobs are
72
+ # going to be pushed, so shutdown.
73
+ shutdown unless job
74
+
67
75
  break if @shutdown
68
76
 
69
77
  @reactor.async { perform(job) }
@@ -83,6 +91,19 @@ module ThreeScale
83
91
  Async { @job_fetcher.start(@jobs) }
84
92
  end
85
93
  end
94
+
95
+ # Returns a new Redis client with namespace "resque".
96
+ # In the async worker, the job fetcher runs in a separate thread, and we
97
+ # need to avoid sharing an already instantiated client like the one in
98
+ # Resque::Helpers initialized in lib/3scale/backend.rb (Resque.redis).
99
+ # Failing to do so, will raise errors because of fibers shared across
100
+ # threads.
101
+ def redis_client
102
+ Redis::Namespace.new(
103
+ RESQUE_REDIS_NAMESPACE,
104
+ redis: QueueStorage.connection(Backend.environment, Backend.configuration)
105
+ )
106
+ end
86
107
  end
87
108
  end
88
109
  end
data/licenses.xml CHANGED
@@ -23,7 +23,7 @@
23
23
  </dependency>
24
24
  <dependency>
25
25
  <packageName>apisonator</packageName>
26
- <version>3.1.0</version>
26
+ <version>3.3.1.1</version>
27
27
  <licenses>
28
28
  <license>
29
29
  <name>Apache 2.0</name>
@@ -93,7 +93,7 @@
93
93
  </dependency>
94
94
  <dependency>
95
95
  <packageName>async-redis</packageName>
96
- <version>0.5.0</version>
96
+ <version>0.5.1</version>
97
97
  <licenses>
98
98
  <license>
99
99
  <name>MIT</name>
@@ -525,7 +525,7 @@
525
525
  </dependency>
526
526
  <dependency>
527
527
  <packageName>nio4r</packageName>
528
- <version>2.5.2</version>
528
+ <version>2.5.4</version>
529
529
  <licenses>
530
530
  <license>
531
531
  <name>MIT</name>
@@ -709,7 +709,7 @@
709
709
  </dependency>
710
710
  <dependency>
711
711
  <packageName>puma</packageName>
712
- <version>2.15.3</version>
712
+ <version>4.3.7</version>
713
713
  <licenses>
714
714
  <license>
715
715
  <name>New BSD</name>
@@ -769,7 +769,7 @@
769
769
  </dependency>
770
770
  <dependency>
771
771
  <packageName>redis-namespace</packageName>
772
- <version>1.6.0</version>
772
+ <version>1.8.0</version>
773
773
  <licenses>
774
774
  <license>
775
775
  <name>MIT</name>
@@ -1043,7 +1043,7 @@
1043
1043
  </dependency>
1044
1044
  <dependency>
1045
1045
  <packageName>timers</packageName>
1046
- <version>4.3.0</version>
1046
+ <version>4.3.2</version>
1047
1047
  <licenses>
1048
1048
  <license>
1049
1049
  <name>MIT</name>
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: apisonator
3
3
  version: !ruby/object:Gem::Version
4
- version: 3.1.0
4
+ version: 3.3.1.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Adam Ciganek
@@ -16,7 +16,7 @@ authors:
16
16
  autorequire:
17
17
  bindir: bin
18
18
  cert_chain: []
19
- date: 2020-10-14 00:00:00.000000000 Z
19
+ date: 2021-02-12 00:00:00.000000000 Z
20
20
  dependencies: []
21
21
  description: This gem provides a daemon that handles authorization and reporting of
22
22
  web services managed by 3scale.
@@ -136,11 +136,7 @@ files:
136
136
  - lib/3scale/backend/stats/bucket_storage.rb
137
137
  - lib/3scale/backend/stats/cleaner.rb
138
138
  - lib/3scale/backend/stats/codes_commons.rb
139
- - lib/3scale/backend/stats/delete_job_def.rb
140
- - lib/3scale/backend/stats/key_generator.rb
141
139
  - lib/3scale/backend/stats/keys.rb
142
- - lib/3scale/backend/stats/partition_eraser_job.rb
143
- - lib/3scale/backend/stats/partition_generator_job.rb
144
140
  - lib/3scale/backend/stats/period_commons.rb
145
141
  - lib/3scale/backend/stats/stats_parser.rb
146
142
  - lib/3scale/backend/stats/storage.rb
@@ -1,60 +0,0 @@
1
- module ThreeScale
2
- module Backend
3
- module Stats
4
- class DeleteJobDef
5
- ATTRIBUTES = %i[service_id applications metrics from to context_info].freeze
6
- private_constant :ATTRIBUTES
7
- attr_reader(*ATTRIBUTES)
8
-
9
- def self.attribute_names
10
- ATTRIBUTES
11
- end
12
-
13
- def initialize(params = {})
14
- ATTRIBUTES.each do |key|
15
- instance_variable_set("@#{key}".to_sym, params[key]) unless params[key].nil?
16
- end
17
- validate
18
- end
19
-
20
- def run_async
21
- Resque.enqueue(PartitionGeneratorJob, Time.now.getutc.to_f, service_id, applications,
22
- metrics, from, to, context_info)
23
- end
24
-
25
- def to_json
26
- to_hash.to_json
27
- end
28
-
29
- def to_hash
30
- Hash[ATTRIBUTES.collect { |key| [key, send(key)] }]
31
- end
32
-
33
- private
34
-
35
- def validate
36
- # from and to valid epoch times
37
- raise_validation_error('from field not integer') unless from.is_a? Integer
38
- raise_validation_error('from field is zero') if from.zero?
39
- raise_validation_error('to field not integer') unless to.is_a? Integer
40
- raise_validation_error('to field is zero') if to.zero?
41
- raise_validation_error('from < to fields') if Time.at(to) < Time.at(from)
42
- # application is array
43
- raise_validation_error('applications field') unless applications.is_a? Array
44
- raise_validation_error('applications values') unless applications.all? do |x|
45
- x.is_a?(String) || x.is_a?(Integer)
46
- end
47
- # metrics is array
48
- raise_validation_error('metrics field') unless metrics.is_a? Array
49
- raise_validation_error('metrics values') unless metrics.all? do |x|
50
- x.is_a?(String) || x.is_a?(Integer)
51
- end
52
- end
53
-
54
- def raise_validation_error(msg)
55
- raise DeleteServiceStatsValidationError.new(service_id, msg)
56
- end
57
- end
58
- end
59
- end
60
- end
@@ -1,73 +0,0 @@
1
- module ThreeScale
2
- module Backend
3
- module Stats
4
- class KeyGenerator
5
- attr_reader :service_id, :applications, :metrics, :from, :to
6
-
7
- def initialize(service_id:, applications: [], metrics: [], from:, to:, **)
8
- @service_id = service_id
9
- @applications = applications
10
- @metrics = metrics
11
- @from = from
12
- @to = to
13
- end
14
-
15
- def keys
16
- response_code_service_keys +
17
- response_code_application_keys +
18
- usage_service_keys +
19
- usage_application_keys
20
- end
21
-
22
- private
23
-
24
- def periods(granularities)
25
- granularities.flat_map do |granularity|
26
- (Period[granularity].new(Time.at(from))..Period[granularity].new(Time.at(to))).to_a
27
- end
28
- end
29
-
30
- def response_codes
31
- CodesCommons::TRACKED_CODES + CodesCommons::TRACKED_CODE_GROUPS
32
- end
33
-
34
- def response_code_service_keys
35
- periods(PeriodCommons::PERMANENT_SERVICE_GRANULARITIES).flat_map do |period|
36
- response_codes.flat_map do |response_code|
37
- Keys.service_response_code_value_key(service_id, response_code, period)
38
- end
39
- end
40
- end
41
-
42
- def response_code_application_keys
43
- periods(PeriodCommons::PERMANENT_EXPANDED_GRANULARITIES).flat_map do |period|
44
- response_codes.flat_map do |response_code|
45
- applications.flat_map do |application|
46
- Keys.application_response_code_value_key(service_id, application,
47
- response_code, period)
48
- end
49
- end
50
- end
51
- end
52
-
53
- def usage_service_keys
54
- periods(PeriodCommons::PERMANENT_SERVICE_GRANULARITIES).flat_map do |period|
55
- metrics.flat_map do |metric|
56
- Keys.service_usage_value_key(service_id, metric, period)
57
- end
58
- end
59
- end
60
-
61
- def usage_application_keys
62
- periods(PeriodCommons::PERMANENT_EXPANDED_GRANULARITIES).flat_map do |period|
63
- metrics.flat_map do |metric|
64
- applications.flat_map do |application|
65
- Keys.application_usage_value_key(service_id, application, metric, period)
66
- end
67
- end
68
- end
69
- end
70
- end
71
- end
72
- end
73
- end
@@ -1,58 +0,0 @@
1
- module ThreeScale
2
- module Backend
3
- module Stats
4
- # Job for deleting service stats
5
- # Perform actual key deletion from a key partition definition
6
- class PartitionEraserJob < BackgroundJob
7
- # low priority queue
8
- @queue = :stats
9
-
10
- class << self
11
- include StorageHelpers
12
- include Configurable
13
-
14
- def perform_logged(_enqueue_time, service_id, applications, metrics,
15
- from, to, offset, length, context_info = {})
16
- job = DeleteJobDef.new(
17
- service_id: service_id,
18
- applications: applications,
19
- metrics: metrics,
20
- from: from,
21
- to: to
22
- )
23
-
24
- validate_job(job, offset, length)
25
-
26
- stats_key_gen = KeyGenerator.new(job.to_hash)
27
-
28
- stats_key_gen.keys.drop(offset).take(length).each_slice(configuration.stats.delete_batch_size) do |slice|
29
- storage.del(slice)
30
- end
31
-
32
- [true, { job: job.to_hash, offset: offset, lenght: length }.to_json]
33
- rescue Backend::Error => error
34
- [false, "#{service_id} #{error}"]
35
- end
36
-
37
- private
38
-
39
- def validate_job(job, offset, length)
40
- unless offset.is_a? Integer
41
- raise DeleteServiceStatsValidationError.new(job.service_id, 'offset field value ' \
42
- "[#{offset}] validation error")
43
- end
44
-
45
- unless length.is_a? Integer
46
- raise DeleteServiceStatsValidationError.new(job.service_id, 'length field value ' \
47
- "[#{length}] validation error")
48
- end
49
- end
50
-
51
- def enqueue_time(args)
52
- args[0]
53
- end
54
- end
55
- end
56
- end
57
- end
58
- end
@@ -1,46 +0,0 @@
1
- module ThreeScale
2
- module Backend
3
- module Stats
4
- # Job for deleting service stats
5
- # Maps delete job definition to a set of non overlapping key set partitions
6
- class PartitionGeneratorJob < BackgroundJob
7
- # low priority queue
8
- @queue = :stats
9
-
10
- class << self
11
- include Configurable
12
-
13
- def perform_logged(_enqueue_time, service_id, applications, metrics,
14
- from, to, context_info = {})
15
- job = DeleteJobDef.new(
16
- service_id: service_id,
17
- applications: applications,
18
- metrics: metrics,
19
- from: from,
20
- to: to
21
- )
22
-
23
- stats_key_gen = KeyGenerator.new(job.to_hash)
24
-
25
- # Generate partitions
26
- 0.step(stats_key_gen.keys.count, configuration.stats.delete_partition_batch_size).each do |idx|
27
- Resque.enqueue(PartitionEraserJob, Time.now.getutc.to_f, service_id, applications,
28
- metrics, from, to, idx,
29
- configuration.stats.delete_partition_batch_size, context_info)
30
- end
31
-
32
- [true, job.to_json]
33
- rescue Backend::Error => error
34
- [false, "#{service_id} #{error}"]
35
- end
36
-
37
- private
38
-
39
- def enqueue_time(args)
40
- args[0]
41
- end
42
- end
43
- end
44
- end
45
- end
46
- end