apisonator 2.100.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/CHANGELOG.md +317 -0
- data/Gemfile +11 -0
- data/Gemfile.base +65 -0
- data/Gemfile.lock +319 -0
- data/Gemfile.on_prem +1 -0
- data/Gemfile.on_prem.lock +297 -0
- data/LICENSE +202 -0
- data/NOTICE +15 -0
- data/README.md +230 -0
- data/Rakefile +287 -0
- data/apisonator.gemspec +47 -0
- data/app/api/api.rb +13 -0
- data/app/api/internal/alert_limits.rb +32 -0
- data/app/api/internal/application_keys.rb +49 -0
- data/app/api/internal/application_referrer_filters.rb +43 -0
- data/app/api/internal/applications.rb +77 -0
- data/app/api/internal/errors.rb +54 -0
- data/app/api/internal/events.rb +42 -0
- data/app/api/internal/internal.rb +104 -0
- data/app/api/internal/metrics.rb +40 -0
- data/app/api/internal/service_tokens.rb +46 -0
- data/app/api/internal/services.rb +58 -0
- data/app/api/internal/stats.rb +42 -0
- data/app/api/internal/usagelimits.rb +62 -0
- data/app/api/internal/utilization.rb +23 -0
- data/bin/3scale_backend +223 -0
- data/bin/3scale_backend_worker +26 -0
- data/config.ru +4 -0
- data/config/puma.rb +192 -0
- data/config/schedule.rb +9 -0
- data/ext/mkrf_conf.rb +64 -0
- data/lib/3scale/backend.rb +67 -0
- data/lib/3scale/backend/alert_limit.rb +56 -0
- data/lib/3scale/backend/alerts.rb +137 -0
- data/lib/3scale/backend/analytics/kinesis.rb +3 -0
- data/lib/3scale/backend/analytics/kinesis/adapter.rb +180 -0
- data/lib/3scale/backend/analytics/kinesis/exporter.rb +86 -0
- data/lib/3scale/backend/analytics/kinesis/job.rb +135 -0
- data/lib/3scale/backend/analytics/redshift.rb +3 -0
- data/lib/3scale/backend/analytics/redshift/adapter.rb +367 -0
- data/lib/3scale/backend/analytics/redshift/importer.rb +83 -0
- data/lib/3scale/backend/analytics/redshift/job.rb +33 -0
- data/lib/3scale/backend/application.rb +330 -0
- data/lib/3scale/backend/application_events.rb +76 -0
- data/lib/3scale/backend/background_job.rb +65 -0
- data/lib/3scale/backend/configurable.rb +20 -0
- data/lib/3scale/backend/configuration.rb +151 -0
- data/lib/3scale/backend/configuration/loader.rb +42 -0
- data/lib/3scale/backend/constants.rb +19 -0
- data/lib/3scale/backend/cors.rb +84 -0
- data/lib/3scale/backend/distributed_lock.rb +67 -0
- data/lib/3scale/backend/environment.rb +21 -0
- data/lib/3scale/backend/error_storage.rb +52 -0
- data/lib/3scale/backend/errors.rb +343 -0
- data/lib/3scale/backend/event_storage.rb +120 -0
- data/lib/3scale/backend/experiment.rb +84 -0
- data/lib/3scale/backend/extensions.rb +5 -0
- data/lib/3scale/backend/extensions/array.rb +19 -0
- data/lib/3scale/backend/extensions/hash.rb +26 -0
- data/lib/3scale/backend/extensions/nil_class.rb +13 -0
- data/lib/3scale/backend/extensions/redis.rb +44 -0
- data/lib/3scale/backend/extensions/string.rb +13 -0
- data/lib/3scale/backend/extensions/time.rb +110 -0
- data/lib/3scale/backend/failed_jobs_scheduler.rb +141 -0
- data/lib/3scale/backend/job_fetcher.rb +122 -0
- data/lib/3scale/backend/listener.rb +728 -0
- data/lib/3scale/backend/listener_metrics.rb +99 -0
- data/lib/3scale/backend/logging.rb +48 -0
- data/lib/3scale/backend/logging/external.rb +44 -0
- data/lib/3scale/backend/logging/external/impl.rb +93 -0
- data/lib/3scale/backend/logging/external/impl/airbrake.rb +66 -0
- data/lib/3scale/backend/logging/external/impl/bugsnag.rb +69 -0
- data/lib/3scale/backend/logging/external/impl/default.rb +18 -0
- data/lib/3scale/backend/logging/external/resque.rb +57 -0
- data/lib/3scale/backend/logging/logger.rb +18 -0
- data/lib/3scale/backend/logging/middleware.rb +62 -0
- data/lib/3scale/backend/logging/middleware/json_writer.rb +21 -0
- data/lib/3scale/backend/logging/middleware/text_writer.rb +60 -0
- data/lib/3scale/backend/logging/middleware/writer.rb +143 -0
- data/lib/3scale/backend/logging/worker.rb +107 -0
- data/lib/3scale/backend/manifest.rb +80 -0
- data/lib/3scale/backend/memoizer.rb +277 -0
- data/lib/3scale/backend/metric.rb +275 -0
- data/lib/3scale/backend/metric/collection.rb +91 -0
- data/lib/3scale/backend/oauth.rb +4 -0
- data/lib/3scale/backend/oauth/token.rb +26 -0
- data/lib/3scale/backend/oauth/token_key.rb +30 -0
- data/lib/3scale/backend/oauth/token_storage.rb +313 -0
- data/lib/3scale/backend/oauth/token_value.rb +25 -0
- data/lib/3scale/backend/period.rb +3 -0
- data/lib/3scale/backend/period/boundary.rb +107 -0
- data/lib/3scale/backend/period/cache.rb +28 -0
- data/lib/3scale/backend/period/period.rb +402 -0
- data/lib/3scale/backend/queue_storage.rb +16 -0
- data/lib/3scale/backend/rack.rb +49 -0
- data/lib/3scale/backend/rack/exception_catcher.rb +136 -0
- data/lib/3scale/backend/rack/internal_error_catcher.rb +23 -0
- data/lib/3scale/backend/rack/prometheus.rb +19 -0
- data/lib/3scale/backend/saas.rb +6 -0
- data/lib/3scale/backend/saas_analytics.rb +4 -0
- data/lib/3scale/backend/server.rb +30 -0
- data/lib/3scale/backend/server/falcon.rb +52 -0
- data/lib/3scale/backend/server/puma.rb +71 -0
- data/lib/3scale/backend/service.rb +317 -0
- data/lib/3scale/backend/service_token.rb +97 -0
- data/lib/3scale/backend/stats.rb +8 -0
- data/lib/3scale/backend/stats/aggregator.rb +170 -0
- data/lib/3scale/backend/stats/aggregators/base.rb +72 -0
- data/lib/3scale/backend/stats/aggregators/response_code.rb +58 -0
- data/lib/3scale/backend/stats/aggregators/usage.rb +34 -0
- data/lib/3scale/backend/stats/bucket_reader.rb +135 -0
- data/lib/3scale/backend/stats/bucket_storage.rb +108 -0
- data/lib/3scale/backend/stats/cleaner.rb +195 -0
- data/lib/3scale/backend/stats/codes_commons.rb +14 -0
- data/lib/3scale/backend/stats/delete_job_def.rb +60 -0
- data/lib/3scale/backend/stats/key_generator.rb +73 -0
- data/lib/3scale/backend/stats/keys.rb +104 -0
- data/lib/3scale/backend/stats/partition_eraser_job.rb +58 -0
- data/lib/3scale/backend/stats/partition_generator_job.rb +46 -0
- data/lib/3scale/backend/stats/period_commons.rb +34 -0
- data/lib/3scale/backend/stats/stats_parser.rb +141 -0
- data/lib/3scale/backend/stats/storage.rb +113 -0
- data/lib/3scale/backend/statsd.rb +14 -0
- data/lib/3scale/backend/storable.rb +35 -0
- data/lib/3scale/backend/storage.rb +40 -0
- data/lib/3scale/backend/storage_async.rb +4 -0
- data/lib/3scale/backend/storage_async/async_redis.rb +21 -0
- data/lib/3scale/backend/storage_async/client.rb +205 -0
- data/lib/3scale/backend/storage_async/pipeline.rb +79 -0
- data/lib/3scale/backend/storage_async/resque_extensions.rb +30 -0
- data/lib/3scale/backend/storage_helpers.rb +278 -0
- data/lib/3scale/backend/storage_key_helpers.rb +9 -0
- data/lib/3scale/backend/storage_sync.rb +43 -0
- data/lib/3scale/backend/transaction.rb +62 -0
- data/lib/3scale/backend/transactor.rb +177 -0
- data/lib/3scale/backend/transactor/limit_headers.rb +54 -0
- data/lib/3scale/backend/transactor/notify_batcher.rb +139 -0
- data/lib/3scale/backend/transactor/notify_job.rb +47 -0
- data/lib/3scale/backend/transactor/process_job.rb +33 -0
- data/lib/3scale/backend/transactor/report_job.rb +84 -0
- data/lib/3scale/backend/transactor/status.rb +236 -0
- data/lib/3scale/backend/transactor/usage_report.rb +182 -0
- data/lib/3scale/backend/usage.rb +63 -0
- data/lib/3scale/backend/usage_limit.rb +115 -0
- data/lib/3scale/backend/use_cases/provider_key_change_use_case.rb +60 -0
- data/lib/3scale/backend/util.rb +17 -0
- data/lib/3scale/backend/validators.rb +26 -0
- data/lib/3scale/backend/validators/base.rb +36 -0
- data/lib/3scale/backend/validators/key.rb +17 -0
- data/lib/3scale/backend/validators/limits.rb +57 -0
- data/lib/3scale/backend/validators/oauth_key.rb +15 -0
- data/lib/3scale/backend/validators/oauth_setting.rb +15 -0
- data/lib/3scale/backend/validators/redirect_uri.rb +33 -0
- data/lib/3scale/backend/validators/referrer.rb +60 -0
- data/lib/3scale/backend/validators/service_state.rb +15 -0
- data/lib/3scale/backend/validators/state.rb +15 -0
- data/lib/3scale/backend/version.rb +5 -0
- data/lib/3scale/backend/views/oauth_access_tokens.builder +14 -0
- data/lib/3scale/backend/views/oauth_app_id_by_token.builder +4 -0
- data/lib/3scale/backend/worker.rb +87 -0
- data/lib/3scale/backend/worker_async.rb +88 -0
- data/lib/3scale/backend/worker_metrics.rb +44 -0
- data/lib/3scale/backend/worker_sync.rb +32 -0
- data/lib/3scale/bundler_shim.rb +17 -0
- data/lib/3scale/prometheus_server.rb +10 -0
- data/lib/3scale/tasks/connectivity.rake +41 -0
- data/lib/3scale/tasks/helpers.rb +3 -0
- data/lib/3scale/tasks/helpers/environment.rb +23 -0
- data/lib/3scale/tasks/stats.rake +131 -0
- data/lib/3scale/tasks/swagger.rake +46 -0
- data/licenses.xml +1215 -0
- metadata +227 -0
@@ -0,0 +1,108 @@
|
|
1
|
+
module ThreeScale
  module Backend
    module Stats
      # Manages the buckets where stats keys are stored.
      #
      # A new bucket is created every few seconds (10 by default), and each
      # bucket collects all the stats keys that changed during that creation
      # interval. The values behind those keys can be fetched from Redis with
      # an ordinary call.
      class BucketStorage
        # If buckets have not been read for a long time, a single union could
        # involve a huge number of keys, so we cap how many buckets are sent
        # to one SUNION call.
        #
        # With the Kinesis job running every 2 min and buckets created every
        # 10 s, 12 buckets (120/10) would be enough, but 15 leaves some slack
        # so each job normally needs just one union.
        MAX_BUCKETS_REDIS_UNION = 15
        private_constant :MAX_BUCKETS_REDIS_UNION

        attr_reader :storage

        def initialize(storage)
          @storage = storage
        end

        # Removes every bucket with score up to +last_bucket+ from the sorted
        # set, deleting each bucket's contents as well.
        def delete_range(last_bucket)
          range = storage.zrangebyscore(Keys.changed_keys_key, 0, last_bucket)

          storage.pipelined do
            range.each { |b| delete_bucket_content(b) }
            storage.zremrangebyscore(Keys.changed_keys_key, 0, last_bucket)
          end
        end

        # Deletes every bucket and its contents, plus the sorted set that
        # indexes them. Disables bucket storage first so nothing new is
        # written while cleaning. Pass silent: true to suppress progress
        # output.
        def delete_all_buckets_and_keys(options = {})
          Storage.disable!

          buckets.each do |bucket|
            keys = storage.smembers(Keys.changed_keys_bucket_key(bucket))
            unless options[:silent]
              puts "Deleting bucket: #{bucket}, containing #{keys.size} keys"
            end
            storage.del(Keys.changed_keys_bucket_key(bucket))
          end
          storage.del(Keys.changed_keys_key)
        end

        # Returns the bucket names whose score falls within [first, last].
        def buckets(first: '-inf', last: '+inf')
          storage.zrangebyscore(Keys.changed_keys_key, first, last)
        end

        # Number of buckets that have not been read yet.
        def pending_buckets_size
          storage.zcard(Keys.changed_keys_key)
        end

        # Puts keys in a bucket, creating the bucket if it does not exist.
        # Filling only pre-existing buckets would force us to fetch the full
        # bucket list on every call, which would hurt performance.
        def put_in_bucket(event_keys, bucket)
          storage.pipelined do
            storage.zadd(Keys.changed_keys_key, bucket, bucket)
            storage.sadd(Keys.changed_keys_bucket_key(bucket), event_keys)
          end
        end

        # Returns the union of the keys stored in the given buckets, without
        # duplicates.
        #
        # Note on reading the values afterwards: Redis stores them as
        # strings, and some may come back nil — event keys with
        # granularity = 'minute' expire after 180 s, so a key read after
        # expiry yields nil. That TTL might need to be raised to avoid
        # missing values.
        def content(buckets)
          buckets.each_slice(MAX_BUCKETS_REDIS_UNION).flat_map do |slice|
            slice_keys = slice.map { |b| Keys.changed_keys_bucket_key(b) }
            storage.sunion(slice_keys)
          end.uniq
        end

        # Maps each pending bucket to the number of keys it contains.
        def pending_keys_by_bucket
          bucket_keys = buckets.map { |bucket| Keys.changed_keys_bucket_key(bucket) }

          counts = storage.pipelined do
            bucket_keys.map { |key| storage.scard(key) }
          end

          buckets.zip(counts).to_h
        end

        private

        # Deletes the set that holds the given bucket's keys.
        def delete_bucket_content(bucket)
          storage.del(Keys.changed_keys_bucket_key(bucket))
        end
      end
    end
  end
end
|
@@ -0,0 +1,195 @@
|
|
1
|
+
module ThreeScale
  module Backend
    module Stats
      class Cleaner
        # Design notes:
        # Apisonator does not store in any Redis structure the stats keys
        # associated with a service. Doing so would imply:
        # - Performance hit when reporting. After updating a stats key, it
        # would need to be included in a set, which would increase the number
        # of operations in Redis.
        # - More space in Redis. To maintain the sets mentioned in the point
        # above.
        # - A data migration would be needed to create those sets from the
        # existing stats keys.
        #
        # In order to avoid those costs, this class is implemented in a way that
        # does not need to keep an updated list of all the stats keys for every
        # service. Services marked for deletion are stored in a Redis set, and
        # then, a script, periodic cron, etc. is responsible for calling
        # delete!(). That method scans the whole database and deletes all the
        # stats keys that belong to those services marked to be deleted.
        # The downside of this method is that it requires direct access to the
        # redis servers. Going through a proxy like Twemproxy does not work,
        # because it does not support the "SCAN" command.
        #
        # In the past we tried an alternative approach. When we received a
        # request to delete the stats of a service, we generated all the
        # possible stats keys that could exist for it. That approach was not
        # efficient because it ended up generating many keys that didn't exist
        # and thus, unnecessary delete calls to Redis. That approach is also
        # more complex and error prone. You can find the details here:
        # https://github.com/3scale/apisonator/issues/90

        include Storable

        # Redis set holding the IDs of the services whose stats are pending
        # deletion.
        KEY_SERVICES_TO_DELETE = 'set_with_services_marked_for_deletion'.freeze
        private_constant :KEY_SERVICES_TO_DELETE

        # Pause between SCAN/SSCAN iterations so the cleanup does not hog the
        # Redis servers.
        SLEEP_BETWEEN_SCANS = 0.01 # In seconds
        private_constant :SLEEP_BETWEEN_SCANS

        # COUNT hint passed to SCAN/SSCAN on each iteration.
        SCAN_SLICE = 500
        private_constant :SCAN_SLICE

        STATS_KEY_PREFIX = 'stats/'.freeze
        private_constant :STATS_KEY_PREFIX

        class << self
          include Logging

          # Adds the service to the Redis set of services whose stats keys
          # will be removed by the next call to delete!.
          def mark_service_to_be_deleted(service_id)
            storage.sadd(KEY_SERVICES_TO_DELETE, service_id)
          end

          # Deletes all the stats for the services that have been marked for
          # deletion.
          #
          # This method receives a collection of instantiated Redis clients.
          # Those clients need to connect to Redis servers directly. They cannot
          # connect to a proxy like Twemproxy. The reason is that this function
          # needs to scan the database using the "SCAN" command, which is not
          # supported by Twemproxy.
          #
          # The services marked as deletion will be marked as done only when
          # this function finishes deleting the keys from all the Redis servers.
          # This means that if the function raises in the middle of the
          # execution, those services will be retried in the next call.
          #
          # Note 1: keys deleted cannot be restored.
          # Note 2: this method can take a long time to finish as it needs to
          # scan all the keys in several DBs.
          #
          # @param [Array] redis_conns Instantiated Redis clients.
          # @param [IO] log_deleted_keys IO where to write the logs. Defaults to
          # nil (logs nothing).
          def delete!(redis_conns, log_deleted_keys: nil)
            services = services_to_delete
            logger.info("Going to delete the stats keys for these services: #{services.to_a}")

            unless services.empty?
              delete_successful = true
              redis_conns.each do |redis_conn|
                begin
                  delete_keys(redis_conn, services, log_deleted_keys)
                # If it's a connection error, mark as failed and continue
                # cleaning other shards. If it's another kind of error, it
                # could be a bug, so better re-raise.
                rescue Redis::BaseConnectionError, Errno::ECONNREFUSED, Errno::EPIPE => e
                  logger.error("Error while deleting stats of server #{redis_conn}: #{e}")
                  delete_successful = false
                rescue Redis::CommandError => e
                  # Redis::CommandError from redis-rb can be raised for multiple
                  # reasons, so we need to check the error message to distinguish
                  # connection errors from the rest.
                  if e.message == 'ERR Connection timed out'.freeze
                    logger.error("Error while deleting stats of server #{redis_conn}: #{e}")
                    delete_successful = false
                  else
                    raise e
                  end
                end
              end

              # Only un-mark the services once every server was cleaned, so a
              # partial failure gets retried on the next call.
              remove_services_from_delete_set(services) if delete_successful
            end

            logger.info("Finished deleting the stats keys for these services: #{services.to_a}")
          end

          private

          # Returns a set with the services included in the
          # SET_WITH_SERVICES_MARKED_FOR_DELETION Redis set.
          def services_to_delete
            res = []
            cursor = 0

            loop do
              cursor, services = storage.sscan(
                KEY_SERVICES_TO_DELETE, cursor, count: SCAN_SLICE
              )

              res += services

              # SSCAN returns cursor "0" when the iteration is complete.
              break if cursor.to_i == 0

              sleep(SLEEP_BETWEEN_SCANS)
            end

            res.to_set
          end

          # Scans the whole keyspace of +redis_conn+ and deletes every stats
          # key that belongs to one of +services+. When +log_deleted_keys+ is
          # an IO, each deleted key and its value are written to it first.
          def delete_keys(redis_conn, services, log_deleted_keys)
            cursor = 0

            loop do
              cursor, keys = redis_conn.scan(cursor, count: SCAN_SLICE)

              to_delete = keys.select { |key| delete_key?(key, services) }

              unless to_delete.empty?
                if log_deleted_keys
                  # Fetch the values before deleting so they can be logged.
                  values = redis_conn.mget(*(to_delete.to_a))
                  to_delete.each_with_index do |k, i|
                    log_deleted_keys.puts "#{k} #{values[i]}"
                  end
                end

                redis_conn.del(to_delete)
              end

              break if cursor.to_i == 0

              sleep(SLEEP_BETWEEN_SCANS)
            end
          end

          # Removes the given services from the "marked for deletion" set.
          def remove_services_from_delete_set(services)
            storage.pipelined do
              services.each do |service|
                storage.srem(KEY_SERVICES_TO_DELETE, service)
              end
            end
          end

          # True when +key+ is a stats key whose encoded service ID is in
          # +services_to_delete+.
          def delete_key?(key, services_to_delete)
            return false unless is_stats_key?(key)

            service_in_key = service_from_stats_key(key)
            service_in_key && services_to_delete.include?(service_in_key)
          end

          def is_stats_key?(key)
            # A key that starts with STATS_KEY_PREFIX is a stats key except if it
            # follows this pattern: /STATS_KEY_PREFIX{service:.*}\/cinstances/. That's a
            # type of key used only for the "first traffic" event
            # (ApplicationEvents.first_traffic).
            key.start_with?(STATS_KEY_PREFIX) && !key.match(/cinstances/)
          end

          # Returns nil when there's not a service encoded in the key or when
          # the stats key has an invalid format.
          def service_from_stats_key(stats_key)
            StatsParser.parse(stats_key, nil)[:service]
          rescue StatsParser::StatsKeyValueInvalid
            # This could happen with legacy stats keys. For example, a long time
            # ago some stats keys had a "city" and a "country" encoded, but
            # always empty. That format has not been used in a long time. We'll
            # simply ignore those keys.
            nil
          end
        end
      end
    end
  end
end
|
@@ -0,0 +1,14 @@
|
|
1
|
+
module ThreeScale
  module Backend
    module Stats
      # Shared constants and helpers for tracking HTTP response codes in
      # stats.
      module CodesCommons
        # Individual status codes that are tracked.
        TRACKED_CODES = [200, 404, 403, 500, 503].freeze
        # Code families ("groups") that are tracked.
        TRACKED_CODE_GROUPS = ['2XX'.freeze, '4XX'.freeze, '5XX'.freeze].freeze

        # Maps a status code to its family, e.g. 404 -> "4XX".
        def self.get_http_code_group(http_code)
          family = http_code / 100
          "#{family}XX"
        end
      end
    end
  end
end
|
@@ -0,0 +1,60 @@
|
|
1
|
+
module ThreeScale
  module Backend
    module Stats
      # Value object describing a "delete stats" job: the affected service,
      # applications and metrics, plus the epoch-time range to erase.
      # Validates its params on construction.
      class DeleteJobDef
        ATTRIBUTES = %i[service_id applications metrics from to context_info].freeze
        private_constant :ATTRIBUTES
        attr_reader(*ATTRIBUTES)

        # Exposes the attribute list without exposing the private constant.
        def self.attribute_names
          ATTRIBUTES
        end

        # Accepts a params hash; nil values leave the attribute unset.
        # Raises DeleteServiceStatsValidationError when params are invalid.
        def initialize(params = {})
          ATTRIBUTES.each do |attr|
            value = params[attr]
            instance_variable_set(:"@#{attr}", value) unless value.nil?
          end
          validate
        end

        # Enqueues the partition-generator job that fans out the deletion.
        def run_async
          Resque.enqueue(PartitionGeneratorJob, Time.now.getutc.to_f, service_id,
                         applications, metrics, from, to, context_info)
        end

        def to_json
          to_hash.to_json
        end

        def to_hash
          ATTRIBUTES.each_with_object({}) { |attr, acc| acc[attr] = public_send(attr) }
        end

        private

        # Checks the time range and the applications/metrics collections,
        # raising a validation error on the first problem found.
        def validate
          # from and to must be valid (non-zero) epoch times
          raise_validation_error('from field not integer') unless from.is_a?(Integer)
          raise_validation_error('from field is zero') if from.zero?
          raise_validation_error('to field not integer') unless to.is_a?(Integer)
          raise_validation_error('to field is zero') if to.zero?
          raise_validation_error('from < to fields') if Time.at(to) < Time.at(from)

          # applications must be an array of strings/integers
          raise_validation_error('applications field') unless applications.is_a?(Array)
          apps_ok = applications.all? { |app| app.is_a?(String) || app.is_a?(Integer) }
          raise_validation_error('applications values') unless apps_ok

          # metrics must be an array of strings/integers
          raise_validation_error('metrics field') unless metrics.is_a?(Array)
          metrics_ok = metrics.all? { |metric| metric.is_a?(String) || metric.is_a?(Integer) }
          raise_validation_error('metrics values') unless metrics_ok
        end

        def raise_validation_error(msg)
          raise DeleteServiceStatsValidationError.new(service_id, msg)
        end
      end
    end
  end
end
|
@@ -0,0 +1,73 @@
|
|
1
|
+
module ThreeScale
  module Backend
    module Stats
      # Builds every stats key (usage and response-code, per service and per
      # application) for a service within the [from, to] epoch-time range.
      class KeyGenerator
        attr_reader :service_id, :applications, :metrics, :from, :to

        def initialize(service_id:, applications: [], metrics: [], from:, to:, **)
          @service_id = service_id
          @applications = applications
          @metrics = metrics
          @from = from
          @to = to
        end

        # All the keys, concatenated in this order: service response codes,
        # application response codes, service usages, application usages.
        def keys
          [response_code_service_keys,
           response_code_application_keys,
           usage_service_keys,
           usage_application_keys].reduce(:+)
        end

        private

        # Every period instance of the given granularities that overlaps the
        # [from, to] range.
        def periods(granularities)
          granularities.flat_map do |granularity|
            first_period = Period[granularity].new(Time.at(from))
            last_period = Period[granularity].new(Time.at(to))
            (first_period..last_period).to_a
          end
        end

        # Tracked status codes plus their tracked families (e.g. "4XX").
        def response_codes
          CodesCommons::TRACKED_CODES + CodesCommons::TRACKED_CODE_GROUPS
        end

        def response_code_service_keys
          svc_periods = periods(PeriodCommons::PERMANENT_SERVICE_GRANULARITIES)
          svc_periods.product(response_codes).map do |period, code|
            Keys.service_response_code_value_key(service_id, code, period)
          end
        end

        def response_code_application_keys
          app_periods = periods(PeriodCommons::PERMANENT_EXPANDED_GRANULARITIES)
          app_periods.product(response_codes, applications).map do |period, code, app|
            Keys.application_response_code_value_key(service_id, app, code, period)
          end
        end

        def usage_service_keys
          svc_periods = periods(PeriodCommons::PERMANENT_SERVICE_GRANULARITIES)
          svc_periods.product(metrics).map do |period, metric|
            Keys.service_usage_value_key(service_id, metric, period)
          end
        end

        def usage_application_keys
          app_periods = periods(PeriodCommons::PERMANENT_EXPANDED_GRANULARITIES)
          app_periods.product(metrics, applications).map do |period, metric, app|
            Keys.application_usage_value_key(service_id, app, metric, period)
          end
        end
      end
    end
  end
end
|