apisonator 2.100.0
- checksums.yaml +7 -0
- data/CHANGELOG.md +317 -0
- data/Gemfile +11 -0
- data/Gemfile.base +65 -0
- data/Gemfile.lock +319 -0
- data/Gemfile.on_prem +1 -0
- data/Gemfile.on_prem.lock +297 -0
- data/LICENSE +202 -0
- data/NOTICE +15 -0
- data/README.md +230 -0
- data/Rakefile +287 -0
- data/apisonator.gemspec +47 -0
- data/app/api/api.rb +13 -0
- data/app/api/internal/alert_limits.rb +32 -0
- data/app/api/internal/application_keys.rb +49 -0
- data/app/api/internal/application_referrer_filters.rb +43 -0
- data/app/api/internal/applications.rb +77 -0
- data/app/api/internal/errors.rb +54 -0
- data/app/api/internal/events.rb +42 -0
- data/app/api/internal/internal.rb +104 -0
- data/app/api/internal/metrics.rb +40 -0
- data/app/api/internal/service_tokens.rb +46 -0
- data/app/api/internal/services.rb +58 -0
- data/app/api/internal/stats.rb +42 -0
- data/app/api/internal/usagelimits.rb +62 -0
- data/app/api/internal/utilization.rb +23 -0
- data/bin/3scale_backend +223 -0
- data/bin/3scale_backend_worker +26 -0
- data/config.ru +4 -0
- data/config/puma.rb +192 -0
- data/config/schedule.rb +9 -0
- data/ext/mkrf_conf.rb +64 -0
- data/lib/3scale/backend.rb +67 -0
- data/lib/3scale/backend/alert_limit.rb +56 -0
- data/lib/3scale/backend/alerts.rb +137 -0
- data/lib/3scale/backend/analytics/kinesis.rb +3 -0
- data/lib/3scale/backend/analytics/kinesis/adapter.rb +180 -0
- data/lib/3scale/backend/analytics/kinesis/exporter.rb +86 -0
- data/lib/3scale/backend/analytics/kinesis/job.rb +135 -0
- data/lib/3scale/backend/analytics/redshift.rb +3 -0
- data/lib/3scale/backend/analytics/redshift/adapter.rb +367 -0
- data/lib/3scale/backend/analytics/redshift/importer.rb +83 -0
- data/lib/3scale/backend/analytics/redshift/job.rb +33 -0
- data/lib/3scale/backend/application.rb +330 -0
- data/lib/3scale/backend/application_events.rb +76 -0
- data/lib/3scale/backend/background_job.rb +65 -0
- data/lib/3scale/backend/configurable.rb +20 -0
- data/lib/3scale/backend/configuration.rb +151 -0
- data/lib/3scale/backend/configuration/loader.rb +42 -0
- data/lib/3scale/backend/constants.rb +19 -0
- data/lib/3scale/backend/cors.rb +84 -0
- data/lib/3scale/backend/distributed_lock.rb +67 -0
- data/lib/3scale/backend/environment.rb +21 -0
- data/lib/3scale/backend/error_storage.rb +52 -0
- data/lib/3scale/backend/errors.rb +343 -0
- data/lib/3scale/backend/event_storage.rb +120 -0
- data/lib/3scale/backend/experiment.rb +84 -0
- data/lib/3scale/backend/extensions.rb +5 -0
- data/lib/3scale/backend/extensions/array.rb +19 -0
- data/lib/3scale/backend/extensions/hash.rb +26 -0
- data/lib/3scale/backend/extensions/nil_class.rb +13 -0
- data/lib/3scale/backend/extensions/redis.rb +44 -0
- data/lib/3scale/backend/extensions/string.rb +13 -0
- data/lib/3scale/backend/extensions/time.rb +110 -0
- data/lib/3scale/backend/failed_jobs_scheduler.rb +141 -0
- data/lib/3scale/backend/job_fetcher.rb +122 -0
- data/lib/3scale/backend/listener.rb +728 -0
- data/lib/3scale/backend/listener_metrics.rb +99 -0
- data/lib/3scale/backend/logging.rb +48 -0
- data/lib/3scale/backend/logging/external.rb +44 -0
- data/lib/3scale/backend/logging/external/impl.rb +93 -0
- data/lib/3scale/backend/logging/external/impl/airbrake.rb +66 -0
- data/lib/3scale/backend/logging/external/impl/bugsnag.rb +69 -0
- data/lib/3scale/backend/logging/external/impl/default.rb +18 -0
- data/lib/3scale/backend/logging/external/resque.rb +57 -0
- data/lib/3scale/backend/logging/logger.rb +18 -0
- data/lib/3scale/backend/logging/middleware.rb +62 -0
- data/lib/3scale/backend/logging/middleware/json_writer.rb +21 -0
- data/lib/3scale/backend/logging/middleware/text_writer.rb +60 -0
- data/lib/3scale/backend/logging/middleware/writer.rb +143 -0
- data/lib/3scale/backend/logging/worker.rb +107 -0
- data/lib/3scale/backend/manifest.rb +80 -0
- data/lib/3scale/backend/memoizer.rb +277 -0
- data/lib/3scale/backend/metric.rb +275 -0
- data/lib/3scale/backend/metric/collection.rb +91 -0
- data/lib/3scale/backend/oauth.rb +4 -0
- data/lib/3scale/backend/oauth/token.rb +26 -0
- data/lib/3scale/backend/oauth/token_key.rb +30 -0
- data/lib/3scale/backend/oauth/token_storage.rb +313 -0
- data/lib/3scale/backend/oauth/token_value.rb +25 -0
- data/lib/3scale/backend/period.rb +3 -0
- data/lib/3scale/backend/period/boundary.rb +107 -0
- data/lib/3scale/backend/period/cache.rb +28 -0
- data/lib/3scale/backend/period/period.rb +402 -0
- data/lib/3scale/backend/queue_storage.rb +16 -0
- data/lib/3scale/backend/rack.rb +49 -0
- data/lib/3scale/backend/rack/exception_catcher.rb +136 -0
- data/lib/3scale/backend/rack/internal_error_catcher.rb +23 -0
- data/lib/3scale/backend/rack/prometheus.rb +19 -0
- data/lib/3scale/backend/saas.rb +6 -0
- data/lib/3scale/backend/saas_analytics.rb +4 -0
- data/lib/3scale/backend/server.rb +30 -0
- data/lib/3scale/backend/server/falcon.rb +52 -0
- data/lib/3scale/backend/server/puma.rb +71 -0
- data/lib/3scale/backend/service.rb +317 -0
- data/lib/3scale/backend/service_token.rb +97 -0
- data/lib/3scale/backend/stats.rb +8 -0
- data/lib/3scale/backend/stats/aggregator.rb +170 -0
- data/lib/3scale/backend/stats/aggregators/base.rb +72 -0
- data/lib/3scale/backend/stats/aggregators/response_code.rb +58 -0
- data/lib/3scale/backend/stats/aggregators/usage.rb +34 -0
- data/lib/3scale/backend/stats/bucket_reader.rb +135 -0
- data/lib/3scale/backend/stats/bucket_storage.rb +108 -0
- data/lib/3scale/backend/stats/cleaner.rb +195 -0
- data/lib/3scale/backend/stats/codes_commons.rb +14 -0
- data/lib/3scale/backend/stats/delete_job_def.rb +60 -0
- data/lib/3scale/backend/stats/key_generator.rb +73 -0
- data/lib/3scale/backend/stats/keys.rb +104 -0
- data/lib/3scale/backend/stats/partition_eraser_job.rb +58 -0
- data/lib/3scale/backend/stats/partition_generator_job.rb +46 -0
- data/lib/3scale/backend/stats/period_commons.rb +34 -0
- data/lib/3scale/backend/stats/stats_parser.rb +141 -0
- data/lib/3scale/backend/stats/storage.rb +113 -0
- data/lib/3scale/backend/statsd.rb +14 -0
- data/lib/3scale/backend/storable.rb +35 -0
- data/lib/3scale/backend/storage.rb +40 -0
- data/lib/3scale/backend/storage_async.rb +4 -0
- data/lib/3scale/backend/storage_async/async_redis.rb +21 -0
- data/lib/3scale/backend/storage_async/client.rb +205 -0
- data/lib/3scale/backend/storage_async/pipeline.rb +79 -0
- data/lib/3scale/backend/storage_async/resque_extensions.rb +30 -0
- data/lib/3scale/backend/storage_helpers.rb +278 -0
- data/lib/3scale/backend/storage_key_helpers.rb +9 -0
- data/lib/3scale/backend/storage_sync.rb +43 -0
- data/lib/3scale/backend/transaction.rb +62 -0
- data/lib/3scale/backend/transactor.rb +177 -0
- data/lib/3scale/backend/transactor/limit_headers.rb +54 -0
- data/lib/3scale/backend/transactor/notify_batcher.rb +139 -0
- data/lib/3scale/backend/transactor/notify_job.rb +47 -0
- data/lib/3scale/backend/transactor/process_job.rb +33 -0
- data/lib/3scale/backend/transactor/report_job.rb +84 -0
- data/lib/3scale/backend/transactor/status.rb +236 -0
- data/lib/3scale/backend/transactor/usage_report.rb +182 -0
- data/lib/3scale/backend/usage.rb +63 -0
- data/lib/3scale/backend/usage_limit.rb +115 -0
- data/lib/3scale/backend/use_cases/provider_key_change_use_case.rb +60 -0
- data/lib/3scale/backend/util.rb +17 -0
- data/lib/3scale/backend/validators.rb +26 -0
- data/lib/3scale/backend/validators/base.rb +36 -0
- data/lib/3scale/backend/validators/key.rb +17 -0
- data/lib/3scale/backend/validators/limits.rb +57 -0
- data/lib/3scale/backend/validators/oauth_key.rb +15 -0
- data/lib/3scale/backend/validators/oauth_setting.rb +15 -0
- data/lib/3scale/backend/validators/redirect_uri.rb +33 -0
- data/lib/3scale/backend/validators/referrer.rb +60 -0
- data/lib/3scale/backend/validators/service_state.rb +15 -0
- data/lib/3scale/backend/validators/state.rb +15 -0
- data/lib/3scale/backend/version.rb +5 -0
- data/lib/3scale/backend/views/oauth_access_tokens.builder +14 -0
- data/lib/3scale/backend/views/oauth_app_id_by_token.builder +4 -0
- data/lib/3scale/backend/worker.rb +87 -0
- data/lib/3scale/backend/worker_async.rb +88 -0
- data/lib/3scale/backend/worker_metrics.rb +44 -0
- data/lib/3scale/backend/worker_sync.rb +32 -0
- data/lib/3scale/bundler_shim.rb +17 -0
- data/lib/3scale/prometheus_server.rb +10 -0
- data/lib/3scale/tasks/connectivity.rake +41 -0
- data/lib/3scale/tasks/helpers.rb +3 -0
- data/lib/3scale/tasks/helpers/environment.rb +23 -0
- data/lib/3scale/tasks/stats.rake +131 -0
- data/lib/3scale/tasks/swagger.rake +46 -0
- data/licenses.xml +1215 -0
- metadata +227 -0
data/lib/3scale/backend/service_token.rb @@ -0,0 +1,97 @@

```ruby
module ThreeScale
  module Backend
    class ServiceToken

      module KeyHelpers
        def key(service_token, service_id)
          encode_key("service_token/token:#{service_token}/service_id:#{service_id}")
        end
      end

      include KeyHelpers
      extend KeyHelpers
      include Storable

      ValidationError = Class.new(ThreeScale::Backend::Invalid)

      class InvalidServiceToken < ValidationError
        def initialize
          super('Service token cannot be blank'.freeze)
        end
      end

      class InvalidServiceId < ValidationError
        def initialize
          super('Service ID cannot be blank'.freeze)
        end
      end

      # We want to use a hash in Redis because in the future we might have
      # several fields related to permissions and roles.
      # For now we do not need any of those fields, but we need to define at
      # least one to be able to create a hash, even if we are not going to use
      # it.
      PERMISSIONS_KEY_FIELD = 'permissions'.freeze
      private_constant :PERMISSIONS_KEY_FIELD

      class << self
        include Memoizer::Decorator

        def save(service_token, service_id)
          validate_pairs([{ service_token: service_token, service_id: service_id }])
          storage.hset(key(service_token, service_id), PERMISSIONS_KEY_FIELD, ''.freeze)
        end

        # Saves a collection of (service_token, service_id) pairs only if all
        # the pairs contain valid data, meaning that there are no null or empty
        # strings.
        def save_pairs(token_id_pairs)
          validate_pairs(token_id_pairs)

          token_id_pairs.each do |pair|
            unchecked_save(pair[:service_token], pair[:service_id])
          end
        end

        def delete(service_token, service_id)
          res = storage.del(key(service_token, service_id))
          clear_cache(service_token, service_id)
          res
        end

        def exists?(service_token, service_id)
          storage.exists(key(service_token, service_id))
        end
        memoize :exists?

        private

        def validate_pairs(token_id_pairs)
          invalid_token = token_id_pairs.any? do |pair|
            pair[:service_token].nil? || pair[:service_token].empty?
          end
          raise InvalidServiceToken if invalid_token

          invalid_service_id = token_id_pairs.any? do |pair|
            pair[:service_id].nil? || pair[:service_id].to_s.empty?
          end
          raise InvalidServiceId if invalid_service_id
        end

        def unchecked_save(service_token, service_id)
          storage.hset(key(service_token, service_id), PERMISSIONS_KEY_FIELD, ''.freeze)
        end

        def storage
          Storage.instance
        end

        def clear_cache(service_token, service_id)
          Memoizer.clear(Memoizer.build_key(self, :exists?, service_token, service_id))
        end
      end
    end
  end
end
```
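The class above is a thin CRUD-style layer over Redis hashes. A minimal usage sketch (hypothetical values; assumes a configured Redis-backed `Storage`):

```ruby
# Hypothetical token/service pairs; Storage.instance must point at Redis.
ThreeScale::Backend::ServiceToken.save('a_token', '42')
ThreeScale::Backend::ServiceToken.exists?('a_token', '42') # memoized after the first call

# save_pairs validates the whole batch before writing anything, so no pair
# is persisted if any token or service ID is blank:
ThreeScale::Backend::ServiceToken.save_pairs(
  [{ service_token: 'tok_a', service_id: '1' },
   { service_token: 'tok_b', service_id: '2' }]
)

# delete removes the Redis key and clears the memoized exists? entry.
ThreeScale::Backend::ServiceToken.delete('a_token', '42')
```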
data/lib/3scale/backend/stats.rb @@ -0,0 +1,8 @@

```ruby
require '3scale/backend/stats/codes_commons'
require '3scale/backend/stats/period_commons'
require '3scale/backend/stats/aggregator'
require '3scale/backend/stats/delete_job_def'
require '3scale/backend/stats/key_generator'
require '3scale/backend/stats/partition_generator_job'
require '3scale/backend/stats/partition_eraser_job'
require '3scale/backend/stats/cleaner'
```
data/lib/3scale/backend/stats/aggregator.rb @@ -0,0 +1,170 @@

```ruby
require '3scale/backend/logging'
require '3scale/backend/stats/storage'
require '3scale/backend/stats/keys'
require '3scale/backend/application_events'
require '3scale/backend/transaction'
require '3scale/backend/stats/aggregators/response_code'
require '3scale/backend/stats/aggregators/usage'

module ThreeScale
  module Backend
    module Stats

      # This class contains several methods that deal with buckets, which are
      # only used in the SaaS analytics system.
      class Aggregator
        # We need to limit the number of buckets stored in the system.
        # The reason is that our Redis can grow VERY quickly if we start
        # creating buckets and we never delete them.
        # When the defined max is reached, we simply disable the option
        # to save the stats keys in buckets. Yes, we will lose data,
        # but that is better than the alternative. We will try to find
        # a better alternative once we cannot afford to lose data.
        # Right now, we are just deleting the stats keys with
        # period = minute, so we can restore everything else.
        MAX_BUCKETS = 360
        private_constant :MAX_BUCKETS

        MAX_BUCKETS_CREATED_MSG =
          'Bucket creation has been disabled. Max number of stats buckets reached'.freeze
        private_constant :MAX_BUCKETS_CREATED_MSG

        class << self
          include Backend::StorageKeyHelpers
          include Configurable
          include Keys
          include Logging

          # This method stores the events in buckets if that option is enabled
          # or if it was disabled because of an emergency (not because a user
          # did it manually), and Kinesis has already consumed all the pending
          # buckets.
          def process(transactions)
            current_bucket = nil

            if configuration.can_create_event_buckets
              # Only disable indicating emergency if bucket storage is enabled.
              # Otherwise, we might indicate emergency when a user manually
              # disabled it previously.
              if Storage.enabled? && buckets_limit_exceeded?
                Storage.disable!(true)
                log_bucket_creation_disabled
              elsif save_in_bucket?
                Storage.enable! unless Storage.enabled?
                current_bucket = Time.now.utc.beginning_of_bucket(stats_bucket_size)
                                     .to_not_compact_s
              end
            end

            touched_apps = aggregate(transactions, current_bucket)

            ApplicationEvents.generate(touched_apps.values)
            update_alerts(touched_apps)
            begin
              ApplicationEvents.ping
            rescue ApplicationEvents::PingFailed => e
              # we could not ping the frontend, log it
              logger.notify e
            end
          end

          private

          # Aggregate stats values for a collection of Transactions.
          #
          # @param [Array] transactions the collection of transactions
          # @param [String, Nil] bucket
          # @return [Hash] A Hash where each key is an application_id and the
          #   value is another Hash with service_id and application_id.
          def aggregate(transactions, bucket = nil)
            touched_apps = {}

            transactions.each_slice(PIPELINED_SLICE_SIZE) do |slice|
              storage.pipelined do
                slice.each do |transaction|
                  aggregate_all(transaction, bucket)
                  touched_apps.merge!(touched_relation(transaction))
                end
              end
            end

            touched_apps
          end

          def aggregate_all(transaction, bucket)
            [Aggregators::ResponseCode, Aggregators::Usage].each do |aggregator|
              aggregator.aggregate(transaction, bucket)
            end
          end

          def save_in_bucket?
            if Storage.enabled?
              true
            else
              Storage.last_disable_was_emergency? && bucket_storage.pending_buckets_size == 0
            end
          end

          def stats_bucket_size
            @stats_bucket_size ||= (configuration.stats.bucket_size || 5)
          end

          def storage
            Backend::Storage.instance
          end

          def bucket_storage
            Stats::Storage.bucket_storage
          end

          # Return a Hash with the info needed to update usages and alerts.
          #
          # @param [Transaction] transaction
          # @return [Hash] the hash that contains the application_id that has
          #   been updated and the transaction's service_id. The key of the
          #   hash is the application_id.
          def touched_relation(transaction)
            relation_value = transaction.send(:application_id)
            { relation_value => { application_id: relation_value,
                                  service_id: transaction.service_id } }
          end

          def buckets_limit_exceeded?
            bucket_storage.pending_buckets_size > MAX_BUCKETS
          end

          def log_bucket_creation_disabled
            logger.info(MAX_BUCKETS_CREATED_MSG)
          end

          def update_alerts(applications)
            current_timestamp = Time.now.getutc

            applications.each do |_appid, values|
              service_id = values[:service_id]
              application = Backend::Application.load(service_id,
                                                      values[:application_id])

              application.load_metric_names
              usage = Usage.application_usage(application, current_timestamp)
              status = Transactor::Status.new(service_id: service_id,
                                              application: application,
                                              values: usage)

              max_utilization, max_record = Alerts.utilization(
                status.application_usage_reports)

              if max_utilization >= 0.0
                Alerts.update_utilization(service_id,
                                          values[:application_id],
                                          max_utilization,
                                          max_record,
                                          current_timestamp)
              end
            end
          end
        end
      end
    end
  end
end
```
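The bucket name computed in `process` comes from `Time#beginning_of_bucket` and `#to_not_compact_s`, which live in the gem's time extensions and are not part of this hunk. Going by the `YYYYmmddHHMMSS` bucket naming that `BucketReader` documents further below, the derivation is roughly:

```ruby
# Rough sketch under two assumptions: beginning_of_bucket rounds a UTC time
# down to a multiple of the bucket size in seconds, and to_not_compact_s
# renders it as YYYYmmddHHMMSS (the format BucketReader expects).
t = Time.utc(2020, 6, 1, 10, 30, 47)
bucket_size = 5 # seconds; configuration.stats.bucket_size defaults to 5 above
rounded = Time.at((t.to_i / bucket_size) * bucket_size).utc
rounded.strftime('%Y%m%d%H%M%S') # => "20200601103045"
```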
data/lib/3scale/backend/stats/aggregators/base.rb @@ -0,0 +1,72 @@

```ruby
module ThreeScale
  module Backend
    module Stats
      module Aggregators
        module Base
          # Aggregates a value in a timestamp for all given keys using a specific
          # Redis command to store them. If a bucket_key is specified, each key will
          # be added to a Redis Set with that name.
          #
          # @param [Integer] value
          # @param [Time] timestamp
          # @param [Array] keys array of {(service|application|user) => "key"}
          # @param [Symbol] cmd
          # @param [String, Nil] bucket
          def aggregate_values(value, timestamp, keys, cmd, bucket)
            keys_for_bucket = []

            keys.each do |metric_type, prefix_key|
              granularities(metric_type).each do |granularity|
                key = counter_key(prefix_key, granularity.new(timestamp))
                expire_time = Stats::PeriodCommons.expire_time_for_granularity(granularity)

                store_key(cmd, key, value, expire_time)

                unless Stats::PeriodCommons::EXCLUDED_FOR_BUCKETS.include?(granularity)
                  keys_for_bucket << key
                end
              end
            end

            store_in_changed_keys(keys_for_bucket, bucket) if bucket
          end

          # Returns the Redis command to use depending on raw_value.
          # If raw_value is a string that starts with '#', it returns :set.
          # Otherwise, it returns :incrby.
          #
          # @param [String] raw_value
          # @return [Symbol] the Redis command
          def storage_cmd(raw_value)
            Backend::Usage.is_set?(raw_value) ? :set : :incrby
          end

          def storage
            Backend::Storage.instance
          end

          protected

          def granularities(metric_type)
            metric_type == :service ? Stats::PeriodCommons::SERVICE_GRANULARITIES : Stats::PeriodCommons::EXPANDED_GRANULARITIES
          end

          def store_key(cmd, key, value, expire_time = nil)
            storage.send(cmd, key, value)
            storage.expire(key, expire_time) if expire_time
          end

          def store_in_changed_keys(keys, bucket)
            bucket_storage.put_in_bucket(keys, bucket)
          end

          private

          def bucket_storage
            Stats::Storage.bucket_storage
          end
        end
      end
    end
  end
end
```
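To make the `storage_cmd` dispatch concrete: a plain numeric raw value increments the stats counter, while a `'#'`-prefixed value overwrites it. The behaviour of `Backend::Usage.is_set?` is assumed here from the comment above and from how the `Usage` aggregator uses it:

```ruby
# Illustrative sketch, not part of the diff:
storage_cmd('10')  # => :incrby -- each period counter grows by 10
storage_cmd('#10') # => :set    -- each period counter is overwritten
                   #    (Backend::Usage.get_from is assumed to strip the '#')
```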
data/lib/3scale/backend/stats/aggregators/response_code.rb @@ -0,0 +1,58 @@

```ruby
require '3scale/backend/stats/keys'
require '3scale/backend/transaction'
require '3scale/backend/stats/aggregators/base'

module ThreeScale
  module Backend
    module Stats
      module Aggregators
        class ResponseCode
          class << self
            include Keys
            include Base

            def aggregate(transaction, bucket = nil)
              keys_for_multiple_codes = keys_for_response_code(transaction)
              timestamp = transaction.timestamp

              # For now, we are not interested in storing response codes in
              # buckets; that is why we set bucket to nil.
              bucket = nil

              keys_for_multiple_codes.each do |keys|
                aggregate_values(1, timestamp, keys, :incrby, bucket)
              end
            end

            protected

            def keys_for_response_code(transaction)
              response_code = transaction.extract_response_code
              return {} unless response_code
              values = values_to_inc(response_code)
              values.flat_map do |code|
                Keys.transaction_keys(transaction, :response_code, code)
              end
            end

            def values_to_inc(response_code)
              group_code = Stats::CodesCommons.get_http_code_group(response_code)
              [].tap do |keys|
                keys << group_code if tracked_group_code?(group_code)
                keys << response_code.to_s if tracked_code?(response_code)
              end
            end

            def tracked_code?(code)
              Stats::CodesCommons::TRACKED_CODES.include?(code)
            end

            def tracked_group_code?(group_code)
              Stats::CodesCommons::TRACKED_CODE_GROUPS.include?(group_code)
            end
          end
        end
      end
    end
  end
end
```
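As a worked example of `values_to_inc`: a single response can bump both its exact-code counter and its group counter, assuming `CodesCommons` maps a code to its `NXX` group and tracks both (the exact tracked sets live in codes_commons.rb, which is not shown in this hunk):

```ruby
# Hypothetical outcomes:
values_to_inc(404) # => ["4XX", "404"] if both the group and the code are tracked
values_to_inc(418) # => ["4XX"]        if only the group is tracked
# Each returned value is expanded into stats keys and incremented by one
# via aggregate_values(1, timestamp, keys, :incrby, nil).
```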
data/lib/3scale/backend/stats/aggregators/usage.rb @@ -0,0 +1,34 @@

```ruby
require '3scale/backend/stats/keys'
require '3scale/backend/transaction'
require '3scale/backend/stats/aggregators/base'

module ThreeScale
  module Backend
    module Stats
      module Aggregators
        class Usage
          class << self
            include Keys
            include Base

            # Aggregates the usage of a transaction. If a bucket time is specified,
            # all new or updated stats keys will be stored in a Redis Set.
            #
            # @param [Transaction] transaction
            # @param [String, Nil] bucket
            def aggregate(transaction, bucket = nil)
              transaction.usage.each do |metric_id, raw_value|
                metric_keys = Keys.transaction_keys(transaction, :metric, metric_id)
                cmd = storage_cmd(raw_value)
                value = Backend::Usage.get_from raw_value

                aggregate_values(value, transaction.timestamp, metric_keys, cmd, bucket)
              end
            end
          end
        end
      end
    end
  end
end
```
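A hypothetical transaction shows how the two Redis commands from `Aggregators::Base` come into play per metric:

```ruby
# Illustrative usage hash inside a Transaction (metric_id => raw value):
# transaction.usage # => { '1001' => '3', '1002' => '#50' }
#
# Metric '1001': storage_cmd('3')   => :incrby, counters grow by 3.
# Metric '1002': storage_cmd('#50') => :set, counters are overwritten with 50
#                (Backend::Usage.get_from is assumed to strip the '#').
```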
data/lib/3scale/backend/stats/bucket_reader.rb @@ -0,0 +1,135 @@

```ruby
module ThreeScale
  module Backend
    module Stats

      # This class allows us to read the buckets that we are creating in Redis
      # to store the stats keys that change. It also allows us to keep track of
      # the ones that are pending to be read.
      class BucketReader

        # This private nested class allows us to isolate accesses to Redis.
        class LatestBucketReadMarker
          LATEST_BUCKET_READ_KEY = 'send_to_kinesis:latest_bucket_read'
          private_constant :LATEST_BUCKET_READ_KEY

          def initialize(storage)
            @storage = storage
          end

          def latest_bucket_read=(latest_bucket_read)
            storage.set(LATEST_BUCKET_READ_KEY, latest_bucket_read)
          end

          def latest_bucket_read
            storage.get(LATEST_BUCKET_READ_KEY)
          end

          private

          attr_reader :storage
        end
        private_constant :LatestBucketReadMarker

        # Before we read and mark a bucket as read, we need to make sure that
        # it will not receive more events. Otherwise, there is the risk that
        # we will miss some events.
        # Buckets are created every 'bucket_create_interval' seconds; it is one
        # of the parameters that 'initialize' receives. We should be able to
        # read any bucket identified with a timestamp ts, where
        # ts < Time.now - bucket_create_interval. However, in order to be sure
        # that we will not miss any events, we define a constant that adds
        # some backup time.
        BACKUP_SECONDS_READ_BUCKET = 10
        private_constant :BACKUP_SECONDS_READ_BUCKET

        KEYS_SLICE_CALL_TO_REDIS = 1000
        private_constant :KEYS_SLICE_CALL_TO_REDIS

        InvalidInterval = Class.new(ThreeScale::Backend::Error)

        def initialize(bucket_create_interval, bucket_storage, events_storage)
          # This is needed because ThreeScale::Backend::TimeHacks.beginning_of_bucket
          if 60 % bucket_create_interval != 0 || bucket_create_interval <= 0
            raise InvalidInterval, 'Bucket create interval needs to divide 60'
          end

          @bucket_create_interval = bucket_create_interval
          @bucket_storage = bucket_storage
          @events_storage = events_storage
          @latest_bucket_read_marker = LatestBucketReadMarker.new(bucket_storage.storage)
        end

        # Returns the pending events and the bucket of the most recent of the
        # events sent. This allows the caller to call latest_bucket_read= when
        # it has processed all the events.
        def pending_events_in_buckets(end_time_utc: Time.now.utc, max_buckets: nil)
          buckets = if max_buckets
                      pending_buckets(end_time_utc).take(max_buckets)
                    else
                      pending_buckets(end_time_utc).to_a
                    end

          { events: events(buckets), latest_bucket: buckets.last }
        end

        def latest_bucket_read=(latest_bucket_read)
          latest_bucket_read_marker.latest_bucket_read = latest_bucket_read
        end

        private

        attr_reader :bucket_create_interval,
                    :bucket_storage,
                    :events_storage,
                    :latest_bucket_read_marker

        def pending_buckets(end_time_utc = Time.now.utc)
          latest_bucket_read = latest_bucket_read_marker.latest_bucket_read
          start_time = unless latest_bucket_read.nil?
                         bucket_to_time(latest_bucket_read) + bucket_create_interval
                       end
          end_time = end_time_with_backup(end_time_utc)
          stored_buckets(start_time, end_time)
        end

        def events(buckets)
          event_keys = bucket_storage.content(buckets)

          # Values are stored as strings in Redis, but we want integers.
          # Some values can be nil. This happens when the key has a TTL and
          # we read it once it has expired. Right now, event keys with
          # granularity = 'minute' expire after 180 s (see
          # Stats::Aggregators::Base module). We might need to increase that
          # to make sure that we do not miss any values.
          event_values = event_keys.each_slice(KEYS_SLICE_CALL_TO_REDIS)
                                   .flat_map do |keys_slice|
            events_storage.mget(keys_slice)
          end.map { |value| Integer(value) if value }

          Hash[event_keys.zip(event_values)]
        end

        def end_time_with_backup(end_time_utc)
          [end_time_utc, Time.now.utc - bucket_create_interval - BACKUP_SECONDS_READ_BUCKET].min
        end

        def stored_buckets(start_time_utc, end_time_utc)
          range = {}
          range[:first] = time_to_bucket_name(start_time_utc) if start_time_utc
          range[:last] = time_to_bucket_name(end_time_utc)
          bucket_storage.buckets(range)
        end

        def time_to_bucket_name(time_utc)
          # We know that bucket names follow a fixed pattern: they are a
          # timestamp with format YYYYmmddHHMMSS.
          time_utc.strftime('%Y%m%d%H%M%S')
        end

        def bucket_to_time(bucket_name)
          DateTime.parse(bucket_name).to_time.utc
        end
      end
    end
  end
end
```
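A consumer is meant to drive `BucketReader` in a read-then-acknowledge loop, advancing the marker only after the events are safely handed off (the Kinesis exporter plays this role in this codebase; the loop below is an illustrative sketch with assumed collaborators):

```ruby
# bucket_storage, events_storage and send_downstream are assumed to be
# provided by the caller; they are not defined in this hunk.
reader = ThreeScale::Backend::Stats::BucketReader.new(
  30, bucket_storage, events_storage # 30 divides 60, so the interval is valid
)

loop do
  result = reader.pending_events_in_buckets(max_buckets: 10)
  break unless result[:latest_bucket] # nothing safely readable yet

  send_downstream(result[:events]) # stats key => Integer value (nil if the key expired)

  # Acknowledge only after a successful hand-off; after a crash the same
  # buckets are simply read again instead of being lost.
  reader.latest_bucket_read = result[:latest_bucket]
end
```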