apisonator 2.100.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/CHANGELOG.md +317 -0
- data/Gemfile +11 -0
- data/Gemfile.base +65 -0
- data/Gemfile.lock +319 -0
- data/Gemfile.on_prem +1 -0
- data/Gemfile.on_prem.lock +297 -0
- data/LICENSE +202 -0
- data/NOTICE +15 -0
- data/README.md +230 -0
- data/Rakefile +287 -0
- data/apisonator.gemspec +47 -0
- data/app/api/api.rb +13 -0
- data/app/api/internal/alert_limits.rb +32 -0
- data/app/api/internal/application_keys.rb +49 -0
- data/app/api/internal/application_referrer_filters.rb +43 -0
- data/app/api/internal/applications.rb +77 -0
- data/app/api/internal/errors.rb +54 -0
- data/app/api/internal/events.rb +42 -0
- data/app/api/internal/internal.rb +104 -0
- data/app/api/internal/metrics.rb +40 -0
- data/app/api/internal/service_tokens.rb +46 -0
- data/app/api/internal/services.rb +58 -0
- data/app/api/internal/stats.rb +42 -0
- data/app/api/internal/usagelimits.rb +62 -0
- data/app/api/internal/utilization.rb +23 -0
- data/bin/3scale_backend +223 -0
- data/bin/3scale_backend_worker +26 -0
- data/config.ru +4 -0
- data/config/puma.rb +192 -0
- data/config/schedule.rb +9 -0
- data/ext/mkrf_conf.rb +64 -0
- data/lib/3scale/backend.rb +67 -0
- data/lib/3scale/backend/alert_limit.rb +56 -0
- data/lib/3scale/backend/alerts.rb +137 -0
- data/lib/3scale/backend/analytics/kinesis.rb +3 -0
- data/lib/3scale/backend/analytics/kinesis/adapter.rb +180 -0
- data/lib/3scale/backend/analytics/kinesis/exporter.rb +86 -0
- data/lib/3scale/backend/analytics/kinesis/job.rb +135 -0
- data/lib/3scale/backend/analytics/redshift.rb +3 -0
- data/lib/3scale/backend/analytics/redshift/adapter.rb +367 -0
- data/lib/3scale/backend/analytics/redshift/importer.rb +83 -0
- data/lib/3scale/backend/analytics/redshift/job.rb +33 -0
- data/lib/3scale/backend/application.rb +330 -0
- data/lib/3scale/backend/application_events.rb +76 -0
- data/lib/3scale/backend/background_job.rb +65 -0
- data/lib/3scale/backend/configurable.rb +20 -0
- data/lib/3scale/backend/configuration.rb +151 -0
- data/lib/3scale/backend/configuration/loader.rb +42 -0
- data/lib/3scale/backend/constants.rb +19 -0
- data/lib/3scale/backend/cors.rb +84 -0
- data/lib/3scale/backend/distributed_lock.rb +67 -0
- data/lib/3scale/backend/environment.rb +21 -0
- data/lib/3scale/backend/error_storage.rb +52 -0
- data/lib/3scale/backend/errors.rb +343 -0
- data/lib/3scale/backend/event_storage.rb +120 -0
- data/lib/3scale/backend/experiment.rb +84 -0
- data/lib/3scale/backend/extensions.rb +5 -0
- data/lib/3scale/backend/extensions/array.rb +19 -0
- data/lib/3scale/backend/extensions/hash.rb +26 -0
- data/lib/3scale/backend/extensions/nil_class.rb +13 -0
- data/lib/3scale/backend/extensions/redis.rb +44 -0
- data/lib/3scale/backend/extensions/string.rb +13 -0
- data/lib/3scale/backend/extensions/time.rb +110 -0
- data/lib/3scale/backend/failed_jobs_scheduler.rb +141 -0
- data/lib/3scale/backend/job_fetcher.rb +122 -0
- data/lib/3scale/backend/listener.rb +728 -0
- data/lib/3scale/backend/listener_metrics.rb +99 -0
- data/lib/3scale/backend/logging.rb +48 -0
- data/lib/3scale/backend/logging/external.rb +44 -0
- data/lib/3scale/backend/logging/external/impl.rb +93 -0
- data/lib/3scale/backend/logging/external/impl/airbrake.rb +66 -0
- data/lib/3scale/backend/logging/external/impl/bugsnag.rb +69 -0
- data/lib/3scale/backend/logging/external/impl/default.rb +18 -0
- data/lib/3scale/backend/logging/external/resque.rb +57 -0
- data/lib/3scale/backend/logging/logger.rb +18 -0
- data/lib/3scale/backend/logging/middleware.rb +62 -0
- data/lib/3scale/backend/logging/middleware/json_writer.rb +21 -0
- data/lib/3scale/backend/logging/middleware/text_writer.rb +60 -0
- data/lib/3scale/backend/logging/middleware/writer.rb +143 -0
- data/lib/3scale/backend/logging/worker.rb +107 -0
- data/lib/3scale/backend/manifest.rb +80 -0
- data/lib/3scale/backend/memoizer.rb +277 -0
- data/lib/3scale/backend/metric.rb +275 -0
- data/lib/3scale/backend/metric/collection.rb +91 -0
- data/lib/3scale/backend/oauth.rb +4 -0
- data/lib/3scale/backend/oauth/token.rb +26 -0
- data/lib/3scale/backend/oauth/token_key.rb +30 -0
- data/lib/3scale/backend/oauth/token_storage.rb +313 -0
- data/lib/3scale/backend/oauth/token_value.rb +25 -0
- data/lib/3scale/backend/period.rb +3 -0
- data/lib/3scale/backend/period/boundary.rb +107 -0
- data/lib/3scale/backend/period/cache.rb +28 -0
- data/lib/3scale/backend/period/period.rb +402 -0
- data/lib/3scale/backend/queue_storage.rb +16 -0
- data/lib/3scale/backend/rack.rb +49 -0
- data/lib/3scale/backend/rack/exception_catcher.rb +136 -0
- data/lib/3scale/backend/rack/internal_error_catcher.rb +23 -0
- data/lib/3scale/backend/rack/prometheus.rb +19 -0
- data/lib/3scale/backend/saas.rb +6 -0
- data/lib/3scale/backend/saas_analytics.rb +4 -0
- data/lib/3scale/backend/server.rb +30 -0
- data/lib/3scale/backend/server/falcon.rb +52 -0
- data/lib/3scale/backend/server/puma.rb +71 -0
- data/lib/3scale/backend/service.rb +317 -0
- data/lib/3scale/backend/service_token.rb +97 -0
- data/lib/3scale/backend/stats.rb +8 -0
- data/lib/3scale/backend/stats/aggregator.rb +170 -0
- data/lib/3scale/backend/stats/aggregators/base.rb +72 -0
- data/lib/3scale/backend/stats/aggregators/response_code.rb +58 -0
- data/lib/3scale/backend/stats/aggregators/usage.rb +34 -0
- data/lib/3scale/backend/stats/bucket_reader.rb +135 -0
- data/lib/3scale/backend/stats/bucket_storage.rb +108 -0
- data/lib/3scale/backend/stats/cleaner.rb +195 -0
- data/lib/3scale/backend/stats/codes_commons.rb +14 -0
- data/lib/3scale/backend/stats/delete_job_def.rb +60 -0
- data/lib/3scale/backend/stats/key_generator.rb +73 -0
- data/lib/3scale/backend/stats/keys.rb +104 -0
- data/lib/3scale/backend/stats/partition_eraser_job.rb +58 -0
- data/lib/3scale/backend/stats/partition_generator_job.rb +46 -0
- data/lib/3scale/backend/stats/period_commons.rb +34 -0
- data/lib/3scale/backend/stats/stats_parser.rb +141 -0
- data/lib/3scale/backend/stats/storage.rb +113 -0
- data/lib/3scale/backend/statsd.rb +14 -0
- data/lib/3scale/backend/storable.rb +35 -0
- data/lib/3scale/backend/storage.rb +40 -0
- data/lib/3scale/backend/storage_async.rb +4 -0
- data/lib/3scale/backend/storage_async/async_redis.rb +21 -0
- data/lib/3scale/backend/storage_async/client.rb +205 -0
- data/lib/3scale/backend/storage_async/pipeline.rb +79 -0
- data/lib/3scale/backend/storage_async/resque_extensions.rb +30 -0
- data/lib/3scale/backend/storage_helpers.rb +278 -0
- data/lib/3scale/backend/storage_key_helpers.rb +9 -0
- data/lib/3scale/backend/storage_sync.rb +43 -0
- data/lib/3scale/backend/transaction.rb +62 -0
- data/lib/3scale/backend/transactor.rb +177 -0
- data/lib/3scale/backend/transactor/limit_headers.rb +54 -0
- data/lib/3scale/backend/transactor/notify_batcher.rb +139 -0
- data/lib/3scale/backend/transactor/notify_job.rb +47 -0
- data/lib/3scale/backend/transactor/process_job.rb +33 -0
- data/lib/3scale/backend/transactor/report_job.rb +84 -0
- data/lib/3scale/backend/transactor/status.rb +236 -0
- data/lib/3scale/backend/transactor/usage_report.rb +182 -0
- data/lib/3scale/backend/usage.rb +63 -0
- data/lib/3scale/backend/usage_limit.rb +115 -0
- data/lib/3scale/backend/use_cases/provider_key_change_use_case.rb +60 -0
- data/lib/3scale/backend/util.rb +17 -0
- data/lib/3scale/backend/validators.rb +26 -0
- data/lib/3scale/backend/validators/base.rb +36 -0
- data/lib/3scale/backend/validators/key.rb +17 -0
- data/lib/3scale/backend/validators/limits.rb +57 -0
- data/lib/3scale/backend/validators/oauth_key.rb +15 -0
- data/lib/3scale/backend/validators/oauth_setting.rb +15 -0
- data/lib/3scale/backend/validators/redirect_uri.rb +33 -0
- data/lib/3scale/backend/validators/referrer.rb +60 -0
- data/lib/3scale/backend/validators/service_state.rb +15 -0
- data/lib/3scale/backend/validators/state.rb +15 -0
- data/lib/3scale/backend/version.rb +5 -0
- data/lib/3scale/backend/views/oauth_access_tokens.builder +14 -0
- data/lib/3scale/backend/views/oauth_app_id_by_token.builder +4 -0
- data/lib/3scale/backend/worker.rb +87 -0
- data/lib/3scale/backend/worker_async.rb +88 -0
- data/lib/3scale/backend/worker_metrics.rb +44 -0
- data/lib/3scale/backend/worker_sync.rb +32 -0
- data/lib/3scale/bundler_shim.rb +17 -0
- data/lib/3scale/prometheus_server.rb +10 -0
- data/lib/3scale/tasks/connectivity.rake +41 -0
- data/lib/3scale/tasks/helpers.rb +3 -0
- data/lib/3scale/tasks/helpers/environment.rb +23 -0
- data/lib/3scale/tasks/stats.rake +131 -0
- data/lib/3scale/tasks/swagger.rake +46 -0
- data/licenses.xml +1215 -0
- metadata +227 -0
module ThreeScale
  module Backend
    class Metric
      class Collection
        include Storable

        # service_id - id of the service whose metrics this collection
        # resolves. Metric-id and parent-id lookups are cached per instance.
        def initialize(service_id)
          @service_id = service_id
          @metric_ids = {}  # metric name => metric id cache
          @parent_ids = {}  # metric id => parent metric id cache
        end

        # Accepts usage as {'metric_name' => value, ...} and converts it into
        # {metric_id => value, ...}, evaluating also metric hierarchy.
        #
        # == Example
        #
        # Let's suppose there is a metric called "hits" with id 1001 and it has one child
        # metric called "search_queries" with id 1002. Then:
        #
        #   metrics.process_usage('search_queries' => 42)
        #
        # will produce:
        #
        #   {1001 => 42, 1002 => 42}
        #
        # When flat_usage is truthy the hierarchy is NOT evaluated and only
        # the metrics explicitly present in raw_usage are returned.
        # Raises UsageValueInvalid (bad value) or MetricInvalid (unknown name).
        def process_usage(raw_usage, flat_usage = false)
          return {} unless raw_usage
          usage = parse_usage(raw_usage)
          flat_usage ? usage : process_parents(usage)
        end

        private

        # Converts {'name' => value, ...} into {metric_id => value, ...},
        # validating each value with sane_value?.
        def parse_usage(raw_usage)
          raw_usage.inject({}) do |usage, (name, value)|
            name = name.strip
            raise UsageValueInvalid.new(name, value) unless sane_value?(value)
            usage.update(metric_id(name) => value)
          end
        end

        # Propagates the usage to all the levels of the hierarchy.
        # For example, in this scenario:
        #   m1 --child_of--> m2 --child_of--> m3
        # If there's a +1 in m1, this method will set the +1 in the other 2 as
        # well.
        def process_parents(usage)
          usage.inject(usage.dup) do |memo, (id, val)|
            is_set_op = Usage.is_set?(val)

            # walk up the chain of ancestors, accumulating (or setting) val
            while (id = parent_id(id))
              if is_set_op
                memo[id] = val
              else
                # need the to_i here instead of in parse_usage because the value
                # can be a string if the parent is passed explicitly on the usage
                # since the value might not be a Fixnum but a '#'Fixnum
                # (also because memo[p_id] might be nil)
                memo[id] = memo[id].to_i
                memo[id] += val.to_i
              end
            end

            memo
          end
        end

        # Cached lookup of a metric's parent id; nil ends the hierarchy walk.
        def parent_id(id)
          @parent_ids[id] ||= Metric.load_parent_id(@service_id, id)
        end

        # Cached lookup of a metric id by name.
        def metric_id(name)
          @metric_ids[name] ||= load_metric_id(name)
        end

        # Fetches the metric id from storage (memoized process-wide via
        # Memoizer). Raises MetricInvalid if the name is unknown.
        def load_metric_id(name)
          Memoizer.memoize_block(Memoizer.build_key(self,
                :load_metric_id, @service_id, name)) do
            storage.get(encode_key("metric/service_id:#{@service_id}/name:#{name}/id"))
          end || raise(MetricInvalid.new(name))
        end

        ## accepts positive integers or positive integers prefixed with # (for sets)
        def sane_value?(value)
          value.is_a?(Numeric) || value.to_s =~ /\A\s*#?\d+\s*\Z/
        end
      end
    end
  end
end
module ThreeScale
  module Backend
    module OAuth
      # Value object representing an OAuth access token together with the
      # coordinates it is stored under (service, app, ttl, storage key).
      class Token
        attr_reader :service_id, :token, :key
        attr_accessor :ttl, :app_id

        def initialize(token, service_id, app_id, ttl)
          @token = token
          @service_id = service_id
          @app_id = app_id
          @ttl = ttl
          @key = Key.for(token, service_id)
        end

        # The raw storage representation of this token, derived from app_id.
        def value
          Value.for(app_id)
        end

        # Builds a Token from a raw storage value (as produced by Value.for).
        def self.from_value(token, service_id, value, ttl)
          decoded = Value.from(value)
          new(token, service_id, *decoded, ttl)
        end
      end
    end
  end
end
# This module defines the format of the keys for OAuth tokens and token sets.
#
# Note that while we can build the key easily, we cannot reliably obtain a token
# and a service_id out of the key, because there are no constraints on them:
#
#   "oauth_access_tokens/service:some/servicegoeshere/andthisis_a_/valid_token"
#
module ThreeScale
  module Backend
    module OAuth
      class Token
        module Key
          # Storage key under which a single token's value lives.
          def self.for(token, service_id)
            "oauth_access_tokens/service:#{service_id}/#{token}"
          end

          module Set
            # Key of the set listing all tokens of an app within a service.
            def self.for(service_id, app_id)
              "oauth_access_tokens/service:#{service_id}/app:#{app_id}/"
            end
          end
        end
      end
    end
  end
end
module ThreeScale
  module Backend
    module OAuth
      class Token
        # Redis-backed storage of OAuth access tokens.
        #
        # Layout (see Key and Key::Set): each token is a plain Redis key
        # holding an app_id-derived value, and is additionally listed in a
        # per-(service, app) set so tokens can be enumerated by app.
        module Storage
          include Configurable

          # Default token size is 4K - 512 (to allow for some metadata)
          MAXIMUM_TOKEN_SIZE = configuration.oauth.max_token_size || 3584
          private_constant :MAXIMUM_TOKEN_SIZE
          # Max number of elements sent to Redis in a single command.
          TOKEN_MAX_REDIS_SLICE_SIZE = 500
          private_constant :TOKEN_MAX_REDIS_SLICE_SIZE
          # Default TTL: one day, in seconds.
          TOKEN_TTL_DEFAULT = 86400
          private_constant :TOKEN_TTL_DEFAULT
          # Magic TTL value meaning "never expires".
          TOKEN_TTL_PERMANENT = 0
          private_constant :TOKEN_TTL_PERMANENT

          Error = Class.new StandardError
          InconsistencyError = Class.new Error

          class << self
            include Backend::Logging
            include Backend::StorageHelpers

            # Creates and stores a token for (service_id, app_id).
            #
            # ttl - seconds until expiry; nil applies TOKEN_TTL_DEFAULT,
            #       0 makes the token permanent.
            #
            # Raises AccessTokenFormatInvalid, AccessTokenInvalidTTL,
            # AccessTokenAlreadyExists or AccessTokenStorageError.
            def create(token, service_id, app_id, ttl = nil)
              raise AccessTokenFormatInvalid if token.nil? || token.empty? ||
                !token.is_a?(String) || token.bytesize > MAXIMUM_TOKEN_SIZE

              # raises if TTL is invalid
              ttl = sanitized_ttl ttl

              key = Key.for token, service_id
              raise AccessTokenAlreadyExists.new(token) unless storage.get(key).nil?

              value = Value.for app_id
              token_set = Key::Set.for(service_id, app_id)

              store_token token, token_set, key, value, ttl
              ensure_stored! token, token_set, key, value
            end

            # Deletes a token
            #
            # Returns the associated app_id or nil
            #
            def delete(token, service_id)
              key = Key.for token, service_id
              val = storage.get key
              if val
                app_id = Value.from val
                token_set = Key::Set.for(service_id, app_id)

                existed, * = remove_a_token token_set, token, key

                # token key existed but was missing from the app's set:
                # report the inconsistency, but still return the value.
                unless existed
                  logger.notify(InconsistencyError.new("Found OAuth token " \
                    "#{token} for service #{service_id} and app #{app_id} as " \
                    "key but not in set!"))
                end
              end

              val
            end

            # Get a token's associated app_id
            #
            # Raises AccessTokenInvalid when the token does not exist.
            def get_credentials(token, service_id)
              app_id = Value.from(storage.get(Key.for(token, service_id)))
              raise AccessTokenInvalid.new token if app_id.nil?
              app_id
            end

            # This is used to list tokens by service and app.
            #
            # Note: this deletes tokens that have not been found from the set of
            # tokens for the given app - those have to be expired tokens.
            def all_by_service_and_app(service_id, app_id)
              token_set = Key::Set.for(service_id, app_id)
              deltokens = []
              tokens_n_values_flat(token_set, service_id)
                .select do |(token, _key, value, _ttl)|
                  # NOTE: reuses (shadows) the app_id parameter for the value
                  # decoded from storage; nil value means the token expired.
                  app_id = Value.from value
                  if app_id.nil?
                    deltokens << token
                    false
                  else
                    true
                  end
                end
                .map do |(token, _key, value, ttl)|
                  Token.from_value token, service_id, value, ttl
                end
                .tap do
                  # delete expired tokens (nil values) from token set
                  deltokens.each_slice(TOKEN_MAX_REDIS_SLICE_SIZE) do |delgrp|
                    storage.srem token_set, delgrp
                  end
                end
            end

            # Remove tokens by app_id.
            #
            # Triggered by Application deletion.
            #
            def remove_tokens(service_id, app_id)
              remove_tokens_by service_id, app_id
            end

            private

            # Remove all tokens
            #
            # I thought of leaving this one public, but remove_*_tokens removed
            # my use cases for the time being.
            def remove_tokens_by(service_id, app_id)
              token_set = Key::Set.for(service_id, app_id)

              remove_whole_token_set(token_set, service_id)
            end

            # Removes from the set, and deletes the keys of, every token in
            # token_set for which blk is truthy. Returns the removed tokens.
            def remove_token_set_by(token_set, service_id, &blk)
              # Get tokens. Filter them. Group them into manageable groups.
              # Extract tokens and keys into separate arrays, one for each.
              # Remove tokens from token set (they are keys in a set) and token
              # keys themselves.
              tokens_n_values_flat(token_set, service_id, false)
                .select(&blk)
                .each_slice(TOKEN_MAX_REDIS_SLICE_SIZE)
                .inject([[], []]) do |acc, groups|
                  groups.each do |token, key, _value|
                    acc[0] << token
                    acc[1] << key
                  end
                  acc
                end
                .each_slice(2)
                .inject([]) do |acc, (tokens, keys)|
                  storage.pipelined do
                    if tokens && !tokens.empty?
                      storage.srem token_set, tokens
                      acc.concat tokens
                    end
                    storage.del keys if keys && !keys.empty?
                  end
                  acc
                end
            end

            # Removes a single token: drop it from the set and delete its key.
            # Returns the pipeline results [srem result, del result].
            def remove_a_token(token_set, token, key)
              storage.pipelined do
                storage.srem token_set, token
                storage.del key
              end
            end

            # Deletes the set itself plus every token key it referenced.
            def remove_whole_token_set(token_set, service_id)
              _token_groups, key_groups = tokens_n_keys(token_set, service_id)
              storage.pipelined do
                storage.del token_set
                # remove all tokens for this app
                key_groups.each do |keys|
                  storage.del keys
                end
              end
            end

            # TODO: provide a SSCAN interface with lazy enums because SMEMBERS
            # is prone to DoSing and timeouts
            def tokens_from(token_set)
              storage.smembers(token_set)
            end

            # Returns [token_groups, key_groups]: tokens from the set sliced
            # into groups of TOKEN_MAX_REDIS_SLICE_SIZE, and the matching
            # storage keys in the same positions.
            def tokens_n_keys(token_set, service_id)
              token_groups = tokens_from(token_set).each_slice(TOKEN_MAX_REDIS_SLICE_SIZE)
              key_groups = token_groups.map do |tokens|
                tokens.map do |token|
                  Key.for token, service_id
                end
              end

              [token_groups, key_groups]
            end

            # Provides grouped data which matches respectively in each array
            # position, ie. 1st group of data contains a group of tokens, keys
            # and values with ttls, and position N of the tokens group has key
            # in position N of the keys group, and so on.
            #
            # [[[token group], [key group], [value_with_ttls_group]], ...]
            #
            def tokens_n_values_groups(token_set, service_id, with_ttls)
              token_groups, key_groups = tokens_n_keys(token_set, service_id)
              value_ttl_groups = key_groups.map do |keys|
                # pipelining will create an array with the results of commands
                res = storage.pipelined do
                  storage.mget(keys)
                  if with_ttls
                    keys.map do |key|
                      storage.ttl key
                    end
                  end
                end
                # [mget array, 0..n ttls] => [mget array, ttls array]
                [res.shift, res]
              end
              token_groups.zip(key_groups, value_ttl_groups)
            end

            # Zips the data provided by tokens_n_values_groups so that you stop
            # looking at indexes in the respective arrays and instead have:
            #
            # [group 0, ..., group N] where each group is made of:
            # [[token 0, key 0, value 0, ttl 0], ..., [token N, key N, value
            # N, ttl N]]
            #
            def tokens_n_values_zipped_groups(token_set, service_id, with_ttls = true)
              tokens_n_values_groups(token_set,
                                     service_id,
                                     with_ttls).map do |tokens, keys, (values, ttls)|
                tokens.zip keys, values, ttls
              end
            end

            # Flattens the data provided by tokens_n_values_zipped_groups so
            # that you have a transparent iterator with all needed data and can
            # stop worrying about streaming groups of elements.
            #
            def tokens_n_values_flat(token_set, service_id, with_ttls = true)
              tokens_n_values_zipped_groups(token_set,
                                            service_id,
                                            with_ttls).flat_map do |groups|
                groups.map do |token, key, value, ttl|
                  [token, key, value, ttl]
                end
              end
            end

            # Store the specified token in Redis
            #
            # TTL specified in seconds.
            # A TTL of 0 stores a permanent token
            def store_token(token, token_set, key, value, ttl)
              # build the storage command so that we can pipeline everything cleanly
              command = :set
              args = [key]

              if !permanent_ttl? ttl
                command = :setex
                args << ttl
              end

              args << value

              # pipelined will return nil if it is embedded into another
              # pipeline(which would be an error at this point) or if shutting
              # down and a connection error happens. Both things being abnormal
              # means we should just raise a storage error.
              raise AccessTokenStorageError, token unless storage.pipelined do
                storage.send(command, *args)
                storage.sadd(token_set, token)
              end
            end

            # Make sure everything ended up there
            #
            # TODO: review and possibly reimplement trying to leave it
            # consistent as much as possible.
            #
            # Note that we have a sharding proxy and pipelines can't be guaranteed
            # to behave like transactions, since we might have one non-working
            # shard. Instead of relying on proxy-specific responses, we just check
            # that the data we should have in the store is really there.
            def ensure_stored!(token, token_set, key, value)
              results = storage.pipelined do
                storage.get(key)
                storage.sismember(token_set, token)
              end

              results.last && results.first == value ||
                raise(AccessTokenStorageError, token)
            end

            # Validation for the TTL value
            #
            # 0 is accepted (understood as permanent token)
            # Negative values are not accepted
            # Integer(ttl) validation is required (if input is nil, default applies)
            def sanitized_ttl(ttl)
              ttl = begin
                      Integer(ttl)
                    rescue TypeError
                      # ttl is nil
                      TOKEN_TTL_DEFAULT
                    rescue
                      # NaN: force the negative-value check below to raise
                      -1
                    end
              raise AccessTokenInvalidTTL if ttl < 0

              ttl
            end

            # Check whether a TTL has the magic value for a permanent token
            def permanent_ttl?(ttl)
              ttl == TOKEN_TTL_PERMANENT
            end
          end
        end
      end
    end
  end
end