apisonator 2.100.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (173)
  1. checksums.yaml +7 -0
  2. data/CHANGELOG.md +317 -0
  3. data/Gemfile +11 -0
  4. data/Gemfile.base +65 -0
  5. data/Gemfile.lock +319 -0
  6. data/Gemfile.on_prem +1 -0
  7. data/Gemfile.on_prem.lock +297 -0
  8. data/LICENSE +202 -0
  9. data/NOTICE +15 -0
  10. data/README.md +230 -0
  11. data/Rakefile +287 -0
  12. data/apisonator.gemspec +47 -0
  13. data/app/api/api.rb +13 -0
  14. data/app/api/internal/alert_limits.rb +32 -0
  15. data/app/api/internal/application_keys.rb +49 -0
  16. data/app/api/internal/application_referrer_filters.rb +43 -0
  17. data/app/api/internal/applications.rb +77 -0
  18. data/app/api/internal/errors.rb +54 -0
  19. data/app/api/internal/events.rb +42 -0
  20. data/app/api/internal/internal.rb +104 -0
  21. data/app/api/internal/metrics.rb +40 -0
  22. data/app/api/internal/service_tokens.rb +46 -0
  23. data/app/api/internal/services.rb +58 -0
  24. data/app/api/internal/stats.rb +42 -0
  25. data/app/api/internal/usagelimits.rb +62 -0
  26. data/app/api/internal/utilization.rb +23 -0
  27. data/bin/3scale_backend +223 -0
  28. data/bin/3scale_backend_worker +26 -0
  29. data/config.ru +4 -0
  30. data/config/puma.rb +192 -0
  31. data/config/schedule.rb +9 -0
  32. data/ext/mkrf_conf.rb +64 -0
  33. data/lib/3scale/backend.rb +67 -0
  34. data/lib/3scale/backend/alert_limit.rb +56 -0
  35. data/lib/3scale/backend/alerts.rb +137 -0
  36. data/lib/3scale/backend/analytics/kinesis.rb +3 -0
  37. data/lib/3scale/backend/analytics/kinesis/adapter.rb +180 -0
  38. data/lib/3scale/backend/analytics/kinesis/exporter.rb +86 -0
  39. data/lib/3scale/backend/analytics/kinesis/job.rb +135 -0
  40. data/lib/3scale/backend/analytics/redshift.rb +3 -0
  41. data/lib/3scale/backend/analytics/redshift/adapter.rb +367 -0
  42. data/lib/3scale/backend/analytics/redshift/importer.rb +83 -0
  43. data/lib/3scale/backend/analytics/redshift/job.rb +33 -0
  44. data/lib/3scale/backend/application.rb +330 -0
  45. data/lib/3scale/backend/application_events.rb +76 -0
  46. data/lib/3scale/backend/background_job.rb +65 -0
  47. data/lib/3scale/backend/configurable.rb +20 -0
  48. data/lib/3scale/backend/configuration.rb +151 -0
  49. data/lib/3scale/backend/configuration/loader.rb +42 -0
  50. data/lib/3scale/backend/constants.rb +19 -0
  51. data/lib/3scale/backend/cors.rb +84 -0
  52. data/lib/3scale/backend/distributed_lock.rb +67 -0
  53. data/lib/3scale/backend/environment.rb +21 -0
  54. data/lib/3scale/backend/error_storage.rb +52 -0
  55. data/lib/3scale/backend/errors.rb +343 -0
  56. data/lib/3scale/backend/event_storage.rb +120 -0
  57. data/lib/3scale/backend/experiment.rb +84 -0
  58. data/lib/3scale/backend/extensions.rb +5 -0
  59. data/lib/3scale/backend/extensions/array.rb +19 -0
  60. data/lib/3scale/backend/extensions/hash.rb +26 -0
  61. data/lib/3scale/backend/extensions/nil_class.rb +13 -0
  62. data/lib/3scale/backend/extensions/redis.rb +44 -0
  63. data/lib/3scale/backend/extensions/string.rb +13 -0
  64. data/lib/3scale/backend/extensions/time.rb +110 -0
  65. data/lib/3scale/backend/failed_jobs_scheduler.rb +141 -0
  66. data/lib/3scale/backend/job_fetcher.rb +122 -0
  67. data/lib/3scale/backend/listener.rb +728 -0
  68. data/lib/3scale/backend/listener_metrics.rb +99 -0
  69. data/lib/3scale/backend/logging.rb +48 -0
  70. data/lib/3scale/backend/logging/external.rb +44 -0
  71. data/lib/3scale/backend/logging/external/impl.rb +93 -0
  72. data/lib/3scale/backend/logging/external/impl/airbrake.rb +66 -0
  73. data/lib/3scale/backend/logging/external/impl/bugsnag.rb +69 -0
  74. data/lib/3scale/backend/logging/external/impl/default.rb +18 -0
  75. data/lib/3scale/backend/logging/external/resque.rb +57 -0
  76. data/lib/3scale/backend/logging/logger.rb +18 -0
  77. data/lib/3scale/backend/logging/middleware.rb +62 -0
  78. data/lib/3scale/backend/logging/middleware/json_writer.rb +21 -0
  79. data/lib/3scale/backend/logging/middleware/text_writer.rb +60 -0
  80. data/lib/3scale/backend/logging/middleware/writer.rb +143 -0
  81. data/lib/3scale/backend/logging/worker.rb +107 -0
  82. data/lib/3scale/backend/manifest.rb +80 -0
  83. data/lib/3scale/backend/memoizer.rb +277 -0
  84. data/lib/3scale/backend/metric.rb +275 -0
  85. data/lib/3scale/backend/metric/collection.rb +91 -0
  86. data/lib/3scale/backend/oauth.rb +4 -0
  87. data/lib/3scale/backend/oauth/token.rb +26 -0
  88. data/lib/3scale/backend/oauth/token_key.rb +30 -0
  89. data/lib/3scale/backend/oauth/token_storage.rb +313 -0
  90. data/lib/3scale/backend/oauth/token_value.rb +25 -0
  91. data/lib/3scale/backend/period.rb +3 -0
  92. data/lib/3scale/backend/period/boundary.rb +107 -0
  93. data/lib/3scale/backend/period/cache.rb +28 -0
  94. data/lib/3scale/backend/period/period.rb +402 -0
  95. data/lib/3scale/backend/queue_storage.rb +16 -0
  96. data/lib/3scale/backend/rack.rb +49 -0
  97. data/lib/3scale/backend/rack/exception_catcher.rb +136 -0
  98. data/lib/3scale/backend/rack/internal_error_catcher.rb +23 -0
  99. data/lib/3scale/backend/rack/prometheus.rb +19 -0
  100. data/lib/3scale/backend/saas.rb +6 -0
  101. data/lib/3scale/backend/saas_analytics.rb +4 -0
  102. data/lib/3scale/backend/server.rb +30 -0
  103. data/lib/3scale/backend/server/falcon.rb +52 -0
  104. data/lib/3scale/backend/server/puma.rb +71 -0
  105. data/lib/3scale/backend/service.rb +317 -0
  106. data/lib/3scale/backend/service_token.rb +97 -0
  107. data/lib/3scale/backend/stats.rb +8 -0
  108. data/lib/3scale/backend/stats/aggregator.rb +170 -0
  109. data/lib/3scale/backend/stats/aggregators/base.rb +72 -0
  110. data/lib/3scale/backend/stats/aggregators/response_code.rb +58 -0
  111. data/lib/3scale/backend/stats/aggregators/usage.rb +34 -0
  112. data/lib/3scale/backend/stats/bucket_reader.rb +135 -0
  113. data/lib/3scale/backend/stats/bucket_storage.rb +108 -0
  114. data/lib/3scale/backend/stats/cleaner.rb +195 -0
  115. data/lib/3scale/backend/stats/codes_commons.rb +14 -0
  116. data/lib/3scale/backend/stats/delete_job_def.rb +60 -0
  117. data/lib/3scale/backend/stats/key_generator.rb +73 -0
  118. data/lib/3scale/backend/stats/keys.rb +104 -0
  119. data/lib/3scale/backend/stats/partition_eraser_job.rb +58 -0
  120. data/lib/3scale/backend/stats/partition_generator_job.rb +46 -0
  121. data/lib/3scale/backend/stats/period_commons.rb +34 -0
  122. data/lib/3scale/backend/stats/stats_parser.rb +141 -0
  123. data/lib/3scale/backend/stats/storage.rb +113 -0
  124. data/lib/3scale/backend/statsd.rb +14 -0
  125. data/lib/3scale/backend/storable.rb +35 -0
  126. data/lib/3scale/backend/storage.rb +40 -0
  127. data/lib/3scale/backend/storage_async.rb +4 -0
  128. data/lib/3scale/backend/storage_async/async_redis.rb +21 -0
  129. data/lib/3scale/backend/storage_async/client.rb +205 -0
  130. data/lib/3scale/backend/storage_async/pipeline.rb +79 -0
  131. data/lib/3scale/backend/storage_async/resque_extensions.rb +30 -0
  132. data/lib/3scale/backend/storage_helpers.rb +278 -0
  133. data/lib/3scale/backend/storage_key_helpers.rb +9 -0
  134. data/lib/3scale/backend/storage_sync.rb +43 -0
  135. data/lib/3scale/backend/transaction.rb +62 -0
  136. data/lib/3scale/backend/transactor.rb +177 -0
  137. data/lib/3scale/backend/transactor/limit_headers.rb +54 -0
  138. data/lib/3scale/backend/transactor/notify_batcher.rb +139 -0
  139. data/lib/3scale/backend/transactor/notify_job.rb +47 -0
  140. data/lib/3scale/backend/transactor/process_job.rb +33 -0
  141. data/lib/3scale/backend/transactor/report_job.rb +84 -0
  142. data/lib/3scale/backend/transactor/status.rb +236 -0
  143. data/lib/3scale/backend/transactor/usage_report.rb +182 -0
  144. data/lib/3scale/backend/usage.rb +63 -0
  145. data/lib/3scale/backend/usage_limit.rb +115 -0
  146. data/lib/3scale/backend/use_cases/provider_key_change_use_case.rb +60 -0
  147. data/lib/3scale/backend/util.rb +17 -0
  148. data/lib/3scale/backend/validators.rb +26 -0
  149. data/lib/3scale/backend/validators/base.rb +36 -0
  150. data/lib/3scale/backend/validators/key.rb +17 -0
  151. data/lib/3scale/backend/validators/limits.rb +57 -0
  152. data/lib/3scale/backend/validators/oauth_key.rb +15 -0
  153. data/lib/3scale/backend/validators/oauth_setting.rb +15 -0
  154. data/lib/3scale/backend/validators/redirect_uri.rb +33 -0
  155. data/lib/3scale/backend/validators/referrer.rb +60 -0
  156. data/lib/3scale/backend/validators/service_state.rb +15 -0
  157. data/lib/3scale/backend/validators/state.rb +15 -0
  158. data/lib/3scale/backend/version.rb +5 -0
  159. data/lib/3scale/backend/views/oauth_access_tokens.builder +14 -0
  160. data/lib/3scale/backend/views/oauth_app_id_by_token.builder +4 -0
  161. data/lib/3scale/backend/worker.rb +87 -0
  162. data/lib/3scale/backend/worker_async.rb +88 -0
  163. data/lib/3scale/backend/worker_metrics.rb +44 -0
  164. data/lib/3scale/backend/worker_sync.rb +32 -0
  165. data/lib/3scale/bundler_shim.rb +17 -0
  166. data/lib/3scale/prometheus_server.rb +10 -0
  167. data/lib/3scale/tasks/connectivity.rake +41 -0
  168. data/lib/3scale/tasks/helpers.rb +3 -0
  169. data/lib/3scale/tasks/helpers/environment.rb +23 -0
  170. data/lib/3scale/tasks/stats.rake +131 -0
  171. data/lib/3scale/tasks/swagger.rake +46 -0
  172. data/licenses.xml +1215 -0
  173. metadata +227 -0
@@ -0,0 +1,104 @@
1
module ThreeScale
  module Backend
    module Stats
      # Builders for the Redis keys under which stats counters are stored.
      module Keys
        module_function

        extend Backend::StorageKeyHelpers

        # @note The { ... } is the key tag. See redis docs for more info
        #   about key tags.
        def service_key_prefix(service_id)
          "stats/{service:#{service_id}}"
        end

        # @note For backwards compatibility, the key is called cinstance.
        #   It will be eventually renamed to application.
        def application_key_prefix(prefix, application_id)
          "#{prefix}/cinstance:#{application_id}"
        end

        def applications_key_prefix(prefix)
          "#{prefix}/cinstances"
        end

        def metric_key_prefix(prefix, metric_id)
          "#{prefix}/metric:#{metric_id}"
        end

        def response_code_key_prefix(prefix, response_code)
          "#{prefix}/response_code:#{response_code}"
        end

        # Counter key for a metric aggregated at the service level.
        def service_usage_value_key(service_id, metric_id, period)
          prefix = metric_key_prefix(service_key_prefix(service_id), metric_id)
          encode_key(counter_key(prefix, period))
        end

        # Counter key for a metric aggregated at the application level.
        def application_usage_value_key(service_id, app_id, metric_id, period)
          prefix = metric_key_prefix(
            application_key_prefix(service_key_prefix(service_id), app_id),
            metric_id
          )
          encode_key(counter_key(prefix, period))
        end

        # Counter key for a response code aggregated at the service level.
        def service_response_code_value_key(service_id, response_code, period)
          prefix = response_code_key_prefix(service_key_prefix(service_id),
                                            response_code)
          encode_key(counter_key(prefix, period))
        end

        # Counter key for a response code aggregated at the application level.
        def application_response_code_value_key(service_id, app_id, response_code, period)
          prefix = response_code_key_prefix(
            application_key_prefix(service_key_prefix(service_id), app_id),
            response_code
          )
          encode_key(counter_key(prefix, period))
        end

        # Appends the granularity (and, except for eternity, the compacted
        # period start) to the given prefix.
        def counter_key(prefix, period)
          granularity = period.granularity
          base = "#{prefix}/#{granularity}"
          return base if granularity.to_sym == :eternity
          "#{base}:#{period.start.to_compact_s}"
        end

        # We want all the buckets to go to the same Redis shard.
        # The reason is that SUNION support in Twemproxy requires that the
        # supplied keys hash to the same server.
        # We are already using a hash tag in the Twemproxy config file: "{}".
        # For that reason, if we specify a key that contains something like
        # "{stats_bucket}", we can be sure that all of them will be in the same
        # shard.
        def changed_keys_bucket_key(bucket)
          "{stats_bucket}:#{bucket}"
        end

        def changed_keys_key
          "keys_changed_set"
        end

        # Builds the service- and application-scoped keys for a transaction.
        # item selects the prefix builder ("metric" or "response_code").
        def transaction_keys(transaction, item, value)
          service_prefix = service_key_prefix(transaction.service_id)
          app_prefix = application_key_prefix(service_prefix,
                                              transaction.application_id)

          builder = :"#{item}_key_prefix"

          {
            service: public_send(builder, service_prefix, value),
            application: public_send(builder, app_prefix, value),
          }
        end

      end
    end
  end
end
@@ -0,0 +1,58 @@
1
module ThreeScale
  module Backend
    module Stats
      # Job for deleting service stats.
      # Performs actual key deletion from a key partition definition.
      class PartitionEraserJob < BackgroundJob
        # low priority queue
        @queue = :stats

        class << self
          include StorageHelpers
          include Configurable

          # Deletes the slice [offset, offset + length) of the stats keys
          # generated for the given delete-job definition.
          #
          # Returns [true, json_summary] on success, or [false, message] when
          # a Backend::Error is raised (e.g. a validation failure).
          def perform_logged(_enqueue_time, service_id, applications, metrics,
                             from, to, offset, length, context_info = {})
            job = DeleteJobDef.new(
              service_id: service_id,
              applications: applications,
              metrics: metrics,
              from: from,
              to: to
            )

            validate_job(job, offset, length)

            stats_key_gen = KeyGenerator.new(job.to_hash)

            # Delete in batches so we never send an oversized DEL to Redis.
            stats_key_gen.keys.drop(offset).take(length).each_slice(configuration.stats.delete_batch_size) do |slice|
              storage.del(slice)
            end

            # Bug fix: the summary previously reported the misspelled key
            # 'lenght' instead of 'length'.
            [true, { job: job.to_hash, offset: offset, length: length }.to_json]
          rescue Backend::Error => error
            [false, "#{service_id} #{error}"]
          end

          private

          # Both offset and length must be Integers; anything else aborts
          # the job with a validation error.
          def validate_job(job, offset, length)
            unless offset.is_a? Integer
              raise DeleteServiceStatsValidationError.new(job.service_id, 'offset field value ' \
                                                          "[#{offset}] validation error")
            end

            unless length.is_a? Integer
              raise DeleteServiceStatsValidationError.new(job.service_id, 'length field value ' \
                                                          "[#{length}] validation error")
            end
          end

          # The enqueue timestamp is always the first job argument.
          def enqueue_time(args)
            args[0]
          end
        end
      end
    end
  end
end
@@ -0,0 +1,46 @@
1
module ThreeScale
  module Backend
    module Stats
      # Job for deleting service stats.
      # Maps a delete-job definition to a set of non-overlapping key-set
      # partitions, enqueuing one PartitionEraserJob per partition.
      class PartitionGeneratorJob < BackgroundJob
        # low priority queue
        @queue = :stats

        class << self
          include Configurable

          # Returns [true, job_json] on success, or [false, message] when a
          # Backend::Error is raised.
          def perform_logged(_enqueue_time, service_id, applications, metrics,
                             from, to, context_info = {})
            job = DeleteJobDef.new(service_id: service_id,
                                   applications: applications,
                                   metrics: metrics,
                                   from: from,
                                   to: to)

            key_count = KeyGenerator.new(job.to_hash).keys.count

            # Generate partitions: one eraser job per slice of the key space.
            0.step(key_count, configuration.stats.delete_partition_batch_size) do |offset|
              Resque.enqueue(PartitionEraserJob, Time.now.getutc.to_f, service_id,
                             applications, metrics, from, to, offset,
                             configuration.stats.delete_partition_batch_size,
                             context_info)
            end

            [true, job.to_json]
          rescue Backend::Error => error
            [false, "#{service_id} #{error}"]
          end

          private

          # The enqueue timestamp is always the first job argument.
          def enqueue_time(args)
            args[0]
          end
        end
      end
    end
  end
end
@@ -0,0 +1,34 @@
1
module ThreeScale
  module Backend
    module Stats
      # Granularity sets shared by the stats aggregation code.
      module PeriodCommons
        SERVICE_GRANULARITIES =
          [:eternity, :month, :week, :day, :hour].map { |name| Period[name] }.freeze

        # For applications and users
        EXPANDED_GRANULARITIES =
          (SERVICE_GRANULARITIES + [Period[:year], Period[:minute]]).freeze

        # Seconds after which counters of a granularity expire; granularities
        # not listed here are kept permanently.
        GRANULARITY_EXPIRATION_TIME = { Period[:minute] => 180 }.freeze
        private_constant :GRANULARITY_EXPIRATION_TIME

        PERMANENT_SERVICE_GRANULARITIES =
          (SERVICE_GRANULARITIES - GRANULARITY_EXPIRATION_TIME.keys).freeze
        PERMANENT_EXPANDED_GRANULARITIES =
          (EXPANDED_GRANULARITIES - GRANULARITY_EXPIRATION_TIME.keys).freeze

        # We are not going to send metrics with granularity 'eternity' or
        # 'week' to Kinesis, so there is no point in storing them in Redis
        # buckets.
        EXCLUDED_FOR_BUCKETS = [Period[:eternity], Period[:week]].freeze

        # Return an array of granularities given a metric_type
        def self.granularities(metric_type)
          if metric_type == :service
            SERVICE_GRANULARITIES
          else
            EXPANDED_GRANULARITIES
          end
        end

        # Expiration (in seconds) for the given granularity, or nil if the
        # counters never expire.
        def self.expire_time_for_granularity(granularity)
          GRANULARITY_EXPIRATION_TIME[granularity]
        end
      end
    end
  end
end
@@ -0,0 +1,141 @@
1
module ThreeScale
  module Backend
    module Stats
      class StatsParser
        # This parser converts a stats key of Redis into a Hash
        # This is an example of stats key that this parser converts:
        # "stats/{service:1006}/cinstance:a60/metric:255/week:20151130"

        # This class contains code stolen from Alex's script:
        # /script/redis/stats_keys_2_csv
        # We can think about unifying code later.

        # Period components that encode a concrete point in time.
        DATE_COLS = [
          'year'.freeze,
          'month'.freeze,
          'day'.freeze,
          'hour'.freeze,
          'minute'.freeze,
        ].freeze
        private_constant :DATE_COLS

        # All recognized period names, including the non-date ones.
        PERIODS = [
          *DATE_COLS,
          'week'.freeze,
          'eternity'.freeze,
        ].freeze
        private_constant :PERIODS

        NON_DATE_PERIODS = (PERIODS - DATE_COLS).freeze
        private_constant :NON_DATE_PERIODS

        # Every column a parsed key/value pair may yield.
        ALL_COLUMNS = [
          *DATE_COLS,
          'period'.freeze,
          'service'.freeze,
          'cinstance'.freeze,
          'uinstance'.freeze,
          'metric'.freeze,
          'response_code'.freeze,
          'value'.freeze,
        ].freeze
        private_constant :ALL_COLUMNS

        # Columns that must be present for the key to be considered valid.
        REQUIRED_COLS = [
          *DATE_COLS,
          'period'.freeze,
          'service'.freeze,
          'value'.freeze,
        ].freeze
        private_constant :REQUIRED_COLS

        # Raised when a key/value pair fails column validation.
        StatsKeyValueInvalid = Class.new(ThreeScale::Backend::Error)

        class << self

          # Parses a Redis stats key plus its stored value into a Hash with
          # symbol keys (:period, :service, :timestamp, :value, ...).
          # Raises StatsKeyValueInvalid when the key cannot be parsed.
          def parse(stats_key, value)
            key_value_to_hash(stats_key, value)
          end

          private

          # NOTE: fix_dates_and_periods and date_cols_to_timestamp mutate the
          # hash in place; 'result' and 'h' alias the same object, so the
          # statement order here matters.
          def key_value_to_hash(key,value)
            key_value = ("\"#{key}\":\"#{value}\"")

            # some keys have things like "field1:xxx/uinstance:N/A/field3:yyy" WTF.
            key_value.gsub!(/:N\/A/, ':'.freeze)

            h = Hash[str2ary(prepare_str_from(key_value))]

            result = fix_dates_and_periods(h)

            all_required_columns = REQUIRED_COLS.all? { |col| h.has_key?(col) }
            no_extra_columns = (h.keys - ALL_COLUMNS).empty?

            unless all_required_columns && no_extra_columns
              raise StatsKeyValueInvalid, "Error parsing #{key_value}"
            end

            date_cols_to_timestamp(h)

            Hash[result.map{ |k, v| [k.to_sym, v] }]
          end

          # Strips the surrounding quotes and the "{...}" key tag, and appends
          # the stored value as a trailing "value:<val>" segment.
          def prepare_str_from(line)
            _, key, _, val, *_ = line.split('"')
            "#{key.gsub(/[\{\}]/, '')}/value:#{val}"
          end

          # Splits "a/k1:v1/k2:v2" into [[k1, v1], [k2, v2]], dropping the
          # leading "stats" segment.
          def str2ary(str)
            str.split('/')[1..-1].map do |kv|
              kv.split(':')
            end
          end

          # Adds a 'period' column, expands its compact value into the date
          # columns, and removes the non-date period keys. Mutates and
          # returns the same hash.
          def fix_dates_and_periods(hash)
            period = hash.keys.find { |k| PERIODS.include? k }
            if period
              hash['period'.freeze] = period
              period_val = (period == 'eternity' ? '' : hash[period].dup)
              fix_date_cols(hash, period_val)
              NON_DATE_PERIODS.each { |ndp| hash.delete ndp }
            end
            hash
          end

          # Consumes the compact period string left-to-right (4 digits for
          # the year, 2 for each remaining component). Missing components
          # become nil; single digits are padded (compacted times).
          def fix_date_cols(hash, period_val)
            DATE_COLS.each do |date_col|
              hash[date_col] = if date_col == 'year'
                                 period_val.slice! 0, 4
                               else
                                 period_val.slice! 0, 2
                               end

              if hash[date_col].empty?
                hash[date_col] = nil
              elsif hash[date_col].length == 1 # because of 'compacted' times
                hash[date_col] = hash[date_col] + '0'
              end
            end
          end

          # Adds the 'timestamp' key to the hash. The value follows the format:
          # YYYYMMDD HH:mm. This function also deletes all the date columns
          # from the given hash
          def date_cols_to_timestamp(hash)
            hash['timestamp'.freeze] = timestamp(hash)
            DATE_COLS.each { |date_col| hash.delete(date_col) }
            hash
          end

          # Builds "YYYYMMDD HH:mm", defaulting hour/minute to 00 when
          # absent; eternity has no timestamp and yields ''.
          def timestamp(hash)
            return '' if hash['period'] == 'eternity'
            timestamp = hash['year'] + hash['month'] + hash['day'] + ' '
            timestamp << (hash['hour'] ? hash['hour'] : '00')
            timestamp << (hash['minute'] ? (':' + hash['minute']) : ':00')
          end
        end
      end
    end
  end
end
@@ -0,0 +1,113 @@
1
+ require_relative '../storage'
2
+ require_relative 'keys'
3
+ require '3scale/backend/analytics/kinesis/adapter'
4
+ require '3scale/backend/stats/bucket_reader'
5
+ require '3scale/backend/stats/bucket_storage'
6
+ require '3scale/backend/stats/stats_parser'
7
+
8
module ThreeScale
  module Backend
    module Stats
      # Controls whether bucket storage is enabled and provides access to the
      # storages and clients used by the analytics pipeline.
      class Storage

        STATS_ENABLED_KEY = 'stats:enabled'.freeze
        private_constant :STATS_ENABLED_KEY

        DISABLED_BECAUSE_EMERGENCY_KEY = 'stats:disabled_emergency'.freeze
        private_constant :DISABLED_BECAUSE_EMERGENCY_KEY

        class << self
          include Memoizer::Decorator

          # True when the enabled flag in Redis is set to 1.
          def enabled?
            storage.get(STATS_ENABLED_KEY).to_i == 1
          end
          memoize :enabled?

          def enable!
            storage.set(STATS_ENABLED_KEY, '1')
          end

          # Bucket storage can be disabled because an 'emergency' happened.
          # If too many buckets accumulate, we disable the feature because
          # the memory occupied by Redis can grow very quickly.
          # Check the code in the Aggregator class to check the conditions
          # that trigger this 'emergency'.
          def disable!(emergency = false)
            storage.del(STATS_ENABLED_KEY)

            # Keep a marker of whether the disable was an emergency one.
            emergency ? storage.set(DISABLED_BECAUSE_EMERGENCY_KEY, '1')
                      : storage.del(DISABLED_BECAUSE_EMERGENCY_KEY)
          end

          # Returns whether the last time that bucket storage was disabled was
          # because of an emergency. Notice that this method can return 'true'
          # even when enabled? is true.
          def last_disable_was_emergency?
            storage.get(DISABLED_BECAUSE_EMERGENCY_KEY).to_i == 1
          end
          memoize :last_disable_was_emergency?

          def bucket_storage
            @bucket_storage ||= BucketStorage.new(stats_storage)
          end

          def bucket_reader
            @bucket_reader ||= BucketReader.new(config.stats.bucket_size,
                                                bucket_storage,
                                                storage)
          end

          def kinesis_adapter
            @kinesis_adapter ||= Analytics::Kinesis::Adapter.new(
              config.kinesis_stream_name,
              kinesis_client,
              stats_storage
            )
          end

          private

          def storage
            Backend::Storage.instance
          end

          # This is a separate storage used only for the analytics system. More
          # specifically, the only things saved in this storage are:
          # - The buckets handled by the BucketStorage class.
          # - The batches of events created by the Kinesis::Adapter class.
          def stats_storage
            @stats_storage ||= begin
              analytics_cfg = config.analytics_redis
              if analytics_cfg && analytics_cfg[:server]
                stats_storage_from_config
              else
                # A stats storage has not been specified, so
                # use the same one for everything.
                storage
              end
            end
          end

          def stats_storage_from_config
            options = Backend::Storage::Helpers.config_with(config.analytics_redis)

            Backend::Storage.new(options)
          end

          def config
            Backend.configuration
          end

          def kinesis_client
            @kinesis_client ||= Aws::Firehose::Client.new(
              region: config.kinesis_region,
              access_key_id: config.aws_access_key_id,
              secret_access_key: config.aws_secret_access_key)
          end
        end

      end
    end
  end
end