launchdarkly-server-sdk 8.8.3-java (RubyGems package; the `-java` suffix is the JRuby platform variant)
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry.
- checksums.yaml +7 -0
- data/LICENSE.txt +13 -0
- data/README.md +61 -0
- data/lib/launchdarkly-server-sdk.rb +1 -0
- data/lib/ldclient-rb/cache_store.rb +45 -0
- data/lib/ldclient-rb/config.rb +658 -0
- data/lib/ldclient-rb/context.rb +565 -0
- data/lib/ldclient-rb/evaluation_detail.rb +387 -0
- data/lib/ldclient-rb/events.rb +642 -0
- data/lib/ldclient-rb/expiring_cache.rb +77 -0
- data/lib/ldclient-rb/flags_state.rb +88 -0
- data/lib/ldclient-rb/impl/big_segments.rb +117 -0
- data/lib/ldclient-rb/impl/broadcaster.rb +78 -0
- data/lib/ldclient-rb/impl/context.rb +96 -0
- data/lib/ldclient-rb/impl/context_filter.rb +166 -0
- data/lib/ldclient-rb/impl/data_source.rb +188 -0
- data/lib/ldclient-rb/impl/data_store.rb +109 -0
- data/lib/ldclient-rb/impl/dependency_tracker.rb +102 -0
- data/lib/ldclient-rb/impl/diagnostic_events.rb +129 -0
- data/lib/ldclient-rb/impl/evaluation_with_hook_result.rb +34 -0
- data/lib/ldclient-rb/impl/evaluator.rb +539 -0
- data/lib/ldclient-rb/impl/evaluator_bucketing.rb +86 -0
- data/lib/ldclient-rb/impl/evaluator_helpers.rb +50 -0
- data/lib/ldclient-rb/impl/evaluator_operators.rb +131 -0
- data/lib/ldclient-rb/impl/event_sender.rb +100 -0
- data/lib/ldclient-rb/impl/event_summarizer.rb +68 -0
- data/lib/ldclient-rb/impl/event_types.rb +136 -0
- data/lib/ldclient-rb/impl/flag_tracker.rb +58 -0
- data/lib/ldclient-rb/impl/integrations/consul_impl.rb +170 -0
- data/lib/ldclient-rb/impl/integrations/dynamodb_impl.rb +300 -0
- data/lib/ldclient-rb/impl/integrations/file_data_source.rb +229 -0
- data/lib/ldclient-rb/impl/integrations/redis_impl.rb +306 -0
- data/lib/ldclient-rb/impl/integrations/test_data/test_data_source.rb +40 -0
- data/lib/ldclient-rb/impl/migrations/migrator.rb +287 -0
- data/lib/ldclient-rb/impl/migrations/tracker.rb +136 -0
- data/lib/ldclient-rb/impl/model/clause.rb +45 -0
- data/lib/ldclient-rb/impl/model/feature_flag.rb +254 -0
- data/lib/ldclient-rb/impl/model/preprocessed_data.rb +64 -0
- data/lib/ldclient-rb/impl/model/segment.rb +132 -0
- data/lib/ldclient-rb/impl/model/serialization.rb +72 -0
- data/lib/ldclient-rb/impl/repeating_task.rb +46 -0
- data/lib/ldclient-rb/impl/sampler.rb +25 -0
- data/lib/ldclient-rb/impl/store_client_wrapper.rb +141 -0
- data/lib/ldclient-rb/impl/store_data_set_sorter.rb +55 -0
- data/lib/ldclient-rb/impl/unbounded_pool.rb +34 -0
- data/lib/ldclient-rb/impl/util.rb +95 -0
- data/lib/ldclient-rb/impl.rb +13 -0
- data/lib/ldclient-rb/in_memory_store.rb +100 -0
- data/lib/ldclient-rb/integrations/consul.rb +45 -0
- data/lib/ldclient-rb/integrations/dynamodb.rb +92 -0
- data/lib/ldclient-rb/integrations/file_data.rb +108 -0
- data/lib/ldclient-rb/integrations/redis.rb +98 -0
- data/lib/ldclient-rb/integrations/test_data/flag_builder.rb +663 -0
- data/lib/ldclient-rb/integrations/test_data.rb +213 -0
- data/lib/ldclient-rb/integrations/util/store_wrapper.rb +246 -0
- data/lib/ldclient-rb/integrations.rb +6 -0
- data/lib/ldclient-rb/interfaces.rb +974 -0
- data/lib/ldclient-rb/ldclient.rb +822 -0
- data/lib/ldclient-rb/memoized_value.rb +32 -0
- data/lib/ldclient-rb/migrations.rb +230 -0
- data/lib/ldclient-rb/non_blocking_thread_pool.rb +46 -0
- data/lib/ldclient-rb/polling.rb +102 -0
- data/lib/ldclient-rb/reference.rb +295 -0
- data/lib/ldclient-rb/requestor.rb +102 -0
- data/lib/ldclient-rb/simple_lru_cache.rb +25 -0
- data/lib/ldclient-rb/stream.rb +196 -0
- data/lib/ldclient-rb/util.rb +132 -0
- data/lib/ldclient-rb/version.rb +3 -0
- data/lib/ldclient-rb.rb +27 -0
- metadata +400 -0
data/lib/ldclient-rb/impl/integrations/dynamodb_impl.rb (new file)

@@ -0,0 +1,300 @@

```ruby
require "json"

module LaunchDarkly
  module Impl
    module Integrations
      module DynamoDB
        class DynamoDBStoreImplBase
          begin
            require "aws-sdk-dynamodb"
            AWS_SDK_ENABLED = true
          rescue ScriptError, StandardError
            begin
              require "aws-sdk"
              AWS_SDK_ENABLED = true
            rescue ScriptError, StandardError
              AWS_SDK_ENABLED = false
            end
          end

          PARTITION_KEY = "namespace"
          SORT_KEY = "key"

          def initialize(table_name, opts)
            unless AWS_SDK_ENABLED
              raise RuntimeError.new("can't use #{description} without the aws-sdk or aws-sdk-dynamodb gem")
            end

            @table_name = table_name
            @prefix = opts[:prefix] ? (opts[:prefix] + ":") : ""
            @logger = opts[:logger] || Config.default_logger

            if !opts[:existing_client].nil?
              @client = opts[:existing_client]
            else
              @client = Aws::DynamoDB::Client.new(opts[:dynamodb_opts] || {})
            end

            @logger.info("#{description}: using DynamoDB table \"#{table_name}\"")
          end

          def stop
            # AWS client doesn't seem to have a close method
          end

          protected def description
            "DynamoDB"
          end
        end

        #
        # Internal implementation of the DynamoDB feature store, intended to be used with CachingStoreWrapper.
        #
        class DynamoDBFeatureStoreCore < DynamoDBStoreImplBase
          VERSION_ATTRIBUTE = "version"
          ITEM_JSON_ATTRIBUTE = "item"

          def initialize(table_name, opts)
            super(table_name, opts)
          end

          def description
            "DynamoDBFeatureStore"
          end

          def available?
            resp = get_item_by_keys(inited_key, inited_key)
            !resp.item.nil? && resp.item.length > 0
            true
          rescue
            false
          end

          def init_internal(all_data)
            # Start by reading the existing keys; we will later delete any of these that weren't in all_data.
            unused_old_keys = read_existing_keys(all_data.keys)

            requests = []
            num_items = 0

            # Insert or update every provided item
            all_data.each do |kind, items|
              items.values.each do |item|
                requests.push({ put_request: { item: marshal_item(kind, item) } })
                unused_old_keys.delete([ namespace_for_kind(kind), item[:key] ])
                num_items = num_items + 1
              end
            end

            # Now delete any previously existing items whose keys were not in the current data
            unused_old_keys.each do |tuple|
              del_item = make_keys_hash(tuple[0], tuple[1])
              requests.push({ delete_request: { key: del_item } })
            end

            # Now set the special key that we check in initialized_internal?
            inited_item = make_keys_hash(inited_key, inited_key)
            requests.push({ put_request: { item: inited_item } })

            DynamoDBUtil.batch_write_requests(@client, @table_name, requests)

            @logger.info { "Initialized table #{@table_name} with #{num_items} items" }
          end

          def get_internal(kind, key)
            resp = get_item_by_keys(namespace_for_kind(kind), key)
            unmarshal_item(kind, resp.item)
          end

          def get_all_internal(kind)
            items_out = {}
            req = make_query_for_kind(kind)
            while true
              resp = @client.query(req)
              resp.items.each do |item|
                item_out = unmarshal_item(kind, item)
                items_out[item_out[:key].to_sym] = item_out
              end
              break if resp.last_evaluated_key.nil? || resp.last_evaluated_key.length == 0
              req.exclusive_start_key = resp.last_evaluated_key
            end
            items_out
          end

          def upsert_internal(kind, new_item)
            encoded_item = marshal_item(kind, new_item)
            begin
              @client.put_item({
                table_name: @table_name,
                item: encoded_item,
                condition_expression: "attribute_not_exists(#namespace) or attribute_not_exists(#key) or :version > #version",
                expression_attribute_names: {
                  "#namespace" => PARTITION_KEY,
                  "#key" => SORT_KEY,
                  "#version" => VERSION_ATTRIBUTE,
                },
                expression_attribute_values: {
                  ":version" => new_item[:version],
                },
              })
              new_item
            rescue Aws::DynamoDB::Errors::ConditionalCheckFailedException
              # The item was not updated because there's a newer item in the database.
              # We must now read the item that's in the database and return it, so CachingStoreWrapper can cache it.
              get_internal(kind, new_item[:key])
            end
          end

          def initialized_internal?
            resp = get_item_by_keys(inited_key, inited_key)
            !resp.item.nil? && resp.item.length > 0
          end

          private

          def prefixed_namespace(base_str)
            @prefix + base_str
          end

          def namespace_for_kind(kind)
            prefixed_namespace(kind[:namespace])
          end

          def inited_key
            prefixed_namespace("$inited")
          end

          def make_keys_hash(namespace, key)
            {
              PARTITION_KEY => namespace,
              SORT_KEY => key,
            }
          end

          def make_query_for_kind(kind)
            {
              table_name: @table_name,
              consistent_read: true,
              key_conditions: {
                PARTITION_KEY => {
                  comparison_operator: "EQ",
                  attribute_value_list: [ namespace_for_kind(kind) ],
                },
              },
            }
          end

          def get_item_by_keys(namespace, key)
            @client.get_item({
              table_name: @table_name,
              key: make_keys_hash(namespace, key),
            })
          end

          def read_existing_keys(kinds)
            keys = Set.new
            kinds.each do |kind|
              req = make_query_for_kind(kind).merge({
                projection_expression: "#namespace, #key",
                expression_attribute_names: {
                  "#namespace" => PARTITION_KEY,
                  "#key" => SORT_KEY,
                },
              })
              while true
                resp = @client.query(req)
                resp.items.each do |item|
                  namespace = item[PARTITION_KEY]
                  key = item[SORT_KEY]
                  keys.add([ namespace, key ])
                end
                break if resp.last_evaluated_key.nil? || resp.last_evaluated_key.length == 0
                req.exclusive_start_key = resp.last_evaluated_key
              end
            end
            keys
          end

          def marshal_item(kind, item)
            make_keys_hash(namespace_for_kind(kind), item[:key]).merge({
              VERSION_ATTRIBUTE => item[:version],
              ITEM_JSON_ATTRIBUTE => Model.serialize(kind, item),
            })
          end

          def unmarshal_item(kind, item)
            return nil if item.nil? || item.length == 0
            json_attr = item[ITEM_JSON_ATTRIBUTE]
            raise RuntimeError.new("DynamoDB map did not contain expected item string") if json_attr.nil?
            Model.deserialize(kind, json_attr)
          end
        end

        class DynamoDBBigSegmentStore < DynamoDBStoreImplBase
          KEY_METADATA = 'big_segments_metadata'
          KEY_CONTEXT_DATA = 'big_segments_user'
          ATTR_SYNC_TIME = 'synchronizedOn'
          ATTR_INCLUDED = 'included'
          ATTR_EXCLUDED = 'excluded'

          def initialize(table_name, opts)
            super(table_name, opts)
          end

          def description
            "DynamoDBBigSegmentStore"
          end

          def get_metadata
            key = @prefix + KEY_METADATA
            data = @client.get_item(
              table_name: @table_name,
              key: {
                PARTITION_KEY => key,
                SORT_KEY => key,
              }
            )
            timestamp = data.item && data.item[ATTR_SYNC_TIME] ?
              data.item[ATTR_SYNC_TIME] : nil
            LaunchDarkly::Interfaces::BigSegmentStoreMetadata.new(timestamp)
          end

          def get_membership(context_hash)
            data = @client.get_item(
              table_name: @table_name,
              key: {
                PARTITION_KEY => @prefix + KEY_CONTEXT_DATA,
                SORT_KEY => context_hash,
              })
            return nil unless data.item
            excluded_refs = data.item[ATTR_EXCLUDED] || []
            included_refs = data.item[ATTR_INCLUDED] || []
            if excluded_refs.empty? && included_refs.empty?
              nil
            else
              membership = {}
              excluded_refs.each { |ref| membership[ref] = false }
              included_refs.each { |ref| membership[ref] = true }
              membership
            end
          end
        end

        class DynamoDBUtil
          #
          # Calls client.batch_write_item as many times as necessary to submit all of the given requests.
          # The requests array is consumed.
          #
          def self.batch_write_requests(client, table, requests)
            batch_size = 25
            while true
              chunk = requests.shift(batch_size)
              break if chunk.empty?
              client.batch_write_item({ request_items: { table => chunk } })
            end
          end
        end
      end
    end
  end
end
```
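This core is not meant to be constructed directly; the public entry points in `data/lib/ldclient-rb/integrations/dynamodb.rb` wrap it (via `CachingStoreWrapper`, per the comment on `DynamoDBFeatureStoreCore`). Below is a minimal usage sketch, assuming the package's `LaunchDarkly::Integrations::DynamoDB.new_feature_store` factory; the table name, prefix, region, and SDK key are placeholders.

```ruby
require "ldclient-rb"

# Placeholders throughout. The aws-sdk-dynamodb gem must be available
# (see the AWS_SDK_ENABLED guard above), and the table must already exist
# with the "namespace" partition key and "key" sort key this store uses.
store = LaunchDarkly::Integrations::DynamoDB.new_feature_store(
  "my-ld-table",
  prefix: "my-env",                          # optional key prefix, handled in DynamoDBStoreImplBase#initialize
  dynamodb_opts: { region: "us-east-1" }     # passed through to Aws::DynamoDB::Client.new
)

config = LaunchDarkly::Config.new(feature_store: store)
client = LaunchDarkly::LDClient.new("my-sdk-key", config)
```

Note the `condition_expression` in `upsert_internal`: version ordering is enforced server-side, so a concurrent writer cannot overwrite a newer item with an older one; on a conditional-check failure the store re-reads the current item so the caching wrapper caches the winner.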
data/lib/ldclient-rb/impl/integrations/file_data_source.rb (new file)

@@ -0,0 +1,229 @@

```ruby
require 'ldclient-rb/in_memory_store'
require 'ldclient-rb/util'

require 'concurrent/atomics'
require 'json'
require 'yaml'
require 'pathname'

module LaunchDarkly
  module Impl
    module Integrations
      class FileDataSourceImpl
        # To avoid pulling in 'listen' and its transitive dependencies for people who aren't using the
        # file data source or who don't need auto-updating, we only enable auto-update if the 'listen'
        # gem has been provided by the host app.
        @@have_listen = false
        begin
          require 'listen'
          @@have_listen = true
        rescue LoadError
          # Ignored
        end

        #
        # @param data_store [LaunchDarkly::Interfaces::FeatureStore]
        # @param data_source_update_sink [LaunchDarkly::Interfaces::DataSource::UpdateSink, nil] Might be nil for backwards compatibility reasons.
        # @param logger [Logger]
        # @param options [Hash]
        #
        def initialize(data_store, data_source_update_sink, logger, options={})
          @data_store = data_source_update_sink || data_store
          @data_source_update_sink = data_source_update_sink
          @logger = logger
          @paths = options[:paths] || []
          if @paths.is_a? String
            @paths = [ @paths ]
          end
          @auto_update = options[:auto_update]
          @use_listen = @auto_update && @@have_listen && !options[:force_polling]
          @poll_interval = options[:poll_interval] || 1
          @initialized = Concurrent::AtomicBoolean.new(false)
          @ready = Concurrent::Event.new

          @version_lock = Mutex.new
          @last_version = 1
        end

        def initialized?
          @initialized.value
        end

        def start
          ready = Concurrent::Event.new

          # We will return immediately regardless of whether the file load succeeded or failed -
          # the difference can be detected by checking "initialized?"
          ready.set

          load_all

          if @auto_update
            # If we're going to watch files, then the start event will be set the first time we get
            # a successful load.
            @listener = start_listener
          end

          ready
        end

        def stop
          @listener.stop unless @listener.nil?
        end

        private

        def load_all
          all_data = {
            FEATURES => {},
            SEGMENTS => {},
          }
          @paths.each do |path|
            begin
              load_file(path, all_data)
            rescue => exn
              LaunchDarkly::Util.log_exception(@logger, "Unable to load flag data from \"#{path}\"", exn)
              @data_source_update_sink&.update_status(
                LaunchDarkly::Interfaces::DataSource::Status::INTERRUPTED,
                LaunchDarkly::Interfaces::DataSource::ErrorInfo.new(LaunchDarkly::Interfaces::DataSource::ErrorInfo::INVALID_DATA, 0, exn.to_s, Time.now)
              )
              return
            end
          end
          @data_store.init(all_data)
          @data_source_update_sink&.update_status(LaunchDarkly::Interfaces::DataSource::Status::VALID, nil)
          @initialized.make_true
        end

        def load_file(path, all_data)
          version = 1
          @version_lock.synchronize {
            version = @last_version
            @last_version += 1
          }

          parsed = parse_content(IO.read(path))
          (parsed[:flags] || {}).each do |key, flag|
            flag[:version] = version
            add_item(all_data, FEATURES, flag)
          end
          (parsed[:flagValues] || {}).each do |key, value|
            add_item(all_data, FEATURES, make_flag_with_value(key.to_s, value, version))
          end
          (parsed[:segments] || {}).each do |key, segment|
            segment[:version] = version
            add_item(all_data, SEGMENTS, segment)
          end
        end

        def parse_content(content)
          # We can use the Ruby YAML parser for both YAML and JSON (JSON is a subset of YAML and while
          # not all YAML parsers handle it correctly, we have verified that the Ruby one does, at least
          # for all the samples of actual flag data that we've tested).
          symbolize_all_keys(YAML.safe_load(content))
        end

        def symbolize_all_keys(value)
          # This is necessary because YAML.load doesn't have an option for parsing keys as symbols, and
          # the SDK expects all objects to be formatted that way.
          if value.is_a?(Hash)
            value.map{ |k, v| [k.to_sym, symbolize_all_keys(v)] }.to_h
          elsif value.is_a?(Array)
            value.map{ |v| symbolize_all_keys(v) }
          else
            value
          end
        end

        def add_item(all_data, kind, item)
          items = all_data[kind]
          raise ArgumentError, "Received unknown item kind #{kind[:namespace]} in add_data" if items.nil? # shouldn't be possible since we preinitialize the hash
          key = item[:key].to_sym
          unless items[key].nil?
            raise ArgumentError, "#{kind[:namespace]} key \"#{item[:key]}\" was used more than once"
          end
          items[key] = Model.deserialize(kind, item)
        end

        def make_flag_with_value(key, value, version)
          {
            key: key,
            on: true,
            version: version,
            fallthrough: { variation: 0 },
            variations: [ value ],
          }
        end

        def start_listener
          resolved_paths = @paths.map { |p| Pathname.new(File.absolute_path(p)).realpath.to_s }
          if @use_listen
            start_listener_with_listen_gem(resolved_paths)
          else
            FileDataSourcePoller.new(resolved_paths, @poll_interval, self.method(:load_all), @logger)
          end
        end

        def start_listener_with_listen_gem(resolved_paths)
          path_set = resolved_paths.to_set
          dir_paths = resolved_paths.map{ |p| File.dirname(p) }.uniq
          opts = { latency: @poll_interval }
          l = Listen.to(*dir_paths, opts) do |modified, added, removed|
            paths = modified + added + removed
            if paths.any? { |p| path_set.include?(p) }
              load_all
            end
          end
          l.start
          l
        end

        #
        # Used internally by FileDataSource to track data file changes if the 'listen' gem is not available.
        #
        class FileDataSourcePoller
          def initialize(resolved_paths, interval, reloader, logger)
            @stopped = Concurrent::AtomicBoolean.new(false)
            get_file_times = Proc.new do
              ret = {}
              resolved_paths.each do |path|
                begin
                  ret[path] = File.mtime(path)
                rescue Errno::ENOENT
                  ret[path] = nil
                end
              end
              ret
            end
            last_times = get_file_times.call
            @thread = Thread.new do
              while true
                sleep interval
                break if @stopped.value
                begin
                  new_times = get_file_times.call
                  changed = false
                  last_times.each do |path, old_time|
                    new_time = new_times[path]
                    if !new_time.nil? && new_time != old_time
                      changed = true
                      break
                    end
                  end
                  reloader.call if changed
                rescue => exn
                  LaunchDarkly::Util.log_exception(logger, "Unexpected exception in FileDataSourcePoller", exn)
                end
              end
            end
          end

          def stop
            @stopped.make_true
            @thread.run # wakes it up if it's sleeping
          end
        end
      end
    end
  end
end
```
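As with the DynamoDB store, this implementation is reached through a public wrapper, `data/lib/ldclient-rb/integrations/file_data.rb`, rather than instantiated directly. A minimal sketch follows, assuming the package's `LaunchDarkly::Integrations::FileData.data_source` factory; the file path and SDK key are hypothetical.

```ruby
require "ldclient-rb"

# "./flags.yml" is a hypothetical local file. YAML and JSON both work here,
# since parse_content above routes all content through the Ruby YAML parser.
data_source = LaunchDarkly::Integrations::FileData.data_source(
  paths: ["./flags.yml"],
  auto_update: true,        # uses the 'listen' gem when present, else FileDataSourcePoller
  poll_interval: 1
)

config = LaunchDarkly::Config.new(
  data_source: data_source,
  send_events: false        # typical for local development against file data
)
client = LaunchDarkly::LDClient.new("fake-sdk-key", config)
```

Note that `start` sets its ready event immediately regardless of outcome, so callers should check `initialized?` to distinguish a successful first load from a failed one.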