ldclient-rb 5.4.3 → 5.5.0
This diff compares the contents of two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.circleci/config.yml +33 -6
- data/CHANGELOG.md +19 -0
- data/CONTRIBUTING.md +0 -12
- data/Gemfile.lock +22 -3
- data/README.md +41 -35
- data/ldclient-rb.gemspec +4 -3
- data/lib/ldclient-rb.rb +9 -1
- data/lib/ldclient-rb/cache_store.rb +1 -0
- data/lib/ldclient-rb/config.rb +201 -90
- data/lib/ldclient-rb/evaluation.rb +56 -8
- data/lib/ldclient-rb/event_summarizer.rb +3 -0
- data/lib/ldclient-rb/events.rb +16 -0
- data/lib/ldclient-rb/expiring_cache.rb +1 -0
- data/lib/ldclient-rb/file_data_source.rb +18 -13
- data/lib/ldclient-rb/flags_state.rb +3 -2
- data/lib/ldclient-rb/impl.rb +13 -0
- data/lib/ldclient-rb/impl/integrations/consul_impl.rb +158 -0
- data/lib/ldclient-rb/impl/integrations/dynamodb_impl.rb +228 -0
- data/lib/ldclient-rb/impl/integrations/redis_impl.rb +155 -0
- data/lib/ldclient-rb/impl/store_client_wrapper.rb +47 -0
- data/lib/ldclient-rb/impl/store_data_set_sorter.rb +55 -0
- data/lib/ldclient-rb/in_memory_store.rb +15 -4
- data/lib/ldclient-rb/integrations.rb +55 -0
- data/lib/ldclient-rb/integrations/consul.rb +38 -0
- data/lib/ldclient-rb/integrations/dynamodb.rb +47 -0
- data/lib/ldclient-rb/integrations/redis.rb +55 -0
- data/lib/ldclient-rb/integrations/util/store_wrapper.rb +230 -0
- data/lib/ldclient-rb/interfaces.rb +153 -0
- data/lib/ldclient-rb/ldclient.rb +135 -77
- data/lib/ldclient-rb/memoized_value.rb +2 -0
- data/lib/ldclient-rb/newrelic.rb +1 -0
- data/lib/ldclient-rb/non_blocking_thread_pool.rb +3 -3
- data/lib/ldclient-rb/polling.rb +1 -0
- data/lib/ldclient-rb/redis_store.rb +24 -190
- data/lib/ldclient-rb/requestor.rb +3 -2
- data/lib/ldclient-rb/simple_lru_cache.rb +1 -0
- data/lib/ldclient-rb/stream.rb +22 -10
- data/lib/ldclient-rb/user_filter.rb +1 -0
- data/lib/ldclient-rb/util.rb +1 -0
- data/lib/ldclient-rb/version.rb +1 -1
- data/scripts/gendocs.sh +12 -0
- data/spec/feature_store_spec_base.rb +173 -72
- data/spec/file_data_source_spec.rb +2 -2
- data/spec/http_util.rb +103 -0
- data/spec/in_memory_feature_store_spec.rb +1 -1
- data/spec/integrations/consul_feature_store_spec.rb +41 -0
- data/spec/integrations/dynamodb_feature_store_spec.rb +104 -0
- data/spec/integrations/store_wrapper_spec.rb +276 -0
- data/spec/ldclient_spec.rb +83 -4
- data/spec/redis_feature_store_spec.rb +25 -16
- data/spec/requestor_spec.rb +44 -38
- data/spec/stream_spec.rb +18 -18
- metadata +55 -33
- data/lib/sse_client.rb +0 -4
- data/lib/sse_client/backoff.rb +0 -38
- data/lib/sse_client/sse_client.rb +0 -171
- data/lib/sse_client/sse_events.rb +0 -67
- data/lib/sse_client/streaming_http.rb +0 -199
- data/spec/sse_client/sse_client_spec.rb +0 -177
- data/spec/sse_client/sse_events_spec.rb +0 -100
- data/spec/sse_client/sse_shared.rb +0 -82
- data/spec/sse_client/streaming_http_spec.rb +0 -263
data/lib/ldclient-rb/impl/integrations/dynamodb_impl.rb (new file)

```diff
@@ -0,0 +1,228 @@
+require "json"
+
+module LaunchDarkly
+  module Impl
+    module Integrations
+      module DynamoDB
+        #
+        # Internal implementation of the DynamoDB feature store, intended to be used with CachingStoreWrapper.
+        #
+        class DynamoDBFeatureStoreCore
+          begin
+            require "aws-sdk-dynamodb"
+            AWS_SDK_ENABLED = true
+          rescue ScriptError, StandardError
+            begin
+              require "aws-sdk"
+              AWS_SDK_ENABLED = true
+            rescue ScriptError, StandardError
+              AWS_SDK_ENABLED = false
+            end
+          end
+
+          PARTITION_KEY = "namespace"
+          SORT_KEY = "key"
+
+          VERSION_ATTRIBUTE = "version"
+          ITEM_JSON_ATTRIBUTE = "item"
+
+          def initialize(table_name, opts)
+            if !AWS_SDK_ENABLED
+              raise RuntimeError.new("can't use DynamoDB feature store without the aws-sdk or aws-sdk-dynamodb gem")
+            end
+
+            @table_name = table_name
+            @prefix = opts[:prefix]
+            @logger = opts[:logger] || Config.default_logger
+
+            if !opts[:existing_client].nil?
+              @client = opts[:existing_client]
+            else
+              @client = Aws::DynamoDB::Client.new(opts[:dynamodb_opts] || {})
+            end
+
+            @logger.info("DynamoDBFeatureStore: using DynamoDB table \"#{table_name}\"")
+          end
+
+          def init_internal(all_data)
+            # Start by reading the existing keys; we will later delete any of these that weren't in all_data.
+            unused_old_keys = read_existing_keys(all_data.keys)
+
+            requests = []
+            num_items = 0
+
+            # Insert or update every provided item
+            all_data.each do |kind, items|
+              items.values.each do |item|
+                requests.push({ put_request: { item: marshal_item(kind, item) } })
+                unused_old_keys.delete([ namespace_for_kind(kind), item[:key] ])
+                num_items = num_items + 1
+              end
+            end
+
+            # Now delete any previously existing items whose keys were not in the current data
+            unused_old_keys.each do |tuple|
+              del_item = make_keys_hash(tuple[0], tuple[1])
+              requests.push({ delete_request: { key: del_item } })
+            end
+
+            # Now set the special key that we check in initialized_internal?
+            inited_item = make_keys_hash(inited_key, inited_key)
+            requests.push({ put_request: { item: inited_item } })
+
+            DynamoDBUtil.batch_write_requests(@client, @table_name, requests)
+
+            @logger.info { "Initialized table #{@table_name} with #{num_items} items" }
+          end
+
+          def get_internal(kind, key)
+            resp = get_item_by_keys(namespace_for_kind(kind), key)
+            unmarshal_item(resp.item)
+          end
+
+          def get_all_internal(kind)
+            items_out = {}
+            req = make_query_for_kind(kind)
+            while true
+              resp = @client.query(req)
+              resp.items.each do |item|
+                item_out = unmarshal_item(item)
+                items_out[item_out[:key].to_sym] = item_out
+              end
+              break if resp.last_evaluated_key.nil? || resp.last_evaluated_key.length == 0
+              req.exclusive_start_key = resp.last_evaluated_key
+            end
+            items_out
+          end
+
+          def upsert_internal(kind, new_item)
+            encoded_item = marshal_item(kind, new_item)
+            begin
+              @client.put_item({
+                table_name: @table_name,
+                item: encoded_item,
+                condition_expression: "attribute_not_exists(#namespace) or attribute_not_exists(#key) or :version > #version",
+                expression_attribute_names: {
+                  "#namespace" => PARTITION_KEY,
+                  "#key" => SORT_KEY,
+                  "#version" => VERSION_ATTRIBUTE
+                },
+                expression_attribute_values: {
+                  ":version" => new_item[:version]
+                }
+              })
+              new_item
+            rescue Aws::DynamoDB::Errors::ConditionalCheckFailedException
+              # The item was not updated because there's a newer item in the database.
+              # We must now read the item that's in the database and return it, so CachingStoreWrapper can cache it.
+              get_internal(kind, new_item[:key])
+            end
+          end
+
+          def initialized_internal?
+            resp = get_item_by_keys(inited_key, inited_key)
+            !resp.item.nil? && resp.item.length > 0
+          end
+
+          def stop
+            # AWS client doesn't seem to have a close method
+          end
+
+          private
+
+          def prefixed_namespace(base_str)
+            (@prefix.nil? || @prefix == "") ? base_str : "#{@prefix}:#{base_str}"
+          end
+
+          def namespace_for_kind(kind)
+            prefixed_namespace(kind[:namespace])
+          end
+
+          def inited_key
+            prefixed_namespace("$inited")
+          end
+
+          def make_keys_hash(namespace, key)
+            {
+              PARTITION_KEY => namespace,
+              SORT_KEY => key
+            }
+          end
+
+          def make_query_for_kind(kind)
+            {
+              table_name: @table_name,
+              consistent_read: true,
+              key_conditions: {
+                PARTITION_KEY => {
+                  comparison_operator: "EQ",
+                  attribute_value_list: [ namespace_for_kind(kind) ]
+                }
+              }
+            }
+          end
+
+          def get_item_by_keys(namespace, key)
+            @client.get_item({
+              table_name: @table_name,
+              key: make_keys_hash(namespace, key)
+            })
+          end
+
+          def read_existing_keys(kinds)
+            keys = Set.new
+            kinds.each do |kind|
+              req = make_query_for_kind(kind).merge({
+                projection_expression: "#namespace, #key",
+                expression_attribute_names: {
+                  "#namespace" => PARTITION_KEY,
+                  "#key" => SORT_KEY
+                }
+              })
+              while true
+                resp = @client.query(req)
+                resp.items.each do |item|
+                  namespace = item[PARTITION_KEY]
+                  key = item[SORT_KEY]
+                  keys.add([ namespace, key ])
+                end
+                break if resp.last_evaluated_key.nil? || resp.last_evaluated_key.length == 0
+                req.exclusive_start_key = resp.last_evaluated_key
+              end
+            end
+            keys
+          end
+
+          def marshal_item(kind, item)
+            make_keys_hash(namespace_for_kind(kind), item[:key]).merge({
+              VERSION_ATTRIBUTE => item[:version],
+              ITEM_JSON_ATTRIBUTE => item.to_json
+            })
+          end
+
+          def unmarshal_item(item)
+            return nil if item.nil? || item.length == 0
+            json_attr = item[ITEM_JSON_ATTRIBUTE]
+            raise RuntimeError.new("DynamoDB map did not contain expected item string") if json_attr.nil?
+            JSON.parse(json_attr, symbolize_names: true)
+          end
+        end
+
+        class DynamoDBUtil
+          #
+          # Calls client.batch_write_item as many times as necessary to submit all of the given requests.
+          # The requests array is consumed.
+          #
+          def self.batch_write_requests(client, table, requests)
+            batch_size = 25
+            while true
+              chunk = requests.shift(batch_size)
+              break if chunk.empty?
+              client.batch_write_item({ request_items: { table => chunk } })
+            end
+          end
+        end
+      end
+    end
+  end
+end
```
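As its comment says, `DynamoDBFeatureStoreCore` is meant to sit behind the SDK's caching wrapper rather than be constructed directly. A minimal sketch of how an application would use the new store, assuming the public factory `new_feature_store` exposed by `data/lib/ldclient-rb/integrations/dynamodb.rb` (listed above; its body is not shown in this diff) and a DynamoDB table that already exists with `namespace`/`key` as its partition and sort keys:

```ruby
# A sketch, not part of this diff. The factory name and option keys are
# assumptions based on the integrations/dynamodb.rb entry point listed above.
require "ldclient-rb"

store = LaunchDarkly::Integrations::DynamoDB.new_feature_store(
  "my-ld-table",                          # pre-created table; this code never creates tables
  prefix: "ld",                           # optional key prefix, read by DynamoDBFeatureStoreCore
  dynamodb_opts: { region: "us-east-1" }  # passed straight through to Aws::DynamoDB::Client.new
)

config = LaunchDarkly::Config.new(feature_store: store)
client = LaunchDarkly::LDClient.new("your-sdk-key", config)
```

Note how `upsert_internal` stays correct under concurrent writers without any locking: the `condition_expression` makes DynamoDB itself reject a `put_item` whose version is not strictly newer, and the `ConditionalCheckFailedException` path re-reads the winning item so the caching layer can store it.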
data/lib/ldclient-rb/impl/integrations/redis_impl.rb (new file)

```diff
@@ -0,0 +1,155 @@
+require "concurrent/atomics"
+require "json"
+
+module LaunchDarkly
+  module Impl
+    module Integrations
+      module Redis
+        #
+        # Internal implementation of the Redis feature store, intended to be used with CachingStoreWrapper.
+        #
+        class RedisFeatureStoreCore
+          begin
+            require "redis"
+            require "connection_pool"
+            REDIS_ENABLED = true
+          rescue ScriptError, StandardError
+            REDIS_ENABLED = false
+          end
+
+          def initialize(opts)
+            if !REDIS_ENABLED
+              raise RuntimeError.new("can't use Redis feature store because one of these gems is missing: redis, connection_pool")
+            end
+
+            @redis_opts = opts[:redis_opts] || Hash.new
+            if opts[:redis_url]
+              @redis_opts[:url] = opts[:redis_url]
+            end
+            if !@redis_opts.include?(:url)
+              @redis_opts[:url] = LaunchDarkly::Integrations::Redis::default_redis_url
+            end
+            max_connections = opts[:max_connections] || 16
+            @pool = opts[:pool] || ConnectionPool.new(size: max_connections) do
+              ::Redis.new(@redis_opts)
+            end
+            @prefix = opts[:prefix] || LaunchDarkly::Integrations::Redis::default_prefix
+            @logger = opts[:logger] || Config.default_logger
+            @test_hook = opts[:test_hook]  # used for unit tests, deliberately undocumented
+
+            @stopped = Concurrent::AtomicBoolean.new(false)
+
+            with_connection do |redis|
+              @logger.info("RedisFeatureStore: using Redis instance at #{redis.connection[:host]}:#{redis.connection[:port]} \
+                and prefix: #{@prefix}")
+            end
+          end
+
+          def init_internal(all_data)
+            count = 0
+            with_connection do |redis|
+              redis.multi do |multi|
+                all_data.each do |kind, items|
+                  multi.del(items_key(kind))
+                  count = count + items.count
+                  items.each do |key, item|
+                    multi.hset(items_key(kind), key, item.to_json)
+                  end
+                end
+                multi.set(inited_key, inited_key)
+              end
+            end
+            @logger.info { "RedisFeatureStore: initialized with #{count} items" }
+          end
+
+          def get_internal(kind, key)
+            with_connection do |redis|
+              get_redis(redis, kind, key)
+            end
+          end
+
+          def get_all_internal(kind)
+            fs = {}
+            with_connection do |redis|
+              hashfs = redis.hgetall(items_key(kind))
+              hashfs.each do |k, json_item|
+                f = JSON.parse(json_item, symbolize_names: true)
+                fs[k.to_sym] = f
+              end
+            end
+            fs
+          end
+
+          def upsert_internal(kind, new_item)
+            base_key = items_key(kind)
+            key = new_item[:key]
+            try_again = true
+            final_item = new_item
+            while try_again
+              try_again = false
+              with_connection do |redis|
+                redis.watch(base_key) do
+                  old_item = get_redis(redis, kind, key)
+                  before_update_transaction(base_key, key)
+                  if old_item.nil? || old_item[:version] < new_item[:version]
+                    result = redis.multi do |multi|
+                      multi.hset(base_key, key, new_item.to_json)
+                    end
+                    if result.nil?
+                      @logger.debug { "RedisFeatureStore: concurrent modification detected, retrying" }
+                      try_again = true
+                    end
+                  else
+                    final_item = old_item
+                    action = new_item[:deleted] ? "delete" : "update"
+                    @logger.warn { "RedisFeatureStore: attempted to #{action} #{key} version: #{old_item[:version]} \
+                      in '#{kind[:namespace]}' with a version that is the same or older: #{new_item[:version]}" }
+                  end
+                  redis.unwatch
+                end
+              end
+            end
+            final_item
+          end
+
+          def initialized_internal?
+            with_connection { |redis| redis.exists(inited_key) }
+          end
+
+          def stop
+            if @stopped.make_true
+              @pool.shutdown { |redis| redis.close }
+            end
+          end
+
+          private
+
+          def before_update_transaction(base_key, key)
+            @test_hook.before_update_transaction(base_key, key) if !@test_hook.nil?
+          end
+
+          def items_key(kind)
+            @prefix + ":" + kind[:namespace]
+          end
+
+          def cache_key(kind, key)
+            kind[:namespace] + ":" + key.to_s
+          end
+
+          def inited_key
+            @prefix + ":$inited"
+          end
+
+          def with_connection
+            @pool.with { |redis| yield(redis) }
+          end
+
+          def get_redis(redis, kind, key)
+            json_item = redis.hget(items_key(kind), key)
+            json_item.nil? ? nil : JSON.parse(json_item, symbolize_names: true)
+          end
+        end
+      end
+    end
+  end
+end
```
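The `upsert_internal` retry loop is the standard Redis optimistic-locking pattern: `watch` the hash key, read, then `multi`; if another writer touches the key between the read and the transaction, `multi` returns nil and the whole read-compare-write cycle is retried. Both cores are described as "intended to be used with CachingStoreWrapper", which lives in `data/lib/ldclient-rb/integrations/util/store_wrapper.rb` (+230 above): the wrapper implements the public `FeatureStore` interface, keeps a local cache, and calls the core's `*_internal` methods on cache misses. A sketch of that composition; the constructor and option names here are assumptions, since the wrapper's source is not shown in this hunk:

```ruby
# A sketch, assuming CachingStoreWrapper.new(core, opts) and an :expiration
# option for the cache TTL; neither is confirmed by the hunks shown here.
core = LaunchDarkly::Impl::Integrations::Redis::RedisFeatureStoreCore.new(
  redis_url: "redis://localhost:6379/0",
  prefix: "launchdarkly"
)

store = LaunchDarkly::Integrations::Util::CachingStoreWrapper.new(core, expiration: 15)

# store now behaves like any FeatureStore: reads are served from the cache when
# possible and otherwise delegate to the core's get_internal/get_all_internal;
# writes go through upsert_internal and update the cache with whatever wins.
```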
data/lib/ldclient-rb/impl/store_client_wrapper.rb (new file)

```diff
@@ -0,0 +1,47 @@
+require "ldclient-rb/interfaces"
+require "ldclient-rb/impl/store_data_set_sorter"
+
+module LaunchDarkly
+  module Impl
+    #
+    # Provides additional behavior that the client requires before or after feature store operations.
+    # Currently this just means sorting the data set for init(). In the future we may also use this
+    # to provide an update listener capability.
+    #
+    class FeatureStoreClientWrapper
+      include Interfaces::FeatureStore
+
+      def initialize(store)
+        @store = store
+      end
+
+      def init(all_data)
+        @store.init(FeatureStoreDataSetSorter.sort_all_collections(all_data))
+      end
+
+      def get(kind, key)
+        @store.get(kind, key)
+      end
+
+      def all(kind)
+        @store.all(kind)
+      end
+
+      def upsert(kind, item)
+        @store.upsert(kind, item)
+      end
+
+      def delete(kind, key, version)
+        @store.delete(kind, key, version)
+      end
+
+      def initialized?
+        @store.initialized?
+      end
+
+      def stop
+        @store.stop
+      end
+    end
+  end
+end
```
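Every method here is a plain delegation except `init`, which routes the data set through the sorter shown below; the client wraps whatever store it was configured with, so user code never constructs this class itself. A tiny illustration (hypothetical, standalone):

```ruby
# Illustration only: the SDK performs this wrapping internally in ldclient.rb.
store = LaunchDarkly::InMemoryFeatureStore.new
wrapped = LaunchDarkly::Impl::FeatureStoreClientWrapper.new(store)
# wrapped.init(data) calls store.init with dependency-sorted data;
# get/all/upsert/delete/initialized?/stop all pass through unchanged.
```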
data/lib/ldclient-rb/impl/store_data_set_sorter.rb (new file)

```diff
@@ -0,0 +1,55 @@
+
+module LaunchDarkly
+  module Impl
+    #
+    # Implements a dependency graph ordering for data to be stored in a feature store. We must use this
+    # on every data set that will be passed to the feature store's init() method.
+    #
+    class FeatureStoreDataSetSorter
+      #
+      # Returns a copy of the input hash that has the following guarantees: the iteration order of the outer
+      # hash will be in ascending order by the VersionDataKind's :priority property (if any), and for each
+      # data kind that has a :get_dependency_keys function, the inner hash will have an iteration order
+      # where B is before A if A has a dependency on B.
+      #
+      # This implementation relies on the fact that hashes in Ruby have an iteration order that is the same
+      # as the insertion order. Also, due to the way we deserialize JSON received from LaunchDarkly, the
+      # keys in the inner hash will always be symbols.
+      #
+      def self.sort_all_collections(all_data)
+        outer_hash = {}
+        kinds = all_data.keys.sort_by { |k|
+          k[:priority].nil? ? k[:namespace].length : k[:priority]  # arbitrary order if priority is unknown
+        }
+        kinds.each do |kind|
+          items = all_data[kind]
+          outer_hash[kind] = self.sort_collection(kind, items)
+        end
+        outer_hash
+      end
+
+      def self.sort_collection(kind, input)
+        dependency_fn = kind[:get_dependency_keys]
+        return input if dependency_fn.nil? || input.empty?
+        remaining_items = input.clone
+        items_out = {}
+        while !remaining_items.empty?
+          # pick a random item that hasn't been updated yet
+          key, item = remaining_items.first
+          self.add_with_dependencies_first(item, dependency_fn, remaining_items, items_out)
+        end
+        items_out
+      end
+
+      def self.add_with_dependencies_first(item, dependency_fn, remaining_items, items_out)
+        item_key = item[:key].to_sym
+        remaining_items.delete(item_key)  # we won't need to visit this item again
+        dependency_fn.call(item).each do |dep_key|
+          dep_item = remaining_items[dep_key.to_sym]
+          self.add_with_dependencies_first(dep_item, dependency_fn, remaining_items, items_out) if !dep_item.nil?
+        end
+        items_out[item_key] = item
+      end
+    end
+  end
+end
```
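To see the ordering guarantee concretely: with a data kind whose `:get_dependency_keys` returns a flag's prerequisite keys (a stand-in here for the SDK's real FEATURES kind), a flag is always inserted after its prerequisites, so a store replaying `init` data in order never writes a flag before the flags it depends on. A self-contained sketch:

```ruby
require "ldclient-rb"

# Hypothetical data kind for illustration; the real FEATURES kind defines
# :get_dependency_keys to return a flag's prerequisite keys.
kind = {
  namespace: "features",
  priority: 1,
  get_dependency_keys: lambda { |flag| (flag[:prerequisites] || []).map { |p| p[:key] } }
}

flags = {
  a: { key: "a", prerequisites: [ { key: "b" } ] },  # "a" depends on "b"
  b: { key: "b", prerequisites: [] }
}

sorted = LaunchDarkly::Impl::FeatureStoreDataSetSorter.sort_all_collections({ kind => flags })
puts sorted[kind].keys.inspect  # => [:b, :a] -- the prerequisite is inserted first
```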