launchdarkly-server-sdk 8.8.3-java
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE.txt +13 -0
- data/README.md +61 -0
- data/lib/launchdarkly-server-sdk.rb +1 -0
- data/lib/ldclient-rb/cache_store.rb +45 -0
- data/lib/ldclient-rb/config.rb +658 -0
- data/lib/ldclient-rb/context.rb +565 -0
- data/lib/ldclient-rb/evaluation_detail.rb +387 -0
- data/lib/ldclient-rb/events.rb +642 -0
- data/lib/ldclient-rb/expiring_cache.rb +77 -0
- data/lib/ldclient-rb/flags_state.rb +88 -0
- data/lib/ldclient-rb/impl/big_segments.rb +117 -0
- data/lib/ldclient-rb/impl/broadcaster.rb +78 -0
- data/lib/ldclient-rb/impl/context.rb +96 -0
- data/lib/ldclient-rb/impl/context_filter.rb +166 -0
- data/lib/ldclient-rb/impl/data_source.rb +188 -0
- data/lib/ldclient-rb/impl/data_store.rb +109 -0
- data/lib/ldclient-rb/impl/dependency_tracker.rb +102 -0
- data/lib/ldclient-rb/impl/diagnostic_events.rb +129 -0
- data/lib/ldclient-rb/impl/evaluation_with_hook_result.rb +34 -0
- data/lib/ldclient-rb/impl/evaluator.rb +539 -0
- data/lib/ldclient-rb/impl/evaluator_bucketing.rb +86 -0
- data/lib/ldclient-rb/impl/evaluator_helpers.rb +50 -0
- data/lib/ldclient-rb/impl/evaluator_operators.rb +131 -0
- data/lib/ldclient-rb/impl/event_sender.rb +100 -0
- data/lib/ldclient-rb/impl/event_summarizer.rb +68 -0
- data/lib/ldclient-rb/impl/event_types.rb +136 -0
- data/lib/ldclient-rb/impl/flag_tracker.rb +58 -0
- data/lib/ldclient-rb/impl/integrations/consul_impl.rb +170 -0
- data/lib/ldclient-rb/impl/integrations/dynamodb_impl.rb +300 -0
- data/lib/ldclient-rb/impl/integrations/file_data_source.rb +229 -0
- data/lib/ldclient-rb/impl/integrations/redis_impl.rb +306 -0
- data/lib/ldclient-rb/impl/integrations/test_data/test_data_source.rb +40 -0
- data/lib/ldclient-rb/impl/migrations/migrator.rb +287 -0
- data/lib/ldclient-rb/impl/migrations/tracker.rb +136 -0
- data/lib/ldclient-rb/impl/model/clause.rb +45 -0
- data/lib/ldclient-rb/impl/model/feature_flag.rb +254 -0
- data/lib/ldclient-rb/impl/model/preprocessed_data.rb +64 -0
- data/lib/ldclient-rb/impl/model/segment.rb +132 -0
- data/lib/ldclient-rb/impl/model/serialization.rb +72 -0
- data/lib/ldclient-rb/impl/repeating_task.rb +46 -0
- data/lib/ldclient-rb/impl/sampler.rb +25 -0
- data/lib/ldclient-rb/impl/store_client_wrapper.rb +141 -0
- data/lib/ldclient-rb/impl/store_data_set_sorter.rb +55 -0
- data/lib/ldclient-rb/impl/unbounded_pool.rb +34 -0
- data/lib/ldclient-rb/impl/util.rb +95 -0
- data/lib/ldclient-rb/impl.rb +13 -0
- data/lib/ldclient-rb/in_memory_store.rb +100 -0
- data/lib/ldclient-rb/integrations/consul.rb +45 -0
- data/lib/ldclient-rb/integrations/dynamodb.rb +92 -0
- data/lib/ldclient-rb/integrations/file_data.rb +108 -0
- data/lib/ldclient-rb/integrations/redis.rb +98 -0
- data/lib/ldclient-rb/integrations/test_data/flag_builder.rb +663 -0
- data/lib/ldclient-rb/integrations/test_data.rb +213 -0
- data/lib/ldclient-rb/integrations/util/store_wrapper.rb +246 -0
- data/lib/ldclient-rb/integrations.rb +6 -0
- data/lib/ldclient-rb/interfaces.rb +974 -0
- data/lib/ldclient-rb/ldclient.rb +822 -0
- data/lib/ldclient-rb/memoized_value.rb +32 -0
- data/lib/ldclient-rb/migrations.rb +230 -0
- data/lib/ldclient-rb/non_blocking_thread_pool.rb +46 -0
- data/lib/ldclient-rb/polling.rb +102 -0
- data/lib/ldclient-rb/reference.rb +295 -0
- data/lib/ldclient-rb/requestor.rb +102 -0
- data/lib/ldclient-rb/simple_lru_cache.rb +25 -0
- data/lib/ldclient-rb/stream.rb +196 -0
- data/lib/ldclient-rb/util.rb +132 -0
- data/lib/ldclient-rb/version.rb +3 -0
- data/lib/ldclient-rb.rb +27 -0
- metadata +400 -0
data/lib/ldclient-rb/interfaces.rb
@@ -0,0 +1,974 @@
require "observer"

module LaunchDarkly
  #
  # Mixins that define the required methods of various pluggable components used by the client.
  #
  module Interfaces
    #
    # Mixin that defines the required methods of a feature store implementation. The LaunchDarkly
    # client uses the feature store to persist feature flags and related objects received from
    # the LaunchDarkly service. Implementations must support concurrent access and updates.
    # For more about how feature stores can be used, see:
    # [Using a persistent feature store](https://docs.launchdarkly.com/sdk/features/storing-data#ruby).
    #
    # An entity that can be stored in a feature store is a hash that can be converted to and from
    # JSON, and that has at a minimum the following properties: `:key`, a string that is unique
    # among entities of the same kind; `:version`, an integer that is higher for newer data;
    # `:deleted`, a boolean (optional, defaults to false) that if true means this is a
    # placeholder for a deleted entity.
    #
    # To represent the different kinds of objects that can be stored, such as feature flags and
    # segments, the SDK will provide a "kind" object; this is a hash with a single property,
    # `:namespace`, which is a short string unique to that kind. This string can be used as a
    # collection name or a key prefix.
    #
    # The default implementation is {LaunchDarkly::InMemoryFeatureStore}. Several implementations
    # that use databases can be found in {LaunchDarkly::Integrations}. If you want to write a new
    # implementation, see {LaunchDarkly::Integrations::Util} for tools that can make this task
    # simpler.
    #
    module FeatureStore
      #
      # Initializes (or re-initializes) the store with the specified set of entities. Any
      # existing entries will be removed. Implementations can assume that this data set is up to
      # date-- there is no need to perform individual version comparisons between the existing
      # objects and the supplied features.
      #
      # If possible, the store should update the entire data set atomically. If that is not possible,
      # it should iterate through the outer hash and then the inner hash using the existing iteration
      # order of those hashes (the SDK will ensure that the items were inserted into the hashes in
      # the correct order), storing each item, and then delete any leftover items at the very end.
      #
      # @param all_data [Hash] a hash where each key is one of the data kind objects, and each
      #   value is in turn a hash of string keys to entities
      # @return [void]
      #
      def init(all_data)
      end

      #
      # Returns the entity to which the specified key is mapped, if any.
      #
      # @param kind [Object] the kind of entity to get
      # @param key [String] the unique key of the entity to get
      # @return [Hash] the entity; nil if the key was not found, or if the stored entity's
      #   `:deleted` property was true
      #
      def get(kind, key)
      end

      #
      # Returns all stored entities of the specified kind, not including deleted entities.
      #
      # @param kind [Object] the kind of entity to get
      # @return [Hash] a hash where each key is the entity's `:key` property and each value
      #   is the entity
      #
      def all(kind)
      end

      #
      # Attempt to add an entity, or update an existing entity with the same key. An update
      # should only succeed if the new item's `:version` is greater than the old one;
      # otherwise, the method should do nothing.
      #
      # @param kind [Object] the kind of entity to add or update
      # @param item [Hash] the entity to add or update
      # @return [void]
      #
      def upsert(kind, item)
      end

      #
      # Attempt to delete an entity if it exists. Deletion should only succeed if the
      # `version` parameter is greater than the existing entity's `:version`; otherwise, the
      # method should do nothing.
      #
      # @param kind [Object] the kind of entity to delete
      # @param key [String] the unique key of the entity
      # @param version [Integer] the entity must have a lower version than this to be deleted
      # @return [void]
      #
      def delete(kind, key, version)
      end

      #
      # Checks whether this store has been initialized. That means that `init` has been called
      # either by this process, or (if the store can be shared) by another process. This
      # method will be called frequently, so it should be efficient. You can assume that if it
      # has returned true once, it can continue to return true, i.e. a store cannot become
      # uninitialized again.
      #
      # @return [Boolean] true if the store is in an initialized state
      #
      def initialized?
      end

      #
      # Performs any necessary cleanup to shut down the store when the client is being shut down.
      #
      # @return [void]
      #
      def stop
      end

      #
      # WARN: This isn't a required method on a FeatureStore yet. The SDK will
      # currently check if the provided store responds to this method, and if
      # it does, will take appropriate action based on the documented behavior
      # below. This will become required in a future major version release of
      # the SDK.
      #
      # Returns true if this data store implementation supports status
      # monitoring.
      #
      # This is normally only true for persistent data stores but it could also
      # be true for any custom {FeatureStore} implementation.
      #
      # Returning true means that the store guarantees that if it ever enters
      # an invalid state (that is, an operation has failed or it knows that
      # operations cannot succeed at the moment), it will publish a status
      # update, and will then publish another status update once it has
      # returned to a valid state.
      #
      # Custom implementations must implement `def available?` which
      # synchronously checks if the store is available. Without this method,
      # the SDK cannot ensure status updates will occur once the store has gone
      # offline.
      #
      # The same value will be returned from
      # {StatusProvider::monitoring_enabled?}.
      #
      # def monitoring_enabled? end

      #
      # WARN: This isn't a required method on a FeatureStore. The SDK will
      # check if the provided store responds to this method, and if it does,
      # will take appropriate action based on the documented behavior below.
      # Usage of this method will be dropped in a future version of the SDK.
      #
      # Tests whether the data store seems to be functioning normally.
      #
      # This should not be a detailed test of different kinds of operations,
      # but just the smallest possible operation to determine whether (for
      # instance) we can reach the database.
      #
      # Whenever one of the store's other methods throws an exception, the SDK
      # will assume that it may have become unavailable (e.g. the database
      # connection was lost). The SDK will then call {#available?} at intervals
      # until it returns true.
      #
      # @return [Boolean] true if the underlying data store is reachable
      #
      # def available? end
    end
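
As an illustration of the FeatureStore contract above, here is a minimal sketch (not part of the gem) of a thread-safe in-memory implementation that honors the version semantics; a real implementation would normally wrap a database.

# Illustrative sketch only; names and structure are assumptions, not SDK code.
class SimpleMemoryFeatureStore
  include LaunchDarkly::Interfaces::FeatureStore

  def initialize
    @lock = Mutex.new
    @items = {}          # { kind[:namespace] => { key => item hash } }
    @initialized = false
  end

  def init(all_data)
    @lock.synchronize do
      @items = all_data.map { |kind, items| [kind[:namespace], items.dup] }.to_h
      @initialized = true
    end
  end

  def get(kind, key)
    @lock.synchronize do
      item = (@items[kind[:namespace]] || {})[key]
      item.nil? || item[:deleted] ? nil : item
    end
  end

  def all(kind)
    @lock.synchronize do
      (@items[kind[:namespace]] || {}).reject { |_, item| item[:deleted] }
    end
  end

  def upsert(kind, item)
    @lock.synchronize do
      bucket = (@items[kind[:namespace]] ||= {})
      old = bucket[item[:key]]
      bucket[item[:key]] = item if old.nil? || old[:version] < item[:version]
    end
  end

  def delete(kind, key, version)
    # A deletion is stored as a placeholder item, as described in the mixin documentation.
    upsert(kind, { key: key, version: version, deleted: true })
  end

  def initialized?
    @lock.synchronize { @initialized }
  end

  def stop
    # Nothing to clean up for an in-memory store.
  end
end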

    #
    # An interface for tracking changes in feature flag configurations.
    #
    # An implementation of this interface is returned by {LaunchDarkly::LDClient#flag_tracker}.
    # Application code never needs to implement this interface.
    #
    module FlagTracker
      #
      # Registers a listener to be notified of feature flag changes in general.
      #
      # The listener will be notified whenever the SDK receives any change to any feature flag's configuration,
      # or to a user segment that is referenced by a feature flag. If the updated flag is used as a prerequisite
      # for other flags, the SDK assumes that those flags may now behave differently and sends flag change events
      # for them as well.
      #
      # Note that this does not necessarily mean the flag's value has changed for any particular evaluation
      # context, only that some part of the flag configuration was changed so that it may return a
      # different value than it previously returned for some context. If you want to track flag value changes,
      # use {#add_flag_value_change_listener} instead.
      #
      # It is possible, given current design restrictions, that a listener might be notified when no change has
      # occurred. This edge case will be addressed in a later version of the SDK. It is important to note this issue
      # does not affect {#add_flag_value_change_listener} listeners.
      #
      # If using the file data source, any change in a data file will be treated as a change to every flag. Again,
      # use {#add_flag_value_change_listener} (or just re-evaluate the flag yourself) if you want to know whether
      # this is a change that really affects a flag's value.
      #
      # Change events only work if the SDK is actually connecting to LaunchDarkly (or using the file data source).
      # If the SDK is only reading flags from a database then it cannot know when there is a change, because
      # flags are read on an as-needed basis.
      #
      # The listener will be called from a worker thread.
      #
      # Calling this method for an already-registered listener has no effect.
      #
      # @param listener [#update]
      #
      def add_listener(listener) end

      #
      # Unregisters a listener so that it will no longer be notified of feature flag changes.
      #
      # Calling this method for a listener that was not previously registered has no effect.
      #
      # @param listener [Object]
      #
      def remove_listener(listener) end

      #
      # Registers a listener to be notified of a change in a specific feature flag's value for a specific
      # evaluation context.
      #
      # When you call this method, it first immediately evaluates the feature flag. It then uses
      # {#add_listener} to start listening for feature flag configuration
      # changes, and whenever the specified feature flag changes, it re-evaluates the flag for the same context.
      # It then calls your listener if and only if the resulting value has changed.
      #
      # All feature flag evaluations require an instance of {LaunchDarkly::LDContext}. If the feature flag you are
      # tracking does not have any context targeting rules, you must still pass a dummy context such as
      # `LDContext.with_key("for-global-flags")`. If you do not want the user to appear on your dashboard,
      # use the anonymous property: `LDContext.create({key: "for-global-flags", kind: "user", anonymous: true})`.
      #
      # The returned listener represents the subscription that was created by this method
      # call; to unsubscribe, pass that object (not your listener) to {#remove_listener}.
      #
      # @param key [Symbol]
      # @param context [LaunchDarkly::LDContext]
      # @param listener [#update]
      #
      def add_flag_value_change_listener(key, context, listener) end
    end
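
A short usage sketch (not part of the gem) for the FlagTracker interface above, assuming `client` is an already-initialized LDClient: the listener only needs an `update` method, which receives a FlagValueChange.

# Illustrative sketch only.
class FlagValuePrinter
  def update(changed) # receives a FlagValueChange
    puts "#{changed.key}: #{changed.old_value.inspect} -> #{changed.new_value.inspect}"
  end
end

context = LaunchDarkly::LDContext.with_key("service-instance-1")
subscription = client.flag_tracker.add_flag_value_change_listener(:"my-flag-key", context, FlagValuePrinter.new)
# Later, pass the returned subscription object (not your listener) to remove_listener:
# client.flag_tracker.remove_listener(subscription)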

    #
    # Change event fired when some aspect of the flag referenced by the key has changed.
    #
    class FlagChange
      attr_accessor :key

      # @param [Symbol] key
      def initialize(key)
        @key = key
      end
    end

    #
    # Change event fired when the evaluated value for the specified flag key has changed.
    #
    class FlagValueChange
      attr_accessor :key
      attr_accessor :old_value
      attr_accessor :new_value

      # @param [Symbol] key
      # @param [Object] old_value
      # @param [Object] new_value
      def initialize(key, old_value, new_value)
        @key = key
        @old_value = old_value
        @new_value = new_value
      end
    end

    module DataStore
      #
      # An interface for querying the status of a persistent data store.
      #
      # An implementation of this interface is returned by {LaunchDarkly::LDClient#data_store_status_provider}.
      # Application code should not implement this interface.
      #
      module StatusProvider
        #
        # Returns the current status of the store.
        #
        # This is only meaningful for persistent stores, or any custom data store implementation that makes use of
        # the status reporting mechanism provided by the SDK. For the default in-memory store, the status will always
        # be reported as "available".
        #
        # @return [Status] the latest status
        #
        def status
        end

        #
        # Indicates whether the current data store implementation supports status monitoring.
        #
        # This is normally true for all persistent data stores, and false for the default in-memory store. A true value
        # means that any listeners added with {#add_listener} can expect to be notified if there is any error in
        # storing data, and then notified again when the error condition is resolved. A false value means that the
        # status is not meaningful and listeners should not expect to be notified.
        #
        # @return [Boolean] true if status monitoring is enabled
        #
        def monitoring_enabled?
        end

        #
        # Subscribes for notifications of status changes.
        #
        # Applications may wish to know if there is an outage in a persistent data store, since that could mean that
        # flag evaluations are unable to get the flag data from the store (unless it is currently cached) and therefore
        # might return default values.
        #
        # If the SDK receives an exception while trying to query or update the data store, then it notifies listeners
        # that the store appears to be offline ({Status#available} is false) and begins polling the store
        # at intervals until a query succeeds. Once it succeeds, it notifies listeners again with {Status#available}
        # set to true.
        #
        # This method has no effect if the data store implementation does not support status tracking, such as if you
        # are using the default in-memory store rather than a persistent store.
        #
        # @param listener [#update] the listener to add
        #
        def add_listener(listener)
        end

        #
        # Unsubscribes from notifications of status changes.
        #
        # This method has no effect if the data store implementation does not support status tracking, such as if you
        # are using the default in-memory store rather than a persistent store.
        #
        # @param listener [Object] the listener to remove; if no such listener was added, this does nothing
        #
        def remove_listener(listener)
        end
      end

      #
      # Interface that a data store implementation can use to report information back to the SDK.
      #
      module UpdateSink
        #
        # Reports a change in the data store's operational status.
        #
        # This is what makes the status monitoring mechanisms in {StatusProvider} work.
        #
        # @param status [Status] the updated status properties
        #
        def update_status(status)
        end
      end

      class Status
        def initialize(available, stale)
          @available = available
          @stale = stale
        end

        #
        # Returns true if the SDK believes the data store is now available.
        #
        # This property is normally true. If the SDK receives an exception while trying to query or update the data
        # store, then it sets this property to false (notifying listeners, if any) and polls the store at intervals
        # until a query succeeds. Once it succeeds, it sets the property back to true (again notifying listeners).
        #
        # @return [Boolean] true if store is available
        #
        attr_reader :available

        #
        # Returns true if the store may be out of date due to a previous
        # outage, so the SDK should attempt to refresh all feature flag data
        # and rewrite it to the store.
        #
        # This property is not meaningful to application code.
        #
        # @return [Boolean] true if data should be rewritten
        #
        attr_reader :stale
      end
    end
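
A usage sketch (not part of the gem) for the DataStore::StatusProvider above, assuming `client` is an LDClient configured with a persistent store such as the Redis integration.

# Illustrative sketch only.
class StoreStatusLogger
  def update(status) # receives a DataStore::Status
    if status.available
      puts "data store is back online (stale=#{status.stale})"
    else
      puts "data store appears to be offline; evaluations may fall back to defaults"
    end
  end
end

provider = client.data_store_status_provider
provider.add_listener(StoreStatusLogger.new) if provider.monitoring_enabled?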

    #
    # Mixin that defines the required methods of a data source implementation. This is the
    # component that delivers feature flag data from LaunchDarkly to the LDClient by putting
    # the data in the {FeatureStore}. It is expected to run concurrently on its own thread.
    #
    # The client has its own standard implementation, which uses either a streaming connection or
    # polling depending on your configuration. Normally you will not need to use another one
    # except for testing purposes. Two such test fixtures are {LaunchDarkly::Integrations::FileData}
    # and {LaunchDarkly::Integrations::TestData}.
    #
    module DataSource
      #
      # Checks whether the data source has finished initializing. Initialization is considered done
      # once it has received one complete data set from LaunchDarkly.
      #
      # @return [Boolean] true if initialization is complete
      #
      def initialized?
      end

      #
      # Puts the data source into an active state. Normally this means it will make its first
      # connection attempt to LaunchDarkly. If `start` has already been called, calling it again
      # should simply return the same value as the first call.
      #
      # @return [Concurrent::Event] an Event which will be set once initialization is complete
      #
      def start
      end

      #
      # Puts the data source into an inactive state and releases all of its resources.
      # This state should be considered permanent (`start` does not have to work after `stop`).
      #
      def stop
      end
    end
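
A minimal sketch (not part of the gem) of a custom DataSource for test purposes. It assumes the feature store and a fixed data set are handed in by whatever factory constructs it; it pushes the data once and reports itself initialized via a Concurrent::Event from the concurrent-ruby gem the SDK depends on.

# Illustrative sketch only.
require "concurrent"

class StaticDataSource
  include LaunchDarkly::Interfaces::DataSource

  def initialize(feature_store, all_data)
    @feature_store = feature_store
    @all_data = all_data
    @ready = Concurrent::Event.new
  end

  def initialized?
    @ready.set?
  end

  def start
    @feature_store.init(@all_data) # push the fixed data set into the store
    @ready.set
    @ready
  end

  def stop
    # Nothing to release; a real implementation would close its connection here.
  end
end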

    module BigSegmentStore
      #
      # Returns information about the overall state of the store. This method will be called only
      # when the SDK needs the latest state, so it should not be cached.
      #
      # @return [BigSegmentStoreMetadata]
      #
      def get_metadata
      end

      #
      # Queries the store for a snapshot of the current segment state for a specific context.
      #
      # The context_hash is a base64-encoded string produced by hashing the context key as defined by
      # the Big Segments specification; the store implementation does not need to know the details
      # of how this is done, because it deals only with already-hashed keys, but the string can be
      # assumed to only contain characters that are valid in base64.
      #
      # The return value should be either a Hash, or nil if the context is not referenced in any big
      # segments. Each key in the Hash is a "segment reference", which is how segments are
      # identified in Big Segment data. This string is not identical to the segment key-- the SDK
      # will add other information. The store implementation should not be concerned with the
      # format of the string. Each value in the Hash is true if the context is explicitly included in
      # the segment, false if the context is explicitly excluded from the segment-- and is not also
      # explicitly included (that is, if both an include and an exclude existed in the data, the
      # include would take precedence). If the context's status in a particular segment is undefined,
      # there should be no key or value for that segment.
      #
      # This Hash may be cached by the SDK, so it should not be modified after it is created. It
      # is a snapshot of the segment membership state at one point in time.
      #
      # @param context_hash [String]
      # @return [Hash] true/false values for Big Segments that reference this context
      #
      def get_membership(context_hash)
      end

      #
      # Performs any necessary cleanup to shut down the store when the client is being shut down.
      #
      # @return [void]
      #
      def stop
      end
    end
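
An illustrative sketch (not part of the gem) of the BigSegmentStore contract above: a fixed, in-memory store that might be useful in tests. `memberships` maps each context_hash to a Hash of segment references to true/false, exactly as described for get_membership.

# Illustrative sketch only.
class FixedBigSegmentStore
  include LaunchDarkly::Interfaces::BigSegmentStore

  def initialize(memberships, last_up_to_date)
    @memberships = memberships
    @last_up_to_date = last_up_to_date # Unix epoch milliseconds, or nil if never updated
  end

  def get_metadata
    LaunchDarkly::Interfaces::BigSegmentStoreMetadata.new(@last_up_to_date)
  end

  def get_membership(context_hash)
    @memberships[context_hash] # nil means the context is in no Big Segments
  end

  def stop
  end
end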

    #
    # Values returned by {BigSegmentStore#get_metadata}.
    #
    class BigSegmentStoreMetadata
      def initialize(last_up_to_date)
        @last_up_to_date = last_up_to_date
      end

      # The Unix epoch millisecond timestamp of the last update to the {BigSegmentStore}. It is
      # nil if the store has never been updated.
      #
      # @return [Integer|nil]
      attr_reader :last_up_to_date
    end

    #
    # Information about the status of a Big Segment store, provided by {BigSegmentStoreStatusProvider}.
    #
    # Big Segments are a specific type of segments. For more information, read the LaunchDarkly
    # documentation: https://docs.launchdarkly.com/home/users/big-segments
    #
    class BigSegmentStoreStatus
      def initialize(available, stale)
        @available = available
        @stale = stale
      end

      # True if the Big Segment store is able to respond to queries, so that the SDK can evaluate
      # whether a context is in a segment or not.
      #
      # If this property is false, the store is not able to make queries (for instance, it may not have
      # a valid database connection). In this case, the SDK will treat any reference to a Big Segment
      # as if no contexts are included in that segment. Also, the {EvaluationReason} associated
      # with any flag evaluation that references a Big Segment when the store is not available will
      # have a `big_segments_status` of `STORE_ERROR`.
      #
      # @return [Boolean]
      attr_reader :available

      # True if the Big Segment store is available, but has not been updated within the amount of time
      # specified by {BigSegmentsConfig#stale_after}.
      #
      # This may indicate that the LaunchDarkly Relay Proxy, which populates the store, has stopped
      # running or has become unable to receive fresh data from LaunchDarkly. Any feature flag
      # evaluations that reference a Big Segment will be using the last known data, which may be out
      # of date. Also, the {EvaluationReason} associated with those evaluations will have a
      # `big_segments_status` of `STALE`.
      #
      # @return [Boolean]
      attr_reader :stale

      def ==(other)
        self.available == other.available && self.stale == other.stale
      end
    end

    #
    # An interface for querying the status of a Big Segment store.
    #
    # The Big Segment store is the component that receives information about Big Segments, normally
    # from a database populated by the LaunchDarkly Relay Proxy. Big Segments are a specific type
    # of segments. For more information, read the LaunchDarkly documentation:
    # https://docs.launchdarkly.com/home/users/big-segments
    #
    # An implementation of this interface is returned by {LDClient#big_segment_store_status_provider}.
    # Application code never needs to implement this interface.
    #
    # There are two ways to interact with the status. One is to simply get the current status; if its
    # `available` property is true, then the SDK is able to evaluate context membership in Big Segments,
    # and the `stale` property indicates whether the data might be out of date.
    #
    # The other way is to subscribe to status change notifications. Applications may wish to know if
    # there is an outage in the Big Segment store, or if it has become stale (the Relay Proxy has
    # stopped updating it with new data), since then flag evaluations that reference a Big Segment
    # might return incorrect values. To allow finding out about status changes as soon as possible,
    # `BigSegmentStoreStatusProvider` mixes in Ruby's
    # [Observable](https://docs.ruby-lang.org/en/2.5.0/Observable.html) module to provide standard
    # methods such as `add_observer`. Observers will be called with a new {BigSegmentStoreStatus}
    # value whenever the status changes.
    #
    # @example Getting the current status
    #   status = client.big_segment_store_status_provider.status
    #
    # @example Subscribing to status notifications
    #   client.big_segment_store_status_provider.add_observer(self, :big_segments_status_changed)
    #
    #   def big_segments_status_changed(new_status)
    #     puts "Big segment store status is now: #{new_status}"
    #   end
    #
    module BigSegmentStoreStatusProvider
      include Observable
      #
      # Gets the current status of the store, if known.
      #
      # @return [BigSegmentStoreStatus] the status, or nil if the SDK has not yet queried the Big
      #   Segment store status
      #
      def status
      end
    end

    module DataSource
      #
      # An interface for querying the status of the SDK's data source. The data
      # source is the component that receives updates to feature flag data;
      # normally this is a streaming connection, but it could be polling or
      # file data depending on your configuration.
      #
      # An implementation of this interface is returned by
      # {LaunchDarkly::LDClient#data_source_status_provider}. Application code
      # never needs to implement this interface.
      #
      module StatusProvider
        #
        # Returns the current status of the data source.
        #
        # All of the built-in data source implementations are guaranteed to update this status whenever they
        # successfully initialize, encounter an error, or recover after an error.
        #
        # For a custom data source implementation, it is the responsibility of the data source to push
        # status updates to the SDK; if it does not do so, the status will always be reported as
        # {Status::INITIALIZING}.
        #
        # @return [Status]
        #
        def status
        end

        #
        # Subscribes for notifications of status changes.
        #
        # The listener will be notified whenever any property of the status has changed. See {Status} for an
        # explanation of the meaning of each property and what could cause it to change.
        #
        # Notifications will be dispatched on a worker thread. It is the listener's responsibility to return as soon as
        # possible so as not to block subsequent notifications.
        #
        # @param listener [#update] the listener to add
        #
        def add_listener(listener) end

        #
        # Unsubscribes from notifications of status changes.
        #
        def remove_listener(listener) end
      end

      #
      # Interface that a data source implementation will use to push data into
      # the SDK.
      #
      # The data source interacts with this object, rather than manipulating
      # the data store directly, so that the SDK can perform any other
      # necessary operations that must happen when data is updated.
      #
      module UpdateSink
        #
        # Initializes (or re-initializes) the store with the specified set of entities. Any
        # existing entries will be removed. Implementations can assume that this data set is up to
        # date-- there is no need to perform individual version comparisons between the existing
        # objects and the supplied features.
        #
        # If possible, the store should update the entire data set atomically. If that is not possible,
        # it should iterate through the outer hash and then the inner hash using the existing iteration
        # order of those hashes (the SDK will ensure that the items were inserted into the hashes in
        # the correct order), storing each item, and then delete any leftover items at the very end.
        #
        # @param all_data [Hash] a hash where each key is one of the data kind objects, and each
        #   value is in turn a hash of string keys to entities
        # @return [void]
        #
        def init(all_data) end

        #
        # Attempt to add an entity, or update an existing entity with the same key. An update
        # should only succeed if the new item's `:version` is greater than the old one;
        # otherwise, the method should do nothing.
        #
        # @param kind [Object] the kind of entity to add or update
        # @param item [Hash] the entity to add or update
        # @return [void]
        #
        def upsert(kind, item) end

        #
        # Attempt to delete an entity if it exists. Deletion should only succeed if the
        # `version` parameter is greater than the existing entity's `:version`; otherwise, the
        # method should do nothing.
        #
        # @param kind [Object] the kind of entity to delete
        # @param key [String] the unique key of the entity
        # @param version [Integer] the entity must have a lower version than this to be deleted
        # @return [void]
        #
        def delete(kind, key, version) end

        #
        # Informs the SDK of a change in the data source's status.
        #
        # Data source implementations should use this method if they have any
        # concept of being in a valid state, a temporarily disconnected state,
        # or a permanently stopped state.
        #
        # If `new_state` is different from the previous state, and/or
        # `new_error` is non-null, the SDK will start returning the new status
        # (adding a timestamp for the change) from {StatusProvider#status}, and
        # will trigger status change events to any registered listeners.
        #
        # A special case is that if {new_state} is {Status::INTERRUPTED}, but the
        # previous state was {Status::INITIALIZING}, the state will remain at
        # {Status::INITIALIZING} because {Status::INTERRUPTED} is only meaningful
        # after a successful startup.
        #
        # @param new_state [Symbol]
        # @param new_error [ErrorInfo, nil]
        #
        def update_status(new_state, new_error) end
      end

      #
      # Information about the data source's status and about the last status change.
      #
      class Status
        #
        # The initial state of the data source when the SDK is being initialized.
        #
        # If it encounters an error that requires it to retry initialization, the state will remain at
        # {INITIALIZING} until it either succeeds and becomes {VALID}, or permanently fails and
        # becomes {OFF}.
        #

        INITIALIZING = :initializing

        #
        # Indicates that the data source is currently operational and has not had any problems since the
        # last time it received data.
        #
        # In streaming mode, this means that there is currently an open stream connection and that at least
        # one initial message has been received on the stream. In polling mode, it means that the last poll
        # request succeeded.
        #
        VALID = :valid

        #
        # Indicates that the data source encountered an error that it will attempt to recover from.
        #
        # In streaming mode, this means that the stream connection failed, or had to be dropped due to some
        # other error, and will be retried after a backoff delay. In polling mode, it means that the last poll
        # request failed, and a new poll request will be made after the configured polling interval.
        #
        INTERRUPTED = :interrupted

        #
        # Indicates that the data source has been permanently shut down.
        #
        # This could be because it encountered an unrecoverable error (for instance, the LaunchDarkly service
        # rejected the SDK key; an invalid SDK key will never become valid), or because the SDK client was
        # explicitly shut down.
        #
        OFF = :off

        # @return [Symbol] The basic state
        attr_reader :state
        # @return [Time] timestamp of the last state transition
        attr_reader :state_since
        # @return [ErrorInfo, nil] a description of the last error or nil if no errors have occurred since startup
        attr_reader :last_error

        def initialize(state, state_since, last_error)
          @state = state
          @state_since = state_since
          @last_error = last_error
        end
      end

      #
      # A description of an error condition that the data source encountered.
      #
      class ErrorInfo
        #
        # An unexpected error, such as an uncaught exception, further described by {#message}.
        #
        UNKNOWN = :unknown

        #
        # An I/O error such as a dropped connection.
        #
        NETWORK_ERROR = :network_error

        #
        # The LaunchDarkly service returned an HTTP response with an error status, available with
        # {#status_code}.
        #
        ERROR_RESPONSE = :error_response

        #
        # The SDK received malformed data from the LaunchDarkly service.
        #
        INVALID_DATA = :invalid_data

        #
        # The data source itself is working, but when it tried to put an update into the data store, the data
        # store failed (so the SDK may not have the latest data).
        #
        # Data source implementations do not need to report this kind of error; it will be automatically
        # reported by the SDK when exceptions are detected.
        #
        STORE_ERROR = :store_error

        # @return [Symbol] the general category of the error
        attr_reader :kind
        # @return [Integer] an HTTP status or zero
        attr_reader :status_code
        # @return [String, nil] an error message if applicable, or nil
        attr_reader :message
        # @return [Time] the error timestamp
        attr_reader :time

        def initialize(kind, status_code, message, time)
          @kind = kind
          @status_code = status_code
          @message = message
          @time = time
        end
      end
    end
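
A usage sketch (not part of the gem) for the DataSource::StatusProvider above, assuming `client` is an LDClient instance; the listener receives a DataSource::Status whose state is one of the symbols defined on that class.

# Illustrative sketch only.
class ConnectionWatcher
  def update(status) # receives a DataSource::Status
    case status.state
    when LaunchDarkly::Interfaces::DataSource::Status::VALID
      puts "connected to LaunchDarkly"
    when LaunchDarkly::Interfaces::DataSource::Status::INTERRUPTED
      puts "connection interrupted: #{status.last_error&.kind}"
    when LaunchDarkly::Interfaces::DataSource::Status::OFF
      puts "data source permanently stopped"
    end
  end
end

client.data_source_status_provider.add_listener(ConnectionWatcher.new)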

    #
    # Namespace for feature-flag based technology migration support.
    #
    module Migrations
      #
      # A migrator is the interface through which migration support is executed. A migrator is configured through the
      # {LaunchDarkly::Migrations::MigratorBuilder} class.
      #
      module Migrator
        #
        # Uses the provided flag key and context to execute a migration-backed read operation.
        #
        # @param key [String]
        # @param context [LaunchDarkly::LDContext]
        # @param default_stage [Symbol]
        # @param payload [Object, nil]
        #
        # @return [LaunchDarkly::Migrations::OperationResult]
        #
        def read(key, context, default_stage, payload = nil) end

        #
        # Uses the provided flag key and context to execute a migration-backed write operation.
        #
        # @param key [String]
        # @param context [LaunchDarkly::LDContext]
        # @param default_stage [Symbol]
        # @param payload [Object, nil]
        #
        # @return [LaunchDarkly::Migrations::WriteResult]
        #
        def write(key, context, default_stage, payload = nil) end
      end

      #
      # An OpTracker is responsible for managing the collection of measurements that a user might wish to record
      # throughout a migration-assisted operation.
      #
      # Example measurements include latency, errors, and consistency.
      #
      # This data can be provided to the {LaunchDarkly::LDClient.track_migration_op} method to relay this metric
      # information upstream to LaunchDarkly services.
      #
      module OpTracker
        #
        # Sets the migration related operation associated with these tracking measurements.
        #
        # @param [Symbol] op The read or write operation symbol.
        #
        def operation(op) end

        #
        # Allows recording which origins were called during a migration.
        #
        # @param [Symbol] origin Designation for the old or new origin.
        #
        def invoked(origin) end

        #
        # Allows recording the results of a consistency check.
        #
        # This method accepts a callable which should take no parameters and return a single boolean to represent the
        # consistency check results for a read operation.
        #
        # A callable is provided in case sampling rules do not require consistency checking to run. In this case, we can
        # avoid the overhead of a function by not using the callable.
        #
        # @param [#call] is_consistent closure to return result of comparison check
        #
        def consistent(is_consistent) end

        #
        # Allows recording whether an error occurred during the operation.
        #
        # @param [Symbol] origin Designation for the old or new origin.
        #
        def error(origin) end

        #
        # Allows tracking the recorded latency for an individual operation.
        #
        # @param [Symbol] origin Designation for the old or new origin.
        # @param [Float] duration Duration measurement in milliseconds (ms).
        #
        def latency(origin, duration) end

        #
        # Creates an instance of {LaunchDarkly::Impl::MigrationOpEventData}.
        #
        # @return [LaunchDarkly::Impl::MigrationOpEvent, String] A migration op event or a string describing the
        #   failure.
        #
        def build
        end
      end
    end
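
A sketch (not part of the gem) of recording measurements on an OpTracker around a read against the old system. How the tracker is obtained from the SDK, and the exact origin/operation symbols (:old and :read are assumed here), are outside this excerpt.

# Illustrative sketch only; tracker and read_from_old_system are assumed to exist.
tracker.operation(:read)
started = Process.clock_gettime(Process::CLOCK_MONOTONIC, :float_millisecond)
begin
  result = read_from_old_system # hypothetical application method
  tracker.invoked(:old)
  tracker.latency(:old, Process.clock_gettime(Process::CLOCK_MONOTONIC, :float_millisecond) - started)
rescue StandardError
  tracker.error(:old)
end
event = tracker.build # a String return value describes a failure to build the event
# The built data can then be relayed to LaunchDarkly via LDClient#track_migration_op.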

    module Hooks
      #
      # Mixin for extending SDK functionality via hooks.
      #
      # All provided hook implementations **MUST** include this mixin. Hooks without this mixin will be ignored.
      #
      # This mixin includes default implementations for all hook handlers. This allows LaunchDarkly to expand the list
      # of hook handlers without breaking customer integrations.
      #
      module Hook
        #
        # Get metadata about the hook implementation.
        #
        # @return [Metadata]
        #
        def metadata
          Metadata.new('UNDEFINED')
        end

        #
        # The before method is called during the execution of a variation method before the flag value has been
        # determined. The method is executed synchronously.
        #
        # @param evaluation_series_context [EvaluationSeriesContext] Contains information about the evaluation being
        #   performed. This is not mutable.
        # @param data [Hash] A record associated with each stage of hook invocations. Each stage is called with the data
        #   of the previous stage for a series. The input record should not be modified.
        # @return [Hash] Data to use when executing the next stage of the hook in the evaluation series.
        #
        def before_evaluation(evaluation_series_context, data)
          data
        end

        #
        # The after method is called during the execution of the variation method after the flag value has been
        # determined. The method is executed synchronously.
        #
        # @param evaluation_series_context [EvaluationSeriesContext] Contains read-only information about the evaluation
        #   being performed.
        # @param data [Hash] A record associated with each stage of hook invocations. Each stage is called with the data
        #   of the previous stage for a series.
        # @param detail [LaunchDarkly::EvaluationDetail] The result of the evaluation. This value should not be
        #   modified.
        # @return [Hash] Data to use when executing the next stage of the hook in the evaluation series.
        #
        def after_evaluation(evaluation_series_context, data, detail)
          data
        end
      end

      #
      # Metadata data class used for annotating hook implementations.
      #
      class Metadata
        attr_reader :name

        def initialize(name)
          @name = name
        end
      end

      #
      # Contextual information that will be provided to handlers during evaluation series.
      #
      class EvaluationSeriesContext
        attr_reader :key
        attr_reader :context
        attr_reader :default_value
        attr_reader :method

        #
        # @param key [String]
        # @param context [LaunchDarkly::LDContext]
        # @param default_value [any]
        # @param method [Symbol]
        #
        def initialize(key, context, default_value, method)
          @key = key
          @context = context
          @default_value = default_value
          @method = method
        end
      end
    end
  end
end