launchdarkly-server-sdk 6.3.0 → 8.0.0
- checksums.yaml +4 -4
- data/README.md +3 -4
- data/lib/ldclient-rb/config.rb +112 -62
- data/lib/ldclient-rb/context.rb +444 -0
- data/lib/ldclient-rb/evaluation_detail.rb +26 -22
- data/lib/ldclient-rb/events.rb +256 -146
- data/lib/ldclient-rb/flags_state.rb +26 -15
- data/lib/ldclient-rb/impl/big_segments.rb +18 -18
- data/lib/ldclient-rb/impl/broadcaster.rb +78 -0
- data/lib/ldclient-rb/impl/context.rb +96 -0
- data/lib/ldclient-rb/impl/context_filter.rb +145 -0
- data/lib/ldclient-rb/impl/data_source.rb +188 -0
- data/lib/ldclient-rb/impl/data_store.rb +59 -0
- data/lib/ldclient-rb/impl/dependency_tracker.rb +102 -0
- data/lib/ldclient-rb/impl/diagnostic_events.rb +9 -10
- data/lib/ldclient-rb/impl/evaluator.rb +386 -142
- data/lib/ldclient-rb/impl/evaluator_bucketing.rb +40 -41
- data/lib/ldclient-rb/impl/evaluator_helpers.rb +50 -0
- data/lib/ldclient-rb/impl/evaluator_operators.rb +26 -55
- data/lib/ldclient-rb/impl/event_sender.rb +7 -6
- data/lib/ldclient-rb/impl/event_summarizer.rb +68 -0
- data/lib/ldclient-rb/impl/event_types.rb +136 -0
- data/lib/ldclient-rb/impl/flag_tracker.rb +58 -0
- data/lib/ldclient-rb/impl/integrations/consul_impl.rb +19 -7
- data/lib/ldclient-rb/impl/integrations/dynamodb_impl.rb +38 -30
- data/lib/ldclient-rb/impl/integrations/file_data_source.rb +24 -11
- data/lib/ldclient-rb/impl/integrations/redis_impl.rb +109 -12
- data/lib/ldclient-rb/impl/migrations/migrator.rb +287 -0
- data/lib/ldclient-rb/impl/migrations/tracker.rb +136 -0
- data/lib/ldclient-rb/impl/model/clause.rb +45 -0
- data/lib/ldclient-rb/impl/model/feature_flag.rb +255 -0
- data/lib/ldclient-rb/impl/model/preprocessed_data.rb +64 -0
- data/lib/ldclient-rb/impl/model/segment.rb +132 -0
- data/lib/ldclient-rb/impl/model/serialization.rb +54 -44
- data/lib/ldclient-rb/impl/repeating_task.rb +3 -4
- data/lib/ldclient-rb/impl/sampler.rb +25 -0
- data/lib/ldclient-rb/impl/store_client_wrapper.rb +102 -8
- data/lib/ldclient-rb/impl/store_data_set_sorter.rb +2 -2
- data/lib/ldclient-rb/impl/unbounded_pool.rb +1 -1
- data/lib/ldclient-rb/impl/util.rb +59 -1
- data/lib/ldclient-rb/in_memory_store.rb +9 -2
- data/lib/ldclient-rb/integrations/consul.rb +2 -2
- data/lib/ldclient-rb/integrations/dynamodb.rb +2 -2
- data/lib/ldclient-rb/integrations/file_data.rb +4 -4
- data/lib/ldclient-rb/integrations/redis.rb +5 -5
- data/lib/ldclient-rb/integrations/test_data/flag_builder.rb +287 -62
- data/lib/ldclient-rb/integrations/test_data.rb +18 -14
- data/lib/ldclient-rb/integrations/util/store_wrapper.rb +20 -9
- data/lib/ldclient-rb/interfaces.rb +600 -14
- data/lib/ldclient-rb/ldclient.rb +314 -134
- data/lib/ldclient-rb/memoized_value.rb +1 -1
- data/lib/ldclient-rb/migrations.rb +230 -0
- data/lib/ldclient-rb/non_blocking_thread_pool.rb +1 -1
- data/lib/ldclient-rb/polling.rb +52 -6
- data/lib/ldclient-rb/reference.rb +274 -0
- data/lib/ldclient-rb/requestor.rb +9 -11
- data/lib/ldclient-rb/stream.rb +96 -34
- data/lib/ldclient-rb/util.rb +97 -14
- data/lib/ldclient-rb/version.rb +1 -1
- data/lib/ldclient-rb.rb +3 -4
- metadata +65 -23
- data/lib/ldclient-rb/event_summarizer.rb +0 -55
- data/lib/ldclient-rb/file_data_source.rb +0 -23
- data/lib/ldclient-rb/impl/event_factory.rb +0 -126
- data/lib/ldclient-rb/newrelic.rb +0 -17
- data/lib/ldclient-rb/redis_store.rb +0 -88
- data/lib/ldclient-rb/user_filter.rb +0 -52
@@ -112,6 +112,269 @@ module LaunchDarkly
 #
 def stop
 end
+
+#
+# WARN: This isn't a required method on a FeatureStore yet. The SDK will
+# currently check if the provided store responds to this method, and if
+# it does, will take appropriate action based on the documented behavior
+# below. This will become required in a future major version release of
+# the SDK.
+#
+# Returns true if this data store implementation supports status
+# monitoring.
+#
+# This is normally only true for persistent data stores but it could also
+# be true for any custom {FeatureStore} implementation.
+#
+# Returning true means that the store guarantees that if it ever enters
+# an invalid state (that is, an operation has failed or it knows that
+# operations cannot succeed at the moment), it will publish a status
+# update, and will then publish another status update once it has
+# returned to a valid state.
+#
+# Custom implementations must implement `def available?` which
+# synchronously checks if the store is available. Without this method,
+# the SDK cannot ensure status updates will occur once the store has gone
+# offline.
+#
+# The same value will be returned from
+# {StatusProvider::monitoring_enabled?}.
+#
+# def monitoring_enabled? end
+
+#
+# WARN: This isn't a required method on a FeatureStore. The SDK will
+# check if the provided store responds to this method, and if it does,
+# will take appropriate action based on the documented behavior below.
+# Usage of this method will be dropped in a future version of the SDK.
+#
+# Tests whether the data store seems to be functioning normally.
+#
+# This should not be a detailed test of different kinds of operations,
+# but just the smallest possible operation to determine whether (for
+# instance) we can reach the database.
+#
+# Whenever one of the store's other methods throws an exception, the SDK
+# will assume that it may have become unavailable (e.g. the database
+# connection was lost). The SDK will then call {#available?} at intervals
+# until it returns true.
+#
+# @return [Boolean] true if the underlying data store is reachable
+#
+# def available? end
+end
+
+#
+# An interface for tracking changes in feature flag configurations.
+#
+# An implementation of this interface is returned by {LaunchDarkly::LDClient#flag_tracker}.
+# Application code never needs to implement this interface.
+#
+module FlagTracker
+#
+# Registers a listener to be notified of feature flag changes in general.
+#
+# The listener will be notified whenever the SDK receives any change to any feature flag's configuration,
+# or to a user segment that is referenced by a feature flag. If the updated flag is used as a prerequisite
+# for other flags, the SDK assumes that those flags may now behave differently and sends flag change events
+# for them as well.
+#
+# Note that this does not necessarily mean the flag's value has changed for any particular evaluation
+# context, only that some part of the flag configuration was changed so that it may return a
+# different value than it previously returned for some context. If you want to track flag value changes,
+# use {#add_flag_value_change_listener} instead.
+#
+# It is possible, given current design restrictions, that a listener might be notified when no change has
+# occurred. This edge case will be addressed in a later version of the SDK. It is important to note this issue
+# does not affect {#add_flag_value_change_listener} listeners.
+#
+# If using the file data source, any change in a data file will be treated as a change to every flag. Again,
+# use {#add_flag_value_change_listener} (or just re-evaluate the flag # yourself) if you want to know whether
+# this is a change that really affects a flag's value.
+#
+# Change events only work if the SDK is actually connecting to LaunchDarkly (or using the file data source).
+# If the SDK is only reading flags from a database then it cannot know when there is a change, because
+# flags are read on an as-needed basis.
+#
+# The listener will be called from a worker thread.
+#
+# Calling this method for an already-registered listener has no effect.
+#
+# @param listener [#update]
+#
+def add_listener(listener) end
+
+#
+# Unregisters a listener so that it will no longer be notified of feature flag changes.
+#
+# Calling this method for a listener that was not previously registered has no effect.
+#
+# @param listener [Object]
+#
+def remove_listener(listener) end
+
+#
+# Registers a listener to be notified of a change in a specific feature flag's value for a specific
+# evaluation context.
+#
+# When you call this method, it first immediately evaluates the feature flag. It then uses
+# {#add_listener} to start listening for feature flag configuration
+# changes, and whenever the specified feature flag changes, it re-evaluates the flag for the same context.
+# It then calls your listener if and only if the resulting value has changed.
+#
+# All feature flag evaluations require an instance of {LaunchDarkly::LDContext}. If the feature flag you are
+# tracking does not have any context targeting rules, you must still pass a dummy context such as
+# `LDContext.with_key("for-global-flags")`. If you do not want the user to appear on your dashboard,
+# use the anonymous property: `LDContext.create({key: "for-global-flags", kind: "user", anonymous: true})`.
+#
+# The returned listener represents the subscription that was created by this method
+# call; to unsubscribe, pass that object (not your listener) to {#remove_listener}.
+#
+# @param key [Symbol]
+# @param context [LaunchDarkly::LDContext]
+# @param listener [#update]
+#
+def add_flag_value_change_listener(key, context, listener) end
+end
+
+#
+# Change event fired when some aspect of the flag referenced by the key has changed.
+#
+class FlagChange
+attr_accessor :key
+
+# @param [Symbol] key
+def initialize(key)
+@key = key
+end
+end
+
+#
+# Change event fired when the evaluated value for the specified flag key has changed.
+#
+class FlagValueChange
+attr_accessor :key
+attr_accessor :old_value
+attr_accessor :new_value
+
+# @param [Symbol] key
+# @param [Object] old_value
+# @param [Object] new_value
+def initialize(key, old_value, new_value)
+@key = key
+@old_value = old_value
+@new_value = new_value
+end
+end
+
+module DataStore
+#
+# An interface for querying the status of a persistent data store.
+#
+# An implementation of this interface is returned by {LaunchDarkly::LDClient#data_store_status_provider}.
+# Application code should not implement this interface.
+#
+module StatusProvider
+#
+# Returns the current status of the store.
+#
+# This is only meaningful for persistent stores, or any custom data store implementation that makes use of
+# the status reporting mechanism provided by the SDK. For the default in-memory store, the status will always
+# be reported as "available".
+#
+# @return [Status] the latest status
+#
+def status
+end
+
+#
+# Indicates whether the current data store implementation supports status monitoring.
+#
+# This is normally true for all persistent data stores, and false for the default in-memory store. A true value
+# means that any listeners added with {#add_listener} can expect to be notified if there is any error in
+# storing data, and then notified again when the error condition is resolved. A false value means that the
+# status is not meaningful and listeners should not expect to be notified.
+#
+# @return [Boolean] true if status monitoring is enabled
+#
+def monitoring_enabled?
+end
+
+#
+# Subscribes for notifications of status changes.
+#
+# Applications may wish to know if there is an outage in a persistent data store, since that could mean that
+# flag evaluations are unable to get the flag data from the store (unless it is currently cached) and therefore
+# might return default values.
+#
+# If the SDK receives an exception while trying to query or update the data store, then it notifies listeners
+# that the store appears to be offline ({Status#available} is false) and begins polling the store
+# at intervals until a query succeeds. Once it succeeds, it notifies listeners again with {Status#available}
+# set to true.
+#
+# This method has no effect if the data store implementation does not support status tracking, such as if you
+# are using the default in-memory store rather than a persistent store.
+#
+# @param listener [#update] the listener to add
+#
+def add_listener(listener)
+end
+
+#
+# Unsubscribes from notifications of status changes.
+#
+# This method has no effect if the data store implementation does not support status tracking, such as if you
+# are using the default in-memory store rather than a persistent store.
+#
+# @param listener [Object] the listener to remove; if no such listener was added, this does nothing
+#
+def remove_listener(listener)
+end
+end
+
+#
+# Interface that a data store implementation can use to report information back to the SDK.
+#
+module UpdateSink
+#
+# Reports a change in the data store's operational status.
+#
+# This is what makes the status monitoring mechanisms in {StatusProvider} work.
+#
+# @param status [Status] the updated status properties
+#
+def update_status(status)
+end
+end
+
+class Status
+def initialize(available, stale)
+@available = available
+@stale = stale
+end
+
+#
+# Returns true if the SDK believes the data store is now available.
+#
+# This property is normally true. If the SDK receives an exception while trying to query or update the data
+# store, then it sets this property to false (notifying listeners, if any) and polls the store at intervals
+# until a query succeeds. Once it succeeds, it sets the property back to true (again notifying listeners).
+#
+# @return [Boolean] true if store is available
+#
+attr_reader :available
+
+#
+# Returns true if the store may be out of date due to a previous
+# outage, so the SDK should attempt to refresh all feature flag data
+# and rewrite it to the store.
+#
+# This property is not meaningful to application code.
+#
+# @return [Boolean] true if data should be rewritten
+#
+attr_reader :stale
+end
 end
 
 #
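To illustrate the flag tracker and data store status interfaces added in the hunk above, here is a minimal usage sketch. It relies only on what the documentation above states (`LDClient#flag_tracker`, listeners responding to `#update`, `LDContext.with_key`); the listener class, SDK key, and flag key are hypothetical.

```ruby
require "ldclient-rb"

# Hypothetical listener: any object responding to #update can be registered.
class FlagChangeLogger
  def update(event)
    # event is a FlagChange (key) or FlagValueChange (key, old_value, new_value)
    puts "flag #{event.key} changed"
  end
end

client = LaunchDarkly::LDClient.new("sdk-key")                  # placeholder SDK key
context = LaunchDarkly::LDContext.with_key("for-global-flags")

# Coarse-grained: fires whenever any part of a flag's configuration changes.
client.flag_tracker.add_listener(FlagChangeLogger.new)

# Fine-grained: fires only when the evaluated value changes for this context.
# The returned object is the subscription; pass it to remove_listener to stop.
subscription = client.flag_tracker.add_flag_value_change_listener(
  :"my-flag-key", context, FlagChangeLogger.new
)
client.flag_tracker.remove_listener(subscription)
```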
@@ -163,30 +426,30 @@ module LaunchDarkly
 end
 
 #
-# Queries the store for a snapshot of the current segment state for a specific
+# Queries the store for a snapshot of the current segment state for a specific context.
 #
-# The
+# The context_hash is a base64-encoded string produced by hashing the context key as defined by
 # the Big Segments specification; the store implementation does not need to know the details
 # of how this is done, because it deals only with already-hashed keys, but the string can be
 # assumed to only contain characters that are valid in base64.
 #
-# The return value should be either a Hash, or nil if the
+# The return value should be either a Hash, or nil if the context is not referenced in any big
 # segments. Each key in the Hash is a "segment reference", which is how segments are
 # identified in Big Segment data. This string is not identical to the segment key-- the SDK
 # will add other information. The store implementation should not be concerned with the
-# format of the string. Each value in the Hash is true if the
-# the segment, false if the
+# format of the string. Each value in the Hash is true if the context is explicitly included in
+# the segment, false if the context is explicitly excluded from the segment-- and is not also
 # explicitly included (that is, if both an include and an exclude existed in the data, the
-# include would take precedence). If the
+# include would take precedence). If the context's status in a particular segment is undefined,
 # there should be no key or value for that segment.
 #
 # This Hash may be cached by the SDK, so it should not be modified after it is created. It
 # is a snapshot of the segment membership state at one point in time.
 #
-# @param
-# @return [Hash] true/false values for Big Segments that reference this
+# @param context_hash [String]
+# @return [Hash] true/false values for Big Segments that reference this context
 #
-def get_membership(
+def get_membership(context_hash)
 end
 
 #
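The renamed `get_membership(context_hash)` contract above can be sketched with a toy custom store. The in-memory data layout is an illustrative assumption, not part of the SDK, and a real Big Segment store would also implement the interface's other methods, omitted here.

```ruby
# Hedged sketch of a custom Big Segment store honoring the contract above.
class InMemoryBigSegmentStore
  def initialize(memberships)
    # memberships: { "<context-hash>" => { "<segment-ref>" => true/false } }
    @memberships = memberships
  end

  # Returns a Hash of segment reference => true/false, or nil if the context
  # is not referenced in any Big Segment. The SDK may cache the result, so a
  # frozen copy is returned to discourage later modification.
  def get_membership(context_hash)
    membership = @memberships[context_hash]
    membership&.dup&.freeze
  end
end
```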
@@ -216,7 +479,7 @@ module LaunchDarkly
 #
 # Information about the status of a Big Segment store, provided by {BigSegmentStoreStatusProvider}.
 #
-# Big Segments are a specific type of
+# Big Segments are a specific type of segments. For more information, read the LaunchDarkly
 # documentation: https://docs.launchdarkly.com/home/users/big-segments
 #
 class BigSegmentStoreStatus
@@ -226,11 +489,11 @@ module LaunchDarkly
 end
 
 # True if the Big Segment store is able to respond to queries, so that the SDK can evaluate
-# whether a
+# whether a context is in a segment or not.
 #
 # If this property is false, the store is not able to make queries (for instance, it may not have
 # a valid database connection). In this case, the SDK will treat any reference to a Big Segment
-# as if no
+# as if no contexts are included in that segment. Also, the {EvaluationReason} associated with
 # with any flag evaluation that references a Big Segment when the store is not available will
 # have a `big_segments_status` of `STORE_ERROR`.
 #
@@ -259,14 +522,14 @@ module LaunchDarkly
 #
 # The Big Segment store is the component that receives information about Big Segments, normally
 # from a database populated by the LaunchDarkly Relay Proxy. Big Segments are a specific type
-# of
+# of segments. For more information, read the LaunchDarkly documentation:
 # https://docs.launchdarkly.com/home/users/big-segments
 #
 # An implementation of this interface is returned by {LDClient#big_segment_store_status_provider}.
 # Application code never needs to implement this interface.
 #
 # There are two ways to interact with the status. One is to simply get the current status; if its
-# `available` property is true, then the SDK is able to evaluate
+# `available` property is true, then the SDK is able to evaluate context membership in Big Segments,
 # and the `stale`` property indicates whether the data might be out of date.
 #
 # The other way is to subscribe to status change notifications. Applications may wish to know if
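As a usage sketch for the Big Segment status documented above: only the `big_segment_store_status_provider` accessor and the `available`/`stale` properties referenced in this diff are assumed, and the SDK key is a placeholder.

```ruby
client = LaunchDarkly::LDClient.new("sdk-key") # placeholder key

status = client.big_segment_store_status_provider.status
if !status.available
  # Big Segment references will evaluate as "not included" and report STORE_ERROR
  puts "Big Segment store is unreachable"
elsif status.stale
  puts "Big Segment data may be out of date"
end
```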
@@ -299,5 +562,328 @@ module LaunchDarkly
 def status
 end
 end
+
+module DataSource
+#
+# An interface for querying the status of the SDK's data source. The data
+# source is the component that receives updates to feature flag data;
+# normally this is a streaming connection, but it could be polling or
+# file data depending on your configuration.
+#
+# An implementation of this interface is returned by
+# {LaunchDarkly::LDClient#data_source_status_provider}. Application code
+# never needs to implement this interface.
+#
+module StatusProvider
+#
+# Returns the current status of the data source.
+#
+# All of the built-in data source implementations are guaranteed to update this status whenever they
+# successfully initialize, encounter an error, or recover after an error.
+#
+# For a custom data source implementation, it is the responsibility of the data source to push
+# status updates to the SDK; if it does not do so, the status will always be reported as
+# {Status::INITIALIZING}.
+#
+# @return [Status]
+#
+def status
+end
+
+#
+# Subscribes for notifications of status changes.
+#
+# The listener will be notified whenever any property of the status has changed. See {Status} for an
+# explanation of the meaning of each property and what could cause it to change.
+#
+# Notifications will be dispatched on a worker thread. It is the listener's responsibility to return as soon as
+# possible so as not to block subsequent notifications.
+#
+# @param [#update] the listener to add
+#
+def add_listener(listener) end
+
+#
+# Unsubscribes from notifications of status changes.
+#
+def remove_listener(listener) end
+end
+
+#
+# Interface that a data source implementation will use to push data into
+# the SDK.
+#
+# The data source interacts with this object, rather than manipulating
+# the data store directly, so that the SDK can perform any other
+# necessary operations that must happen when data is updated.
+#
+module UpdateSink
+#
+# Initializes (or re-initializes) the store with the specified set of entities. Any
+# existing entries will be removed. Implementations can assume that this data set is up to
+# date-- there is no need to perform individual version comparisons between the existing
+# objects and the supplied features.
+#
+# If possible, the store should update the entire data set atomically. If that is not possible,
+# it should iterate through the outer hash and then the inner hash using the existing iteration
+# order of those hashes (the SDK will ensure that the items were inserted into the hashes in
+# the correct order), storing each item, and then delete any leftover items at the very end.
+#
+# @param all_data [Hash] a hash where each key is one of the data kind objects, and each
+# value is in turn a hash of string keys to entities
+# @return [void]
+#
+def init(all_data) end
+
+#
+# Attempt to add an entity, or update an existing entity with the same key. An update
+# should only succeed if the new item's `:version` is greater than the old one;
+# otherwise, the method should do nothing.
+#
+# @param kind [Object] the kind of entity to add or update
+# @param item [Hash] the entity to add or update
+# @return [void]
+#
+def upsert(kind, item) end
+
+#
+# Attempt to delete an entity if it exists. Deletion should only succeed if the
+# `version` parameter is greater than the existing entity's `:version`; otherwise, the
+# method should do nothing.
+#
+# @param kind [Object] the kind of entity to delete
+# @param key [String] the unique key of the entity
+# @param version [Integer] the entity must have a lower version than this to be deleted
+# @return [void]
+#
+def delete(kind, key, version) end
+
+#
+# Informs the SDK of a change in the data source's status.
+#
+# Data source implementations should use this method if they have any
+# concept of being in a valid state, a temporarily disconnected state,
+# or a permanently stopped state.
+#
+# If `new_state` is different from the previous state, and/or
+# `new_error` is non-null, the SDK will start returning the new status
+# (adding a timestamp for the change) from {StatusProvider#status}, and
+# will trigger status change events to any registered listeners.
+#
+# A special case is that if {new_state} is {Status::INTERRUPTED}, but the
+# previous state was {Status::INITIALIZING}, the state will remain at
+# {Status::INITIALIZING} because {Status::INTERRUPTED} is only meaningful
+# after a successful startup.
+#
+# @param new_state [Symbol]
+# @param new_error [ErrorInfo, nil]
+#
+def update_status(new_state, new_error) end
+end
+
+#
+# Information about the data source's status and about the last status change.
+#
+class Status
+#
+# The initial state of the data source when the SDK is being initialized.
+#
+# If it encounters an error that requires it to retry initialization, the state will remain at
+# {INITIALIZING} until it either succeeds and becomes {VALID}, or permanently fails and
+# becomes {OFF}.
+#
+
+INITIALIZING = :initializing
+
+#
+# Indicates that the data source is currently operational and has not had any problems since the
+# last time it received data.
+#
+# In streaming mode, this means that there is currently an open stream connection and that at least
+# one initial message has been received on the stream. In polling mode, it means that the last poll
+# request succeeded.
+#
+VALID = :valid
+
+#
+# Indicates that the data source encountered an error that it will attempt to recover from.
+#
+# In streaming mode, this means that the stream connection failed, or had to be dropped due to some
+# other error, and will be retried after a backoff delay. In polling mode, it means that the last poll
+# request failed, and a new poll request will be made after the configured polling interval.
+#
+INTERRUPTED = :interrupted
+
+#
+# Indicates that the data source has been permanently shut down.
+#
+# This could be because it encountered an unrecoverable error (for instance, the LaunchDarkly service
+# rejected the SDK key; an invalid SDK key will never become valid), or because the SDK client was
+# explicitly shut down.
+#
+OFF = :off
+
+# @return [Symbol] The basic state
+attr_reader :state
+# @return [Time] timestamp of the last state transition
+attr_reader :state_since
+# @return [ErrorInfo, nil] a description of the last error or nil if no errors have occurred since startup
+attr_reader :last_error
+
+def initialize(state, state_since, last_error)
+@state = state
+@state_since = state_since
+@last_error = last_error
+end
+end
+
+#
+# A description of an error condition that the data source encountered.
+#
+class ErrorInfo
+#
+# An unexpected error, such as an uncaught exception, further described by {#message}.
+#
+UNKNOWN = :unknown
+
+#
+# An I/O error such as a dropped connection.
+#
+NETWORK_ERROR = :network_error
+
+#
+# The LaunchDarkly service returned an HTTP response with an error status, available with
+# {#status_code}.
+#
+ERROR_RESPONSE = :error_response
+
+#
+# The SDK received malformed data from the LaunchDarkly service.
+#
+INVALID_DATA = :invalid_data
+
+#
+# The data source itself is working, but when it tried to put an update into the data store, the data
+# store failed (so the SDK may not have the latest data).
+#
+# Data source implementations do not need to report this kind of error; it will be automatically
+# reported by the SDK when exceptions are detected.
+#
+STORE_ERROR = :store_error
+
+# @return [Symbol] the general category of the error
+attr_reader :kind
+# @return [Integer] an HTTP status or zero
+attr_reader :status_code
+# @return [String, nil] message an error message if applicable, or nil
+attr_reader :message
+# @return [Time] time the error timestamp
+attr_reader :time
+
+def initialize(kind, status_code, message, time)
+@kind = kind
+@status_code = status_code
+@message = message
+@time = time
+end
+end
+end
+
+#
+# Namespace for feature-flag based technology migration support.
+#
+module Migrations
+#
+# A migrator is the interface through which migration support is executed. A migrator is configured through the
+# {LaunchDarkly::Migrations::MigratorBuilder} class.
+#
+module Migrator
+#
+# Uses the provided flag key and context to execute a migration-backed read operation.
+#
+# @param key [String]
+# @param context [LaunchDarkly::LDContext]
+# @param default_stage [Symbol]
+# @param payload [Object, nil]
+#
+# @return [LaunchDarkly::Migrations::OperationResult]
+#
+def read(key, context, default_stage, payload = nil) end
+
+#
+# Uses the provided flag key and context to execute a migration-backed write operation.
+#
+# @param key [String]
+# @param context [LaunchDarkly::LDContext]
+# @param default_stage [Symbol]
+# @param payload [Object, nil]
+#
+# @return [LaunchDarkly::Migrations::WriteResult]
+#
+def write(key, context, default_stage, payload = nil) end
+end
+
+#
+# An OpTracker is responsible for managing the collection of measurements that which a user might wish to record
+# throughout a migration-assisted operation.
+#
+# Example measurements include latency, errors, and consistency.
+#
+# This data can be provided to the {LaunchDarkly::LDClient.track_migration_op} method to relay this metric
+# information upstream to LaunchDarkly services.
+#
+module OpTracker
+#
+# Sets the migration related operation associated with these tracking measurements.
+#
+# @param [Symbol] op The read or write operation symbol.
+#
+def operation(op) end
+
+#
+# Allows recording which origins were called during a migration.
+#
+# @param [Symbol] origin Designation for the old or new origin.
+#
+def invoked(origin) end
+
+#
+# Allows recording the results of a consistency check.
+#
+# This method accepts a callable which should take no parameters and return a single boolean to represent the
+# consistency check results for a read operation.
+#
+# A callable is provided in case sampling rules do not require consistency checking to run. In this case, we can
+# avoid the overhead of a function by not using the callable.
+#
+# @param [#call] is_consistent closure to return result of comparison check
+#
+def consistent(is_consistent) end
+
+#
+# Allows recording whether an error occurred during the operation.
+#
+# @param [Symbol] origin Designation for the old or new origin.
+#
+def error(origin) end
+
+#
+# Allows tracking the recorded latency for an individual operation.
+#
+# @param [Symbol] origin Designation for the old or new origin.
+# @param [Float] duration Duration measurement in milliseconds (ms).
+#
+def latency(origin, duration) end
+
+#
+# Creates an instance of {LaunchDarkly::Impl::MigrationOpEventData}.
+#
+# @return [LaunchDarkly::Impl::MigrationOpEvent, String] A migration op event or a string describing the error.
+# failure.
+#
+def build
+end
+end
+end
 end
 end
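A minimal sketch of observing the data source status interface added above; only `LDClient#data_source_status_provider`, the `#update` listener duck type, and the state symbols defined in this diff (`:valid`, etc.) are assumed. The listener class and SDK key are hypothetical.

```ruby
# Hypothetical listener; status is a DataSource::Status with state,
# state_since, and last_error as documented above.
class DataSourceStatusLogger
  def update(status)
    puts "data source is now #{status.state} (since #{status.state_since})"
    puts "last error kind: #{status.last_error.kind}" unless status.last_error.nil?
  end
end

client = LaunchDarkly::LDClient.new("sdk-key") # placeholder key
client.data_source_status_provider.add_listener(DataSourceStatusLogger.new)

# The current state can also be read synchronously; VALID is :valid per the diff.
connected = client.data_source_status_provider.status.state == :valid
```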
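And a hedged sketch of calling the Migrator interface documented above. It assumes a migrator has already been configured through `LaunchDarkly::Migrations::MigratorBuilder` (referenced above; construction not shown), and the flag key and default stage symbol are illustrative.

```ruby
# `migrator` is assumed to have been built via MigratorBuilder (not shown).
context = LaunchDarkly::LDContext.with_key("example-user")

# Executes the old and/or new read paths according to the migration flag's stage,
# returning a LaunchDarkly::Migrations::OperationResult as documented above.
result = migrator.read(
  "hypothetical-migration-flag", # flag key
  context,
  :off                           # default stage used if the flag cannot be evaluated
)
```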