launchdarkly-server-sdk 7.0.4 → 7.2.0

@@ -1,3 +1,4 @@
+ require "concurrent"
  require "ldclient-rb/interfaces"
  require "ldclient-rb/impl/store_data_set_sorter"

@@ -5,34 +6,45 @@ module LaunchDarkly
  module Impl
  #
  # Provides additional behavior that the client requires before or after feature store operations.
- # Currently this just means sorting the data set for init(). In the future we may also use this
- # to provide an update listener capability.
+ # This just means sorting the data set for init() and dealing with data store status listeners.
  #
  class FeatureStoreClientWrapper
  include Interfaces::FeatureStore

- def initialize(store)
+ def initialize(store, store_update_sink, logger)
+ # @type [LaunchDarkly::Interfaces::FeatureStore]
  @store = store
+
+ @monitoring_enabled = does_store_support_monitoring?
+
+ # @type [LaunchDarkly::Impl::DataStore::UpdateSink]
+ @store_update_sink = store_update_sink
+ @logger = logger
+
+ @mutex = Mutex.new # Covers the following variables
+ @last_available = true
+ # @type [LaunchDarkly::Impl::RepeatingTask, nil]
+ @poller = nil
  end

  def init(all_data)
- @store.init(FeatureStoreDataSetSorter.sort_all_collections(all_data))
+ wrapper { @store.init(FeatureStoreDataSetSorter.sort_all_collections(all_data)) }
  end

  def get(kind, key)
- @store.get(kind, key)
+ wrapper { @store.get(kind, key) }
  end

  def all(kind)
- @store.all(kind)
+ wrapper { @store.all(kind) }
  end

  def upsert(kind, item)
- @store.upsert(kind, item)
+ wrapper { @store.upsert(kind, item) }
  end

  def delete(kind, key, version)
- @store.delete(kind, key, version)
+ wrapper { @store.delete(kind, key, version) }
  end

  def initialized?
@@ -41,6 +53,88 @@ module LaunchDarkly

  def stop
  @store.stop
+ @mutex.synchronize do
+ return if @poller.nil?
+
+ @poller.stop
+ @poller = nil
+ end
+ end
+
+ def monitoring_enabled?
+ @monitoring_enabled
+ end
+
+ private def wrapper()
+ begin
+ yield
+ rescue => e
+ update_availability(false) if @monitoring_enabled
+ raise
+ end
+ end
+
+ private def update_availability(available)
+ @mutex.synchronize do
+ return if available == @last_available
+ @last_available = available
+ end
+
+ status = LaunchDarkly::Interfaces::DataStore::Status.new(available, false)
+
+ @logger.warn("Persistent store is available again") if available
+
+ @store_update_sink.update_status(status)
+
+ if available
+ @mutex.synchronize do
+ return if @poller.nil?
+
+ @poller.stop
+ @poller = nil
+ end
+
+ return
+ end
+
+ @logger.warn("Detected persistent store unavailability; updates will be cached until it recovers.")
+
+ task = Impl::RepeatingTask.new(0.5, 0, -> { self.check_availability }, @logger)
+
+ @mutex.synchronize do
+ @poller = task
+ @poller.start
+ end
+ end
+
+ private def check_availability
+ begin
+ update_availability(true) if @store.available?
+ rescue => e
+ @logger.error("Unexpected error from data store status function: #{e}")
+ end
+ end
+
+ # This method determines whether the wrapped store can support enabling monitoring.
+ #
+ # The wrapped store must provide a monitoring_enabled method, which must
+ # be true. But this alone is not sufficient.
+ #
+ # Because this class wraps all interactions with a provided store, it can
+ # technically "monitor" any store. However, monitoring also requires that
+ # we notify listeners when the store is available again.
+ #
+ # We determine this by checking the store's `available?` method, so this
+ # is also a requirement for monitoring support.
+ #
+ # These extra checks won't be necessary once `available` becomes a part
+ # of the core interface requirements and this class no longer wraps every
+ # feature store.
+ private def does_store_support_monitoring?
+ return false unless @store.respond_to? :monitoring_enabled?
+ return false unless @store.respond_to? :available?
+
+ @store.monitoring_enabled?
  end
  end
  end
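Note: the wrapper above only enables monitoring when the wrapped store exposes both monitoring_enabled? and available?. A minimal sketch of that opt-in for a custom store follows; the class name and the ping-style connectivity check are hypothetical, not part of the SDK.

# Hypothetical custom store: only the monitoring-related methods are shown.
class ExamplePersistentFeatureStore
  include LaunchDarkly::Interfaces::FeatureStore

  def initialize(db_client)
    @db_client = db_client  # some external database client (assumed)
  end

  # Checked via respond_to? by FeatureStoreClientWrapper; returning true opts in to monitoring.
  def monitoring_enabled?
    true
  end

  # Smallest possible health check; the SDK polls this after an operation raises.
  def available?
    @db_client.ping  # hypothetical connectivity check
    true
  rescue StandardError
    false
  end

  # init, get, all, upsert, delete, initialized?, and stop are implemented as usual.
end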
@@ -23,6 +23,9 @@ module LaunchDarkly
  priority: 0,
  }.freeze

+ # @private
+ ALL_KINDS = [FEATURES, SEGMENTS].freeze
+
  #
  # Default implementation of the LaunchDarkly client's feature store, using an in-memory
  # cache. This object holds feature flags and related data received from LaunchDarkly.
@@ -37,6 +40,10 @@ module LaunchDarkly
  @initialized = Concurrent::AtomicBoolean.new(false)
  end

+ def monitoring_enabled?
+ false
+ end
+
  def get(kind, key)
  @lock.with_read_lock do
  coll = @items[kind]
@@ -101,7 +101,7 @@ module LaunchDarkly
  #
  def self.data_source(options={})
  lambda { |sdk_key, config|
- Impl::Integrations::FileDataSourceImpl.new(config.feature_store, config.logger, options) }
+ Impl::Integrations::FileDataSourceImpl.new(config.feature_store, config.data_source_update_sink, config.logger, options) }
  end
  end
  end
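For reference, a typical way to plug the file data source into a client configuration; the file name is invented, and the paths/send_events option names are assumptions about this integration rather than something shown in this diff.

require "ldclient-rb"

# Assumed option names; consult the FileData integration documentation for the full list.
data_source = LaunchDarkly::Integrations::FileData.data_source(paths: ["flags.json"])
config = LaunchDarkly::Config.new(data_source: data_source, send_events: false)
client = LaunchDarkly::LDClient.new("sdk-key-ignored-with-file-data", config)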
@@ -43,6 +43,17 @@ module LaunchDarkly
  end

  @inited = Concurrent::AtomicBoolean.new(false)
+ @has_available_method = @core.respond_to? :available?
+ end
+
+ def monitoring_enabled?
+ @has_available_method
+ end
+
+ def available?
+ return false unless @has_available_method
+
+ @core.available?
  end

  def init(all_data)
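The caching wrapper above reports monitoring_enabled? based solely on whether its underlying "core" object responds to available?. A sketch, with a made-up core class and an assumed constructor shape:

# Hypothetical core; the *_internal methods the wrapper requires are elided.
class ExampleStoreCore
  def available?
    true  # replace with a cheap connectivity check against the backing database
  end
end

# Constructor shape is an assumption; see the integration utilities for the real options.
wrapper = LaunchDarkly::Integrations::Util::CachingStoreWrapper.new(ExampleStoreCore.new, {})
wrapper.monitoring_enabled?  # => true, because the core responds to available?
wrapper.available?           # => true, delegated to the core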
@@ -112,6 +112,269 @@ module LaunchDarkly
  #
  def stop
  end
+
+ #
+ # WARN: This isn't a required method on a FeatureStore yet. The SDK will
+ # currently check if the provided store responds to this method, and if
+ # it does, will take appropriate action based on the documented behavior
+ # below. This will become required in a future major version release of
+ # the SDK.
+ #
+ # Returns true if this data store implementation supports status
+ # monitoring.
+ #
+ # This is normally only true for persistent data stores but it could also
+ # be true for any custom {FeatureStore} implementation.
+ #
+ # Returning true means that the store guarantees that if it ever enters
+ # an invalid state (that is, an operation has failed or it knows that
+ # operations cannot succeed at the moment), it will publish a status
+ # update, and will then publish another status update once it has
+ # returned to a valid state.
+ #
+ # Custom implementations must implement `def available?` which
+ # synchronously checks if the store is available. Without this method,
+ # the SDK cannot ensure status updates will occur once the store has gone
+ # offline.
+ #
+ # The same value will be returned from
+ # {StatusProvider::monitoring_enabled?}.
+ #
+ # def monitoring_enabled? end
+
+ #
+ # WARN: This isn't a required method on a FeatureStore. The SDK will
+ # check if the provided store responds to this method, and if it does,
+ # will take appropriate action based on the documented behavior below.
+ # Usage of this method will be dropped in a future version of the SDK.
+ #
+ # Tests whether the data store seems to be functioning normally.
+ #
+ # This should not be a detailed test of different kinds of operations,
+ # but just the smallest possible operation to determine whether (for
+ # instance) we can reach the database.
+ #
+ # Whenever one of the store's other methods throws an exception, the SDK
+ # will assume that it may have become unavailable (e.g. the database
+ # connection was lost). The SDK will then call {#available?} at intervals
+ # until it returns true.
+ #
+ # @return [Boolean] true if the underlying data store is reachable
+ #
+ # def available? end
+ end
+
+ #
+ # An interface for tracking changes in feature flag configurations.
+ #
+ # An implementation of this interface is returned by {LaunchDarkly::LDClient#flag_tracker}.
+ # Application code never needs to implement this interface.
+ #
+ module FlagTracker
+ #
+ # Registers a listener to be notified of feature flag changes in general.
+ #
+ # The listener will be notified whenever the SDK receives any change to any feature flag's configuration,
+ # or to a user segment that is referenced by a feature flag. If the updated flag is used as a prerequisite
+ # for other flags, the SDK assumes that those flags may now behave differently and sends flag change events
+ # for them as well.
+ #
+ # Note that this does not necessarily mean the flag's value has changed for any particular evaluation
+ # context, only that some part of the flag configuration was changed so that it may return a
+ # different value than it previously returned for some context. If you want to track flag value changes,
+ # use {#add_flag_value_change_listener} instead.
+ #
+ # It is possible, given current design restrictions, that a listener might be notified when no change has
+ # occurred. This edge case will be addressed in a later version of the SDK. It is important to note this issue
+ # does not affect {#add_flag_value_change_listener} listeners.
+ #
+ # If using the file data source, any change in a data file will be treated as a change to every flag. Again,
+ # use {#add_flag_value_change_listener} (or just re-evaluate the flag yourself) if you want to know whether
+ # this is a change that really affects a flag's value.
+ #
+ # Change events only work if the SDK is actually connecting to LaunchDarkly (or using the file data source).
+ # If the SDK is only reading flags from a database then it cannot know when there is a change, because
+ # flags are read on an as-needed basis.
+ #
+ # The listener will be called from a worker thread.
+ #
+ # Calling this method for an already-registered listener has no effect.
+ #
+ # @param listener [#update]
+ #
+ def add_listener(listener) end
+
+ #
+ # Unregisters a listener so that it will no longer be notified of feature flag changes.
+ #
+ # Calling this method for a listener that was not previously registered has no effect.
+ #
+ # @param listener [Object]
+ #
+ def remove_listener(listener) end
+
+ #
+ # Registers a listener to be notified of a change in a specific feature flag's value for a specific
+ # evaluation context.
+ #
+ # When you call this method, it first immediately evaluates the feature flag. It then uses
+ # {#add_listener} to start listening for feature flag configuration
+ # changes, and whenever the specified feature flag changes, it re-evaluates the flag for the same context.
+ # It then calls your listener if and only if the resulting value has changed.
+ #
+ # All feature flag evaluations require an instance of {LaunchDarkly::LDContext}. If the feature flag you are
+ # tracking does not have any context targeting rules, you must still pass a dummy context such as
+ # `LDContext.with_key("for-global-flags")`. If you do not want the user to appear on your dashboard,
+ # use the anonymous property: `LDContext.create({key: "for-global-flags", kind: "user", anonymous: true})`.
+ #
+ # The returned listener represents the subscription that was created by this method
+ # call; to unsubscribe, pass that object (not your listener) to {#remove_listener}.
+ #
+ # @param key [Symbol]
+ # @param context [LaunchDarkly::LDContext]
+ # @param listener [#update]
+ #
+ def add_flag_value_change_listener(key, context, listener) end
+ end
+
+ #
+ # Change event fired when some aspect of the flag referenced by the key has changed.
+ #
+ class FlagChange
+ attr_accessor :key
+
+ # @param [Symbol] key
+ def initialize(key)
+ @key = key
+ end
+ end
+
+ #
+ # Change event fired when the evaluated value for the specified flag key has changed.
+ #
+ class FlagValueChange
+ attr_accessor :key
+ attr_accessor :old_value
+ attr_accessor :new_value
+
+ # @param [Symbol] key
+ # @param [Object] old_value
+ # @param [Object] new_value
+ def initialize(key, old_value, new_value)
+ @key = key
+ @old_value = old_value
+ @new_value = new_value
+ end
+ end
+
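To make the flag tracker interface concrete, here is a usage sketch; client is assumed to be an existing LaunchDarkly::LDClient, and the flag key and listener class are invented for illustration.

# Listener objects only need an #update method; it is called on a worker thread.
class ValueChangeLogger
  def update(event)  # receives a FlagValueChange
    puts "#{event.key}: #{event.old_value.inspect} -> #{event.new_value.inspect}"
  end
end

context = LaunchDarkly::LDContext.create({key: "for-global-flags", kind: "user", anonymous: true})
subscription = client.flag_tracker.add_flag_value_change_listener(:"example-flag", context, ValueChangeLogger.new)

# To stop receiving events, pass the returned subscription (not your listener) back:
client.flag_tracker.remove_listener(subscription)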
+ module DataStore
+ #
+ # An interface for querying the status of a persistent data store.
+ #
+ # An implementation of this interface is returned by {LaunchDarkly::LDClient#data_store_status_provider}.
+ # Application code should not implement this interface.
+ #
+ module StatusProvider
+ #
+ # Returns the current status of the store.
+ #
+ # This is only meaningful for persistent stores, or any custom data store implementation that makes use of
+ # the status reporting mechanism provided by the SDK. For the default in-memory store, the status will always
+ # be reported as "available".
+ #
+ # @return [Status] the latest status
+ #
+ def status
+ end
+
+ #
+ # Indicates whether the current data store implementation supports status monitoring.
+ #
+ # This is normally true for all persistent data stores, and false for the default in-memory store. A true value
+ # means that any listeners added with {#add_listener} can expect to be notified if there is any error in
+ # storing data, and then notified again when the error condition is resolved. A false value means that the
+ # status is not meaningful and listeners should not expect to be notified.
+ #
+ # @return [Boolean] true if status monitoring is enabled
+ #
+ def monitoring_enabled?
+ end
+
+ #
+ # Subscribes for notifications of status changes.
+ #
+ # Applications may wish to know if there is an outage in a persistent data store, since that could mean that
+ # flag evaluations are unable to get the flag data from the store (unless it is currently cached) and therefore
+ # might return default values.
+ #
+ # If the SDK receives an exception while trying to query or update the data store, then it notifies listeners
+ # that the store appears to be offline ({Status#available} is false) and begins polling the store
+ # at intervals until a query succeeds. Once it succeeds, it notifies listeners again with {Status#available}
+ # set to true.
+ #
+ # This method has no effect if the data store implementation does not support status tracking, such as if you
+ # are using the default in-memory store rather than a persistent store.
+ #
+ # @param listener [#update] the listener to add
+ #
+ def add_listener(listener)
+ end
+
+ #
+ # Unsubscribes from notifications of status changes.
+ #
+ # This method has no effect if the data store implementation does not support status tracking, such as if you
+ # are using the default in-memory store rather than a persistent store.
+ #
+ # @param listener [Object] the listener to remove; if no such listener was added, this does nothing
+ #
+ def remove_listener(listener)
+ end
+ end
+
+ #
+ # Interface that a data store implementation can use to report information back to the SDK.
+ #
+ module UpdateSink
+ #
+ # Reports a change in the data store's operational status.
+ #
+ # This is what makes the status monitoring mechanisms in {StatusProvider} work.
+ #
+ # @param status [Status] the updated status properties
+ #
+ def update_status(status)
+ end
+ end
+
+ class Status
+ def initialize(available, stale)
+ @available = available
+ @stale = stale
+ end
+
+ #
+ # Returns true if the SDK believes the data store is now available.
+ #
+ # This property is normally true. If the SDK receives an exception while trying to query or update the data
+ # store, then it sets this property to false (notifying listeners, if any) and polls the store at intervals
+ # until a query succeeds. Once it succeeds, it sets the property back to true (again notifying listeners).
+ #
+ # @return [Boolean] true if store is available
+ #
+ attr_reader :available
+
+ #
+ # Returns true if the store may be out of date due to a previous
+ # outage, so the SDK should attempt to refresh all feature flag data
+ # and rewrite it to the store.
+ #
+ # This property is not meaningful to application code.
+ #
+ # @return [Boolean] true if data should be rewritten
+ #
+ attr_reader :stale
+ end
  end

  #
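A matching usage sketch for the data store status interfaces above; client is again assumed to be an existing LaunchDarkly::LDClient and the listener class is invented.

class StoreStatusLogger
  # Receives a DataStore::Status whenever availability changes.
  def update(status)
    if status.available
      puts "persistent store is available again"
    else
      puts "persistent store unavailable; evaluations may fall back to cached or default values"
    end
  end
end

provider = client.data_store_status_provider
provider.add_listener(StoreStatusLogger.new) if provider.monitoring_enabled?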
@@ -299,5 +562,231 @@ module LaunchDarkly
  def status
  end
  end
+
+ module DataSource
+ #
+ # An interface for querying the status of the SDK's data source. The data
+ # source is the component that receives updates to feature flag data;
+ # normally this is a streaming connection, but it could be polling or
+ # file data depending on your configuration.
+ #
+ # An implementation of this interface is returned by
+ # {LaunchDarkly::LDClient#data_source_status_provider}. Application code
+ # never needs to implement this interface.
+ #
+ module StatusProvider
+ #
+ # Returns the current status of the data source.
+ #
+ # All of the built-in data source implementations are guaranteed to update this status whenever they
+ # successfully initialize, encounter an error, or recover after an error.
+ #
+ # For a custom data source implementation, it is the responsibility of the data source to push
+ # status updates to the SDK; if it does not do so, the status will always be reported as
+ # {Status::INITIALIZING}.
+ #
+ # @return [Status]
+ #
+ def status
+ end
+
+ #
+ # Subscribes for notifications of status changes.
+ #
+ # The listener will be notified whenever any property of the status has changed. See {Status} for an
+ # explanation of the meaning of each property and what could cause it to change.
+ #
+ # Notifications will be dispatched on a worker thread. It is the listener's responsibility to return as soon as
+ # possible so as not to block subsequent notifications.
+ #
+ # @param listener [#update] the listener to add
+ #
+ def add_listener(listener) end
+
+ #
+ # Unsubscribes from notifications of status changes.
+ #
+ def remove_listener(listener) end
+ end
+
+ #
+ # Interface that a data source implementation will use to push data into
+ # the SDK.
+ #
+ # The data source interacts with this object, rather than manipulating
+ # the data store directly, so that the SDK can perform any other
+ # necessary operations that must happen when data is updated.
+ #
+ module UpdateSink
+ #
+ # Initializes (or re-initializes) the store with the specified set of entities. Any
+ # existing entries will be removed. Implementations can assume that this data set is up to
+ # date; there is no need to perform individual version comparisons between the existing
+ # objects and the supplied features.
+ #
+ # If possible, the store should update the entire data set atomically. If that is not possible,
+ # it should iterate through the outer hash and then the inner hash using the existing iteration
+ # order of those hashes (the SDK will ensure that the items were inserted into the hashes in
+ # the correct order), storing each item, and then delete any leftover items at the very end.
+ #
+ # @param all_data [Hash] a hash where each key is one of the data kind objects, and each
+ # value is in turn a hash of string keys to entities
+ # @return [void]
+ #
+ def init(all_data) end
+
+ #
+ # Attempt to add an entity, or update an existing entity with the same key. An update
+ # should only succeed if the new item's `:version` is greater than the old one;
+ # otherwise, the method should do nothing.
+ #
+ # @param kind [Object] the kind of entity to add or update
+ # @param item [Hash] the entity to add or update
+ # @return [void]
+ #
+ def upsert(kind, item) end
+
+ #
+ # Attempt to delete an entity if it exists. Deletion should only succeed if the
+ # `version` parameter is greater than the existing entity's `:version`; otherwise, the
+ # method should do nothing.
+ #
+ # @param kind [Object] the kind of entity to delete
+ # @param key [String] the unique key of the entity
+ # @param version [Integer] the entity must have a lower version than this to be deleted
+ # @return [void]
+ #
+ def delete(kind, key, version) end
+
+ #
+ # Informs the SDK of a change in the data source's status.
+ #
+ # Data source implementations should use this method if they have any
+ # concept of being in a valid state, a temporarily disconnected state,
+ # or a permanently stopped state.
+ #
+ # If `new_state` is different from the previous state, and/or
+ # `new_error` is non-null, the SDK will start returning the new status
+ # (adding a timestamp for the change) from {StatusProvider#status}, and
+ # will trigger status change events to any registered listeners.
+ #
+ # A special case is that if {new_state} is {Status::INTERRUPTED}, but the
+ # previous state was {Status::INITIALIZING}, the state will remain at
+ # {Status::INITIALIZING} because {Status::INTERRUPTED} is only meaningful
+ # after a successful startup.
+ #
+ # @param new_state [Symbol]
+ # @param new_error [ErrorInfo, nil]
+ #
+ def update_status(new_state, new_error) end
+ end
+
+ #
+ # Information about the data source's status and about the last status change.
+ #
+ class Status
+ #
+ # The initial state of the data source when the SDK is being initialized.
+ #
+ # If it encounters an error that requires it to retry initialization, the state will remain at
+ # {INITIALIZING} until it either succeeds and becomes {VALID}, or permanently fails and
+ # becomes {OFF}.
+ #
+
+ INITIALIZING = :initializing
+
+ #
+ # Indicates that the data source is currently operational and has not had any problems since the
+ # last time it received data.
+ #
+ # In streaming mode, this means that there is currently an open stream connection and that at least
+ # one initial message has been received on the stream. In polling mode, it means that the last poll
+ # request succeeded.
+ #
+ VALID = :valid
+
+ #
+ # Indicates that the data source encountered an error that it will attempt to recover from.
+ #
+ # In streaming mode, this means that the stream connection failed, or had to be dropped due to some
+ # other error, and will be retried after a backoff delay. In polling mode, it means that the last poll
+ # request failed, and a new poll request will be made after the configured polling interval.
+ #
+ INTERRUPTED = :interrupted
+
+ #
+ # Indicates that the data source has been permanently shut down.
+ #
+ # This could be because it encountered an unrecoverable error (for instance, the LaunchDarkly service
+ # rejected the SDK key; an invalid SDK key will never become valid), or because the SDK client was
+ # explicitly shut down.
+ #
+ OFF = :off
+
+ # @return [Symbol] the basic state
+ attr_reader :state
+ # @return [Time] timestamp of the last state transition
+ attr_reader :state_since
+ # @return [ErrorInfo, nil] a description of the last error, or nil if no errors have occurred since startup
+ attr_reader :last_error
+
+ def initialize(state, state_since, last_error)
+ @state = state
+ @state_since = state_since
+ @last_error = last_error
+ end
+ end
+
+ #
+ # A description of an error condition that the data source encountered.
+ #
+ class ErrorInfo
+ #
+ # An unexpected error, such as an uncaught exception, further described by {#message}.
+ #
+ UNKNOWN = :unknown
+
+ #
+ # An I/O error such as a dropped connection.
+ #
+ NETWORK_ERROR = :network_error
+
+ #
+ # The LaunchDarkly service returned an HTTP response with an error status, available with
+ # {#status_code}.
+ #
+ ERROR_RESPONSE = :error_response
+
+ #
+ # The SDK received malformed data from the LaunchDarkly service.
+ #
+ INVALID_DATA = :invalid_data
+
+ #
+ # The data source itself is working, but when it tried to put an update into the data store, the data
+ # store failed (so the SDK may not have the latest data).
+ #
+ # Data source implementations do not need to report this kind of error; it will be automatically
+ # reported by the SDK when exceptions are detected.
+ #
+ STORE_ERROR = :store_error
+
+ # @return [Symbol] the general category of the error
+ attr_reader :kind
+ # @return [Integer] an HTTP status or zero
+ attr_reader :status_code
+ # @return [String, nil] an error message if applicable, or nil
+ attr_reader :message
+ # @return [Time] the error timestamp
+ attr_reader :time
+
+ def initialize(kind, status_code, message, time)
+ @kind = kind
+ @status_code = status_code
+ @message = message
+ @time = time
+ end
+ end
+ end
791
  end
303
792
  end