launchdarkly-server-sdk 8.8.3-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. checksums.yaml +7 -0
  2. data/LICENSE.txt +13 -0
  3. data/README.md +61 -0
  4. data/lib/launchdarkly-server-sdk.rb +1 -0
  5. data/lib/ldclient-rb/cache_store.rb +45 -0
  6. data/lib/ldclient-rb/config.rb +658 -0
  7. data/lib/ldclient-rb/context.rb +565 -0
  8. data/lib/ldclient-rb/evaluation_detail.rb +387 -0
  9. data/lib/ldclient-rb/events.rb +642 -0
  10. data/lib/ldclient-rb/expiring_cache.rb +77 -0
  11. data/lib/ldclient-rb/flags_state.rb +88 -0
  12. data/lib/ldclient-rb/impl/big_segments.rb +117 -0
  13. data/lib/ldclient-rb/impl/broadcaster.rb +78 -0
  14. data/lib/ldclient-rb/impl/context.rb +96 -0
  15. data/lib/ldclient-rb/impl/context_filter.rb +166 -0
  16. data/lib/ldclient-rb/impl/data_source.rb +188 -0
  17. data/lib/ldclient-rb/impl/data_store.rb +109 -0
  18. data/lib/ldclient-rb/impl/dependency_tracker.rb +102 -0
  19. data/lib/ldclient-rb/impl/diagnostic_events.rb +129 -0
  20. data/lib/ldclient-rb/impl/evaluation_with_hook_result.rb +34 -0
  21. data/lib/ldclient-rb/impl/evaluator.rb +539 -0
  22. data/lib/ldclient-rb/impl/evaluator_bucketing.rb +86 -0
  23. data/lib/ldclient-rb/impl/evaluator_helpers.rb +50 -0
  24. data/lib/ldclient-rb/impl/evaluator_operators.rb +131 -0
  25. data/lib/ldclient-rb/impl/event_sender.rb +100 -0
  26. data/lib/ldclient-rb/impl/event_summarizer.rb +68 -0
  27. data/lib/ldclient-rb/impl/event_types.rb +136 -0
  28. data/lib/ldclient-rb/impl/flag_tracker.rb +58 -0
  29. data/lib/ldclient-rb/impl/integrations/consul_impl.rb +170 -0
  30. data/lib/ldclient-rb/impl/integrations/dynamodb_impl.rb +300 -0
  31. data/lib/ldclient-rb/impl/integrations/file_data_source.rb +229 -0
  32. data/lib/ldclient-rb/impl/integrations/redis_impl.rb +306 -0
  33. data/lib/ldclient-rb/impl/integrations/test_data/test_data_source.rb +40 -0
  34. data/lib/ldclient-rb/impl/migrations/migrator.rb +287 -0
  35. data/lib/ldclient-rb/impl/migrations/tracker.rb +136 -0
  36. data/lib/ldclient-rb/impl/model/clause.rb +45 -0
  37. data/lib/ldclient-rb/impl/model/feature_flag.rb +254 -0
  38. data/lib/ldclient-rb/impl/model/preprocessed_data.rb +64 -0
  39. data/lib/ldclient-rb/impl/model/segment.rb +132 -0
  40. data/lib/ldclient-rb/impl/model/serialization.rb +72 -0
  41. data/lib/ldclient-rb/impl/repeating_task.rb +46 -0
  42. data/lib/ldclient-rb/impl/sampler.rb +25 -0
  43. data/lib/ldclient-rb/impl/store_client_wrapper.rb +141 -0
  44. data/lib/ldclient-rb/impl/store_data_set_sorter.rb +55 -0
  45. data/lib/ldclient-rb/impl/unbounded_pool.rb +34 -0
  46. data/lib/ldclient-rb/impl/util.rb +95 -0
  47. data/lib/ldclient-rb/impl.rb +13 -0
  48. data/lib/ldclient-rb/in_memory_store.rb +100 -0
  49. data/lib/ldclient-rb/integrations/consul.rb +45 -0
  50. data/lib/ldclient-rb/integrations/dynamodb.rb +92 -0
  51. data/lib/ldclient-rb/integrations/file_data.rb +108 -0
  52. data/lib/ldclient-rb/integrations/redis.rb +98 -0
  53. data/lib/ldclient-rb/integrations/test_data/flag_builder.rb +663 -0
  54. data/lib/ldclient-rb/integrations/test_data.rb +213 -0
  55. data/lib/ldclient-rb/integrations/util/store_wrapper.rb +246 -0
  56. data/lib/ldclient-rb/integrations.rb +6 -0
  57. data/lib/ldclient-rb/interfaces.rb +974 -0
  58. data/lib/ldclient-rb/ldclient.rb +822 -0
  59. data/lib/ldclient-rb/memoized_value.rb +32 -0
  60. data/lib/ldclient-rb/migrations.rb +230 -0
  61. data/lib/ldclient-rb/non_blocking_thread_pool.rb +46 -0
  62. data/lib/ldclient-rb/polling.rb +102 -0
  63. data/lib/ldclient-rb/reference.rb +295 -0
  64. data/lib/ldclient-rb/requestor.rb +102 -0
  65. data/lib/ldclient-rb/simple_lru_cache.rb +25 -0
  66. data/lib/ldclient-rb/stream.rb +196 -0
  67. data/lib/ldclient-rb/util.rb +132 -0
  68. data/lib/ldclient-rb/version.rb +3 -0
  69. data/lib/ldclient-rb.rb +27 -0
  70. metadata +400 -0
data/lib/ldclient-rb/events.rb
@@ -0,0 +1,642 @@
+ require "ldclient-rb/impl/context_filter"
+ require "ldclient-rb/impl/diagnostic_events"
+ require "ldclient-rb/impl/event_sender"
+ require "ldclient-rb/impl/event_summarizer"
+ require "ldclient-rb/impl/event_types"
+ require "ldclient-rb/impl/util"
+
+ require "concurrent"
+ require "concurrent/atomics"
+ require "concurrent/executors"
+ require "thread"
+ require "time"
+
+ #
+ # Analytics event processing in the SDK involves several components. The purpose of this design is to
+ # minimize overhead on the application threads that are generating analytics events.
+ #
+ # EventProcessor receives an analytics event from the SDK client, on an application thread. It places
+ # the event in a bounded queue, the "inbox", and immediately returns.
+ #
+ # On a separate worker thread, EventDispatcher consumes events from the inbox. These are considered
+ # "input events" because they may or may not actually be sent to LaunchDarkly; most flag evaluation
+ # events are not sent, but are counted and the counters become part of a single summary event.
+ # EventDispatcher updates those counters, creates "index" events for any contexts that have not been seen
+ # recently, and places any events that will be sent to LaunchDarkly into the "outbox" queue.
+ #
+ # When it is time to flush events to LaunchDarkly, the contents of the outbox are handed off to
+ # another worker thread which sends the HTTP request.
+ #
+
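The comment block above describes a producer/consumer hand-off: application threads push into a bounded "inbox", a single dispatcher thread drains it, and flush workers send the accumulated "outbox". The following standalone sketch (not part of the gem; `SketchDispatcher` is an illustrative name) shows that pattern in miniature, using the same `SizedQueue` primitive the SDK uses.

```ruby
# Minimal sketch of the inbox/dispatcher hand-off described above (illustrative only).
class SketchDispatcher
  def initialize(capacity)
    @inbox = SizedQueue.new(capacity)   # bounded "inbox" fed by application threads
    @outbox = []                        # events that would actually be sent
    @worker = Thread.new { run }        # single consumer thread
  end

  # Called on application threads: enqueue and return immediately.
  def record(event)
    @inbox.push(event, true)            # non-blocking; raises ThreadError when full
  rescue ThreadError
    # drop the event rather than slowing the application down
  end

  def stop
    @inbox << :stop                     # blocking push so shutdown is never dropped
    @worker.join
  end

  private

  def run
    loop do
      message = @inbox.pop
      break if message == :stop
      @outbox << message                # the real dispatcher summarizes/filters here
    end
  end
end
```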
+ module LaunchDarkly
+   module EventProcessorMethods
+     def record_eval_event(
+       context,
+       key,
+       version = nil,
+       variation = nil,
+       value = nil,
+       reason = nil,
+       default = nil,
+       track_events = false,
+       debug_until = nil,
+       prereq_of = nil,
+       sampling_ratio = nil,
+       exclude_from_summaries = false
+     )
+     end
+
+     def record_identify_event(context)
+     end
+
+     def record_custom_event(
+       context,
+       key,
+       data = nil,
+       metric_value = nil
+     )
+     end
+
+     def record_migration_op_event(event)
+     end
+
+     def flush
+     end
+
+     def stop
+     end
+   end
+
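For orientation, this is roughly how a caller holding an object that implements `EventProcessorMethods` might invoke these hooks; `processor` and `context` are assumed to already exist and are not defined in this file.

```ruby
# Illustrative calls against the EventProcessorMethods interface defined above.
processor.record_identify_event(context)
processor.record_custom_event(context, "checkout-completed", { items: 3 }, 59.95)
# positional args: key, version, variation, value, reason, default, track_events
processor.record_eval_event(context, "new-dashboard", 42, 0, true, nil, false, true)
processor.flush   # asynchronous flush
processor.stop    # synchronous shutdown, including a final flush
```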
+   MAX_FLUSH_WORKERS = 5
+   private_constant :MAX_FLUSH_WORKERS
+
+   # @private
+   class NullEventProcessor
+     include EventProcessorMethods
+   end
+
+   # @private
+   class FlushMessage
+   end
+
+   # @private
+   class FlushContextsMessage
+   end
+
+   # @private
+   class DiagnosticEventMessage
+   end
+
+   # @private
+   class SynchronousMessage
+     def initialize
+       @reply = Concurrent::Semaphore.new(0)
+     end
+
+     def completed
+       @reply.release
+     end
+
+     def wait_for_completion
+       @reply.acquire
+     end
+   end
+
+   # @private
+   class TestSyncMessage < SynchronousMessage
+   end
+
+   # @private
+   class StopMessage < SynchronousMessage
+   end
+
+   # @private
+   class EventProcessor
+     include EventProcessorMethods
+
+     def initialize(sdk_key, config, client = nil, diagnostic_accumulator = nil, test_properties = nil)
+       raise ArgumentError, "sdk_key must not be nil" if sdk_key.nil? # see LDClient constructor comment on sdk_key
+       @logger = config.logger
+       @inbox = SizedQueue.new(config.capacity < 100 ? 100 : config.capacity)
+       @flush_task = Concurrent::TimerTask.new(execution_interval: config.flush_interval) do
+         post_to_inbox(FlushMessage.new)
+       end
+       @flush_task.execute
+       @contexts_flush_task = Concurrent::TimerTask.new(execution_interval: config.context_keys_flush_interval) do
+         post_to_inbox(FlushContextsMessage.new)
+       end
+       @contexts_flush_task.execute
+       if !diagnostic_accumulator.nil?
+         interval = test_properties && test_properties.has_key?(:diagnostic_recording_interval) ?
+           test_properties[:diagnostic_recording_interval] :
+           config.diagnostic_recording_interval
+         @diagnostic_event_task = Concurrent::TimerTask.new(execution_interval: interval) do
+           post_to_inbox(DiagnosticEventMessage.new)
+         end
+         @diagnostic_event_task.execute
+       else
+         @diagnostic_event_task = nil
+       end
+       @stopped = Concurrent::AtomicBoolean.new(false)
+       @inbox_full = Concurrent::AtomicBoolean.new(false)
+
+       event_sender = (test_properties || {})[:event_sender] ||
+         Impl::EventSender.new(sdk_key, config, client || Util.new_http_client(config.events_uri, config))
+
+       @timestamp_fn = (test_properties || {})[:timestamp_fn] || proc { Impl::Util.current_time_millis }
+       @omit_anonymous_contexts = config.omit_anonymous_contexts
+
+       EventDispatcher.new(@inbox, sdk_key, config, diagnostic_accumulator, event_sender)
+     end
+
+     def record_eval_event(
+       context,
+       key,
+       version = nil,
+       variation = nil,
+       value = nil,
+       reason = nil,
+       default = nil,
+       track_events = false,
+       debug_until = nil,
+       prereq_of = nil,
+       sampling_ratio = nil,
+       exclude_from_summaries = false
+     )
+       post_to_inbox(LaunchDarkly::Impl::EvalEvent.new(timestamp, context, key, version, variation, value, reason,
+         default, track_events, debug_until, prereq_of, sampling_ratio, exclude_from_summaries))
+     end
+
+     def record_identify_event(context)
+       target_context = !@omit_anonymous_contexts ? context : context.without_anonymous_contexts
+       post_to_inbox(LaunchDarkly::Impl::IdentifyEvent.new(timestamp, target_context)) if target_context.valid?
+     end
+
+     def record_custom_event(context, key, data = nil, metric_value = nil)
+       post_to_inbox(LaunchDarkly::Impl::CustomEvent.new(timestamp, context, key, data, metric_value))
+     end
+
+     def record_migration_op_event(event)
+       post_to_inbox(event)
+     end
+
+     def flush
+       # flush is done asynchronously
+       post_to_inbox(FlushMessage.new)
+     end
+
+     def stop
+       # final shutdown, which includes a final flush, is done synchronously
+       if @stopped.make_true
+         @flush_task.shutdown
+         @contexts_flush_task.shutdown
+         @diagnostic_event_task.shutdown unless @diagnostic_event_task.nil?
+         # Note that here we are not calling post_to_inbox, because we *do* want to wait if the inbox
+         # is full; an orderly shutdown can't happen unless these messages are received.
+         @inbox << FlushMessage.new
+         stop_msg = StopMessage.new
+         @inbox << stop_msg
+         stop_msg.wait_for_completion
+       end
+     end
+
+     # exposed only for testing
+     def wait_until_inactive
+       sync_msg = TestSyncMessage.new
+       @inbox << sync_msg
+       sync_msg.wait_for_completion
+     end
+
+     private def timestamp
+       @timestamp_fn.call()
+     end
+
+     private def post_to_inbox(message)
+       begin
+         @inbox.push(message, non_block=true)
+       rescue ThreadError
+         # If the inbox is full, it means the EventDispatcher thread is seriously backed up with not-yet-processed
+         # events. This is unlikely, but if it happens, it means the application is probably doing a ton of flag
+         # evaluations across many threads-- so if we wait for a space in the inbox, we risk a very serious slowdown
+         # of the app. To avoid that, we'll just drop the event. The log warning about this will only be shown once.
+         if @inbox_full.make_true
+           @logger.warn { "[LDClient] Events are being produced faster than they can be processed; some events will be dropped" }
+         end
+       end
+     end
+   end
+
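`post_to_inbox` above relies on `SizedQueue`'s non-blocking push, which raises `ThreadError` when the queue is full, while `stop` deliberately uses the blocking `<<` so the shutdown messages are never dropped. A small standalone illustration of the difference (not from the gem):

```ruby
q = SizedQueue.new(2)
q.push(:a)
q.push(:b)

begin
  q.push(:c, true)                # non-blocking: queue is full, so ThreadError is raised
rescue ThreadError
  puts "dropped :c"               # the event processor drops and logs a one-time warning
end

Thread.new { sleep 0.1; q.pop }   # a consumer frees a slot shortly
q << :stop                        # blocking: waits until space is available
puts q.size                       # => 2
```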
+   # @private
+   class EventDispatcher
+     def initialize(inbox, sdk_key, config, diagnostic_accumulator, event_sender)
+       @sdk_key = sdk_key
+       @config = config
+       @diagnostic_accumulator = config.diagnostic_opt_out? ? nil : diagnostic_accumulator
+       @event_sender = event_sender
+       @sampler = LaunchDarkly::Impl::Sampler.new(Random.new)
+
+       @context_keys = SimpleLRUCacheSet.new(config.context_keys_capacity)
+       @formatter = EventOutputFormatter.new(config)
+       @disabled = Concurrent::AtomicBoolean.new(false)
+       @last_known_past_time = Concurrent::AtomicReference.new(0)
+       @deduplicated_contexts = 0
+       @events_in_last_batch = 0
+
+       outbox = EventBuffer.new(config.capacity, config.logger)
+       flush_workers = NonBlockingThreadPool.new(MAX_FLUSH_WORKERS)
+
+       if !@diagnostic_accumulator.nil?
+         diagnostic_event_workers = NonBlockingThreadPool.new(1)
+         init_event = @diagnostic_accumulator.create_init_event(config)
+         send_diagnostic_event(init_event, diagnostic_event_workers)
+       else
+         diagnostic_event_workers = nil
+       end
+
+       Thread.new { main_loop(inbox, outbox, flush_workers, diagnostic_event_workers) }
+     end
+
+     private
+
+     def main_loop(inbox, outbox, flush_workers, diagnostic_event_workers)
+       running = true
+       while running do
+         begin
+           message = inbox.pop
+           case message
+           when FlushMessage
+             trigger_flush(outbox, flush_workers)
+           when FlushContextsMessage
+             @context_keys.clear
+           when DiagnosticEventMessage
+             send_and_reset_diagnostics(outbox, diagnostic_event_workers)
+           when TestSyncMessage
+             synchronize_for_testing(flush_workers, diagnostic_event_workers)
+             message.completed
+           when StopMessage
+             do_shutdown(flush_workers, diagnostic_event_workers)
+             running = false
+             message.completed
+           else
+             dispatch_event(message, outbox)
+           end
+         rescue => e
+           Util.log_exception(@config.logger, "Unexpected error in event processor", e)
+         end
+       end
+     end
+
+     def do_shutdown(flush_workers, diagnostic_event_workers)
+       flush_workers.shutdown
+       flush_workers.wait_for_termination
+       unless diagnostic_event_workers.nil?
+         diagnostic_event_workers.shutdown
+         diagnostic_event_workers.wait_for_termination
+       end
+       @event_sender.stop if @event_sender.respond_to?(:stop)
+     end
+
+     def synchronize_for_testing(flush_workers, diagnostic_event_workers)
+       # Used only by unit tests. Wait until all active flush workers have finished.
+       flush_workers.wait_all
+       diagnostic_event_workers.wait_all unless diagnostic_event_workers.nil?
+     end
+
+     def dispatch_event(event, outbox)
+       return if @disabled.value
+
+       # Always record the event in the summary.
+       outbox.add_to_summary(event) unless event.exclude_from_summaries
+
+       # Decide whether to add the event to the payload. Feature events may be added twice, once for
+       # the event (if tracked) and once for debugging.
+       will_add_full_event = false
+       debug_event = nil
+       if event.is_a?(LaunchDarkly::Impl::EvalEvent)
+         will_add_full_event = event.track_events
+         if should_debug_event(event)
+           debug_event = LaunchDarkly::Impl::DebugEvent.new(event)
+         end
+       else
+         will_add_full_event = true
+       end
+
+       get_indexable_context(event) do |ctx|
+         outbox.add_event(LaunchDarkly::Impl::IndexEvent.new(event.timestamp, ctx))
+       end
+
+       outbox.add_event(event) if will_add_full_event && @sampler.sample(event.sampling_ratio.nil? ? 1 : event.sampling_ratio)
+       outbox.add_event(debug_event) if !debug_event.nil? && @sampler.sample(event.sampling_ratio.nil? ? 1 : event.sampling_ratio)
+     end
+
+     private def get_indexable_context(event, &block)
+       return if event.context.nil?
+
+       context = !@config.omit_anonymous_contexts ? event.context : event.context.without_anonymous_contexts
+       return unless context.valid?
+
+       return if notice_context(context)
+       return if event.is_a?(LaunchDarkly::Impl::IdentifyEvent)
+       return if event.is_a?(LaunchDarkly::Impl::MigrationOpEvent)
+
+       yield context unless block.nil?
+     end
+
+     #
+     # Add to the set of contexts we've noticed, and return true if the context
+     # was already known to us.
+     # @param context [LaunchDarkly::LDContext]
+     # @return [Boolean]
+     #
+     def notice_context(context)
+       known = @context_keys.add(context.fully_qualified_key)
+       @deduplicated_contexts += 1 if known
+       known
+     end
+
+     def should_debug_event(event)
+       debug_until = event.debug_until
+       if !debug_until.nil?
+         last_past = @last_known_past_time.value
+         debug_until > last_past && debug_until > Impl::Util.current_time_millis
+       else
+         false
+       end
+     end
+
+     def trigger_flush(outbox, flush_workers)
+       if @disabled.value
+         return
+       end
+
+       payload = outbox.get_payload
+       if !payload.events.empty? || !payload.summary.counters.empty?
+         count = payload.events.length + (payload.summary.counters.empty? ? 0 : 1)
+         @events_in_last_batch = count
+         # If all available worker threads are busy, success will be false and no job will be queued.
+         success = flush_workers.post do
+           begin
+             events_out = @formatter.make_output_events(payload.events, payload.summary)
+             result = @event_sender.send_event_data(events_out.to_json, "#{events_out.length} events", false)
+             @disabled.value = true if result.must_shutdown
+             unless result.time_from_server.nil?
+               @last_known_past_time.value = (result.time_from_server.to_f * 1000).to_i
+             end
+           rescue => e
+             Util.log_exception(@config.logger, "Unexpected error in event processor", e)
+           end
+         end
+         outbox.clear if success # Reset our internal state, these events now belong to the flush worker
+       else
+         @events_in_last_batch = 0
+       end
+     end
+
+     def send_and_reset_diagnostics(outbox, diagnostic_event_workers)
+       return if @diagnostic_accumulator.nil?
+       dropped_count = outbox.get_and_clear_dropped_count
+       event = @diagnostic_accumulator.create_periodic_event_and_reset(dropped_count, @deduplicated_contexts, @events_in_last_batch)
+       @deduplicated_contexts = 0
+       @events_in_last_batch = 0
+       send_diagnostic_event(event, diagnostic_event_workers)
+     end
+
+     def send_diagnostic_event(event, diagnostic_event_workers)
+       return if diagnostic_event_workers.nil?
+       uri = URI(@config.events_uri + "/diagnostic")
+       diagnostic_event_workers.post do
+         begin
+           @event_sender.send_event_data(event.to_json, "diagnostic event", true)
+         rescue => e
+           Util.log_exception(@config.logger, "Unexpected error in event processor", e)
+         end
+       end
+     end
+   end
+
+   # @private
+   FlushPayload = Struct.new(:events, :summary)
+
+   # @private
+   class EventBuffer
+     def initialize(capacity, logger)
+       @capacity = capacity
+       @logger = logger
+       @capacity_exceeded = false
+       @dropped_events = 0
+       @events = []
+       @summarizer = LaunchDarkly::Impl::EventSummarizer.new
+     end
+
+     def add_event(event)
+       if @events.length < @capacity
+         @events.push(event)
+         @capacity_exceeded = false
+       else
+         @dropped_events += 1
+         unless @capacity_exceeded
+           @capacity_exceeded = true
+           @logger.warn { "[LDClient] Exceeded event queue capacity. Increase capacity to avoid dropping events." }
+         end
+       end
+     end
+
+     def add_to_summary(event)
+       @summarizer.summarize_event(event)
+     end
+
+     def get_payload
+       FlushPayload.new(@events, @summarizer.snapshot)
+     end
+
+     def get_and_clear_dropped_count
+       ret = @dropped_events
+       @dropped_events = 0
+       ret
+     end
+
+     def clear
+       @events = []
+       @summarizer.clear
+     end
+   end
+
+   # @private
+   class EventOutputFormatter
+     FEATURE_KIND = 'feature'
+     IDENTIFY_KIND = 'identify'
+     CUSTOM_KIND = 'custom'
+     INDEX_KIND = 'index'
+     DEBUG_KIND = 'debug'
+     MIGRATION_OP_KIND = 'migration_op'
+     SUMMARY_KIND = 'summary'
+
+     def initialize(config)
+       @context_filter = LaunchDarkly::Impl::ContextFilter.new(config.all_attributes_private, config.private_attributes)
+     end
+
+     # Transforms events into the format used for event sending.
+     def make_output_events(events, summary)
+       events_out = events.map { |e| make_output_event(e) }
+       unless summary.counters.empty?
+         events_out.push(make_summary_event(summary))
+       end
+       events_out
+     end
+
+     private def make_output_event(event)
+       case event
+
+       when LaunchDarkly::Impl::EvalEvent
+         out = {
+           kind: FEATURE_KIND,
+           creationDate: event.timestamp,
+           key: event.key,
+           value: event.value,
+         }
+
+         out[:default] = event.default unless event.default.nil?
+         out[:variation] = event.variation unless event.variation.nil?
+         out[:version] = event.version unless event.version.nil?
+         out[:prereqOf] = event.prereq_of unless event.prereq_of.nil?
+         out[:context] = @context_filter.filter_redact_anonymous(event.context)
+         out[:reason] = event.reason unless event.reason.nil?
+
+         out
+
+       when LaunchDarkly::Impl::MigrationOpEvent
+         out = {
+           kind: MIGRATION_OP_KIND,
+           creationDate: event.timestamp,
+           contextKeys: event.context.keys,
+           operation: event.operation.to_s,
+           evaluation: {
+             key: event.key,
+             value: event.evaluation.value,
+             reason: event.evaluation.reason,
+           },
+         }
+
+         out[:evaluation][:version] = event.version unless event.version.nil?
+         out[:evaluation][:default] = event.default unless event.default.nil?
+         out[:evaluation][:variation] = event.evaluation.variation_index unless event.evaluation.variation_index.nil?
+         out[:samplingRatio] = event.sampling_ratio unless event.sampling_ratio.nil? || event.sampling_ratio == 1
+
+         measurements = []
+
+         unless event.invoked.empty?
+           measurements << {
+             "key": "invoked",
+             "values": event.invoked.map { |origin| [origin, true] }.to_h,
+           }
+         end
+
+         unless event.consistency_check.nil?
+           measurement = {
+             "key": "consistent",
+             "value": event.consistency_check,
+           }
+
+           unless event.consistency_check_ratio.nil? || event.consistency_check_ratio == 1
+             measurement[:samplingRatio] = event.consistency_check_ratio
+           end
+
+           measurements << measurement
+         end
+
+
+         unless event.latencies.empty?
+           measurements << {
+             "key": "latency_ms",
+             "values": event.latencies,
+           }
+         end
+
+         unless event.errors.empty?
+           measurements << {
+             "key": "error",
+             "values": event.errors.map { |origin| [origin, true] }.to_h,
+           }
+         end
+         out[:measurements] = measurements unless measurements.empty?
+
+         out
+
+       when LaunchDarkly::Impl::IdentifyEvent
+         {
+           kind: IDENTIFY_KIND,
+           creationDate: event.timestamp,
+           key: event.context.fully_qualified_key,
+           context: @context_filter.filter(event.context),
+         }
+
+       when LaunchDarkly::Impl::CustomEvent
+         out = {
+           kind: CUSTOM_KIND,
+           creationDate: event.timestamp,
+           key: event.key,
+         }
+         out[:data] = event.data unless event.data.nil?
+         out[:contextKeys] = event.context.keys
+         out[:metricValue] = event.metric_value unless event.metric_value.nil?
+         out
+
+       when LaunchDarkly::Impl::IndexEvent
+         {
+           kind: INDEX_KIND,
+           creationDate: event.timestamp,
+           context: @context_filter.filter(event.context),
+         }
+
+       when LaunchDarkly::Impl::DebugEvent
+         original = event.eval_event
+         out = {
+           kind: DEBUG_KIND,
+           creationDate: original.timestamp,
+           key: original.key,
+           context: @context_filter.filter(original.context),
+           value: original.value,
+         }
+         out[:default] = original.default unless original.default.nil?
+         out[:variation] = original.variation unless original.variation.nil?
+         out[:version] = original.version unless original.version.nil?
+         out[:prereqOf] = original.prereq_of unless original.prereq_of.nil?
+         out[:reason] = original.reason unless original.reason.nil?
+         out
+
+       else
+         nil
+       end
+     end
+
+     # Transforms the summary data into the format used for event sending.
+     private def make_summary_event(summary)
+       flags = {}
+       summary.counters.each do |flagKey, flagInfo|
+         counters = []
+         flagInfo.versions.each do |version, variations|
+           variations.each do |variation, counter|
+             c = {
+               value: counter.value,
+               count: counter.count,
+             }
+             c[:variation] = variation unless variation.nil?
+             if version.nil?
+               c[:unknown] = true
+             else
+               c[:version] = version
+             end
+             counters.push(c)
+           end
+         end
+         flags[flagKey] = { default: flagInfo.default, counters: counters, contextKinds: flagInfo.context_kinds.to_a }
+       end
+       {
+         kind: SUMMARY_KIND,
+         startDate: summary[:start_date],
+         endDate: summary[:end_date],
+         features: flags,
+       }
+     end
+   end
+ end
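For reference, the summary event assembled by `make_summary_event` above has roughly the following shape; the flag key, counts, and timestamps below are made up purely for illustration.

```ruby
# Illustrative output shape of make_summary_event (not real data).
{
  kind: "summary",
  startDate: 1723400000000,                 # epoch milliseconds
  endDate: 1723400060000,
  features: {
    "new-dashboard" => {
      default: false,
      counters: [
        { value: true, count: 17, variation: 0, version: 42 },
        { value: false, count: 2, unknown: true },   # flag version was not known
      ],
      contextKinds: ["user"],
    },
  },
}
```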
data/lib/ldclient-rb/expiring_cache.rb
@@ -0,0 +1,77 @@
+
+ module LaunchDarkly
+   # A thread-safe cache with maximum number of entries and TTL.
+   # Adapted from https://github.com/SamSaffron/lru_redux/blob/master/lib/lru_redux/ttl/cache.rb
+   # under MIT license with the following changes:
+   # * made thread-safe
+   # * removed many unused methods
+   # * reading a key does not reset its expiration time, only writing
+   # @private
+   class ExpiringCache
+     def initialize(max_size, ttl)
+       @max_size = max_size
+       @ttl = ttl
+       @data_lru = {}
+       @data_ttl = {}
+       @lock = Mutex.new
+     end
+
+     def [](key)
+       @lock.synchronize do
+         ttl_evict
+         @data_lru[key]
+       end
+     end
+
+     def []=(key, val)
+       @lock.synchronize do
+         ttl_evict
+
+         @data_lru.delete(key)
+         @data_ttl.delete(key)
+
+         @data_lru[key] = val
+         @data_ttl[key] = Time.now.to_f
+
+         if @data_lru.size > @max_size
+           key, _ = @data_lru.first # hashes have a FIFO ordering in Ruby
+
+           @data_ttl.delete(key)
+           @data_lru.delete(key)
+         end
+
+         val
+       end
+     end
+
+     def delete(key)
+       @lock.synchronize do
+         ttl_evict
+
+         @data_lru.delete(key)
+         @data_ttl.delete(key)
+       end
+     end
+
+     def clear
+       @lock.synchronize do
+         @data_lru.clear
+         @data_ttl.clear
+       end
+     end
+
+     private
+
+     def ttl_evict
+       ttl_horizon = Time.now.to_f - @ttl
+       key, time = @data_ttl.first
+
+       until time.nil? || time > ttl_horizon
+         @data_ttl.delete(key)
+         @data_lru.delete(key)
+
+         key, time = @data_ttl.first
+       end
+     end
+   end
+ end
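A minimal usage sketch for the cache above (illustrative only; `ExpiringCache` is an internal `@private` class). It assumes the gem is installed, a capacity of 2 entries, and a 1-second TTL.

```ruby
require "ldclient-rb/expiring_cache"

cache = LaunchDarkly::ExpiringCache.new(2, 1)   # max 2 entries, 1-second TTL

cache[:a] = 1
cache[:b] = 2
cache[:a]          # => 1 (reads do not refresh the TTL)

cache[:c] = 3      # capacity exceeded: the oldest-written key (:a) is evicted
cache[:a]          # => nil

sleep 2            # once the TTL has passed, stale entries are evicted on the next access
cache[:b]          # => nil
```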