launchdarkly-server-sdk 6.2.5 → 6.3.2

@@ -0,0 +1,212 @@
+require 'ldclient-rb/in_memory_store'
+require 'ldclient-rb/util'
+
+require 'concurrent/atomics'
+require 'json'
+require 'yaml'
+require 'pathname'
+
+module LaunchDarkly
+  module Impl
+    module Integrations
+      class FileDataSourceImpl
+        # To avoid pulling in 'listen' and its transitive dependencies for people who aren't using the
+        # file data source or who don't need auto-updating, we only enable auto-update if the 'listen'
+        # gem has been provided by the host app.
+        @@have_listen = false
+        begin
+          require 'listen'
+          @@have_listen = true
+        rescue LoadError
+        end
+
+        def initialize(feature_store, logger, options={})
+          @feature_store = feature_store
+          @logger = logger
+          @paths = options[:paths] || []
+          if @paths.is_a? String
+            @paths = [ @paths ]
+          end
+          @auto_update = options[:auto_update]
+          if @auto_update && @@have_listen && !options[:force_polling] # force_polling is used only for tests
+            # We have seen unreliable behavior in the 'listen' gem in JRuby 9.1 (https://github.com/guard/listen/issues/449).
+            # Therefore, on that platform we'll fall back to file polling instead.
+            if defined?(JRUBY_VERSION) && JRUBY_VERSION.start_with?("9.1.")
+              @use_listen = false
+            else
+              @use_listen = true
+            end
+          end
+          @poll_interval = options[:poll_interval] || 1
+          @initialized = Concurrent::AtomicBoolean.new(false)
+          @ready = Concurrent::Event.new
+        end
+
+        def initialized?
+          @initialized.value
+        end
+
+        def start
+          ready = Concurrent::Event.new
+
+          # We will return immediately regardless of whether the file load succeeded or failed -
+          # the difference can be detected by checking "initialized?"
+          ready.set
+
+          load_all
+
+          if @auto_update
+            # If we're going to watch files, then the start event will be set the first time we get
+            # a successful load.
+            @listener = start_listener
+          end
+
+          ready
+        end
+
+        def stop
+          @listener.stop if !@listener.nil?
+        end
+
+        private
+
+        def load_all
+          all_data = {
+            FEATURES => {},
+            SEGMENTS => {}
+          }
+          @paths.each do |path|
+            begin
+              load_file(path, all_data)
+            rescue => exn
+              LaunchDarkly::Util.log_exception(@logger, "Unable to load flag data from \"#{path}\"", exn)
+              return
+            end
+          end
+          @feature_store.init(all_data)
+          @initialized.make_true
+        end
+
+        def load_file(path, all_data)
+          parsed = parse_content(IO.read(path))
+          (parsed[:flags] || {}).each do |key, flag|
+            add_item(all_data, FEATURES, flag)
+          end
+          (parsed[:flagValues] || {}).each do |key, value|
+            add_item(all_data, FEATURES, make_flag_with_value(key.to_s, value))
+          end
+          (parsed[:segments] || {}).each do |key, segment|
+            add_item(all_data, SEGMENTS, segment)
+          end
+        end
+
+        def parse_content(content)
+          # We can use the Ruby YAML parser for both YAML and JSON (JSON is a subset of YAML and while
+          # not all YAML parsers handle it correctly, we have verified that the Ruby one does, at least
+          # for all the samples of actual flag data that we've tested).
+          symbolize_all_keys(YAML.safe_load(content))
+        end
+
+        def symbolize_all_keys(value)
+          # This is necessary because YAML.load doesn't have an option for parsing keys as symbols, and
+          # the SDK expects all objects to be formatted that way.
+          if value.is_a?(Hash)
+            value.map{ |k, v| [k.to_sym, symbolize_all_keys(v)] }.to_h
+          elsif value.is_a?(Array)
+            value.map{ |v| symbolize_all_keys(v) }
+          else
+            value
+          end
+        end
+
+        def add_item(all_data, kind, item)
+          items = all_data[kind]
+          raise ArgumentError, "Received unknown item kind #{kind} in add_data" if items.nil? # shouldn't be possible since we preinitialize the hash
+          key = item[:key].to_sym
+          if !items[key].nil?
+            raise ArgumentError, "#{kind[:namespace]} key \"#{item[:key]}\" was used more than once"
+          end
+          items[key] = item
+        end
+
+        def make_flag_with_value(key, value)
+          {
+            key: key,
+            on: true,
+            fallthrough: { variation: 0 },
+            variations: [ value ]
+          }
+        end
+
+        def start_listener
+          resolved_paths = @paths.map { |p| Pathname.new(File.absolute_path(p)).realpath.to_s }
+          if @use_listen
+            start_listener_with_listen_gem(resolved_paths)
+          else
+            FileDataSourcePoller.new(resolved_paths, @poll_interval, self.method(:load_all), @logger)
+          end
+        end
+
+        def start_listener_with_listen_gem(resolved_paths)
+          path_set = resolved_paths.to_set
+          dir_paths = resolved_paths.map{ |p| File.dirname(p) }.uniq
+          opts = { latency: @poll_interval }
+          l = Listen.to(*dir_paths, opts) do |modified, added, removed|
+            paths = modified + added + removed
+            if paths.any? { |p| path_set.include?(p) }
+              load_all
+            end
+          end
+          l.start
+          l
+        end
+
+        #
+        # Used internally by FileDataSource to track data file changes if the 'listen' gem is not available.
+        #
+        class FileDataSourcePoller
+          def initialize(resolved_paths, interval, reloader, logger)
+            @stopped = Concurrent::AtomicBoolean.new(false)
+            get_file_times = Proc.new do
+              ret = {}
+              resolved_paths.each do |path|
+                begin
+                  ret[path] = File.mtime(path)
+                rescue Errno::ENOENT
+                  ret[path] = nil
+                end
+              end
+              ret
+            end
+            last_times = get_file_times.call
+            @thread = Thread.new do
+              while true
+                sleep interval
+                break if @stopped.value
+                begin
+                  new_times = get_file_times.call
+                  changed = false
+                  last_times.each do |path, old_time|
+                    new_time = new_times[path]
+                    if !new_time.nil? && new_time != old_time
+                      changed = true
+                      break
+                    end
+                  end
+                  reloader.call if changed
+                rescue => exn
+                  LaunchDarkly::Util.log_exception(logger, "Unexpected exception in FileDataSourcePoller", exn)
+                end
+              end
+            end
+          end
+
+          def stop
+            @stopped.make_true
+            @thread.run # wakes it up if it's sleeping
+          end
+        end
+      end
+    end
+  end
+end
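Since `parse_content` relies on the YAML parser also accepting JSON, and on `symbolize_all_keys` to match the SDK's symbol-keyed data model, a standalone sketch of that conversion may help. The sample data is hypothetical; the helper is copied verbatim from the class above:

```ruby
require 'yaml'

# Copy of symbolize_all_keys from FileDataSourceImpl, for standalone use.
def symbolize_all_keys(value)
  if value.is_a?(Hash)
    value.map { |k, v| [k.to_sym, symbolize_all_keys(v)] }.to_h
  elsif value.is_a?(Array)
    value.map { |v| symbolize_all_keys(v) }
  else
    value
  end
end

# JSON parses through the YAML parser too, as the comment in parse_content notes.
json = '{"flagValues": {"my-flag": true}}'
p symbolize_all_keys(YAML.safe_load(json))
# => {:flagValues=>{:"my-flag"=>true}}
```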
@@ -5,10 +5,7 @@ module LaunchDarkly
   module Impl
     module Integrations
       module Redis
-        #
-        # Internal implementation of the Redis feature store, intended to be used with CachingStoreWrapper.
-        #
-        class RedisFeatureStoreCore
+        class RedisStoreImplBase
           begin
             require "redis"
             require "connection_pool"
@@ -19,22 +16,14 @@ module LaunchDarkly
 
           def initialize(opts)
             if !REDIS_ENABLED
-              raise RuntimeError.new("can't use Redis feature store because one of these gems is missing: redis, connection_pool")
+              raise RuntimeError.new("can't use #{description} because one of these gems is missing: redis, connection_pool")
             end
 
-            @redis_opts = opts[:redis_opts] || Hash.new
-            if opts[:redis_url]
-              @redis_opts[:url] = opts[:redis_url]
-            end
-            if !@redis_opts.include?(:url)
-              @redis_opts[:url] = LaunchDarkly::Integrations::Redis::default_redis_url
-            end
-            max_connections = opts[:max_connections] || 16
-            @pool = opts[:pool] || ConnectionPool.new(size: max_connections) do
-              ::Redis.new(@redis_opts)
-            end
+            @pool = create_redis_pool(opts)
+
             # shutdown pool on close unless the client passed a custom pool and specified not to shutdown
             @pool_shutdown_on_close = (!opts[:pool] || opts.fetch(:pool_shutdown_on_close, true))
+
             @prefix = opts[:prefix] || LaunchDarkly::Integrations::Redis::default_prefix
             @logger = opts[:logger] || Config.default_logger
             @test_hook = opts[:test_hook] # used for unit tests, deliberately undocumented
@@ -42,10 +31,53 @@ module LaunchDarkly
             @stopped = Concurrent::AtomicBoolean.new(false)
 
             with_connection do |redis|
-              @logger.info("RedisFeatureStore: using Redis instance at #{redis.connection[:host]}:#{redis.connection[:port]} \
-              and prefix: #{@prefix}")
+              @logger.info("#{description}: using Redis instance at #{redis.connection[:host]}:#{redis.connection[:port]} and prefix: #{@prefix}")
+            end
+          end
+
+          def stop
+            if @stopped.make_true
+              return unless @pool_shutdown_on_close
+              @pool.shutdown { |redis| redis.close }
+            end
+          end
+
+          protected def description
+            "Redis"
+          end
+
+          protected def with_connection
+            @pool.with { |redis| yield(redis) }
+          end
+
+          private def create_redis_pool(opts)
+            redis_opts = opts[:redis_opts] ? opts[:redis_opts].clone : Hash.new
+            if opts[:redis_url]
+              redis_opts[:url] = opts[:redis_url]
+            end
+            if !redis_opts.include?(:url)
+              redis_opts[:url] = LaunchDarkly::Integrations::Redis::default_redis_url
+            end
+            max_connections = opts[:max_connections] || 16
+            return opts[:pool] || ConnectionPool.new(size: max_connections) do
+              ::Redis.new(redis_opts)
             end
           end
+        end
+
+        #
+        # Internal implementation of the Redis feature store, intended to be used with CachingStoreWrapper.
+        #
+        class RedisFeatureStoreCore < RedisStoreImplBase
+          def initialize(opts)
+            super(opts)
+
+            @test_hook = opts[:test_hook] # used for unit tests, deliberately undocumented
+          end
+
+          def description
+            "RedisFeatureStore"
+          end
 
           def init_internal(all_data)
             count = 0
@@ -103,8 +135,7 @@ module LaunchDarkly
                 else
                   final_item = old_item
                   action = new_item[:deleted] ? "delete" : "update"
-                  @logger.warn { "RedisFeatureStore: attempted to #{action} #{key} version: #{old_item[:version]} \
-                  in '#{kind[:namespace]}' with a version that is the same or older: #{new_item[:version]}" }
+                  @logger.warn { "RedisFeatureStore: attempted to #{action} #{key} version: #{old_item[:version]} in '#{kind[:namespace]}' with a version that is the same or older: #{new_item[:version]}" }
                 end
                 redis.unwatch
               end
@@ -117,13 +148,6 @@ module LaunchDarkly
             with_connection { |redis| redis.exists?(inited_key) }
           end
 
-          def stop
-            if @stopped.make_true
-              return unless @pool_shutdown_on_close
-              @pool.shutdown { |redis| redis.close }
-            end
-          end
-
           private
 
           def before_update_transaction(base_key, key)
@@ -142,14 +166,43 @@ module LaunchDarkly
             @prefix + ":$inited"
           end
 
-          def with_connection
-            @pool.with { |redis| yield(redis) }
-          end
-
           def get_redis(redis, kind, key)
             Model.deserialize(kind, redis.hget(items_key(kind), key))
           end
         end
+
+        #
+        # Internal implementation of the Redis big segment store.
+        #
+        class RedisBigSegmentStore < RedisStoreImplBase
+          KEY_LAST_UP_TO_DATE = ':big_segments_synchronized_on'
+          KEY_USER_INCLUDE = ':big_segment_include:'
+          KEY_USER_EXCLUDE = ':big_segment_exclude:'
+
+          def description
+            "RedisBigSegmentStore"
+          end
+
+          def get_metadata
+            value = with_connection { |redis| redis.get(@prefix + KEY_LAST_UP_TO_DATE) }
+            Interfaces::BigSegmentStoreMetadata.new(value.nil? ? nil : value.to_i)
+          end
+
+          def get_membership(user_hash)
+            with_connection do |redis|
+              included_refs = redis.smembers(@prefix + KEY_USER_INCLUDE + user_hash)
+              excluded_refs = redis.smembers(@prefix + KEY_USER_EXCLUDE + user_hash)
+              if !included_refs && !excluded_refs
+                nil
+              else
+                membership = {}
+                excluded_refs.each { |ref| membership[ref] = false }
+                included_refs.each { |ref| membership[ref] = true }
+                membership
+              end
+            end
+          end
+        end
       end
     end
   end
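`RedisBigSegmentStore` is meant to be reached through the SDK's public Redis integration rather than constructed directly. A hedged wiring sketch follows; the factory method name `new_big_segment_store` and its options are assumed from the public integration API and are not shown in this diff:

```ruby
require 'ldclient-rb'

# Assumed public factory for the store implemented above; options are taken
# to mirror the feature store's (redis_url, prefix, logger, ...).
store = LaunchDarkly::Integrations::Redis.new_big_segment_store(
  redis_url: 'redis://localhost:6379/0',  # placeholder URL
  prefix: 'launchdarkly'
)

# Big Segment stores plug into the client through BigSegmentsConfig.
config = LaunchDarkly::Config.new(
  big_segments: LaunchDarkly::BigSegmentsConfig.new(store: store)
)
client = LaunchDarkly::LDClient.new('my-sdk-key', config)
```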
@@ -0,0 +1,40 @@
+require 'concurrent/atomics'
+require 'ldclient-rb/interfaces'
+
+module LaunchDarkly
+  module Impl
+    module Integrations
+      module TestData
+        # @private
+        class TestDataSource
+          include LaunchDarkly::Interfaces::DataSource
+
+          def initialize(feature_store, test_data)
+            @feature_store = feature_store
+            @test_data = test_data
+          end
+
+          def initialized?
+            true
+          end
+
+          def start
+            ready = Concurrent::Event.new
+            ready.set
+            init_data = @test_data.make_init_data
+            @feature_store.init(init_data)
+            ready
+          end
+
+          def stop
+            @test_data.closed_instance(self)
+          end
+
+          def upsert(kind, item)
+            @feature_store.upsert(kind, item)
+          end
+        end
+      end
+    end
+  end
+end
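`TestDataSource` is the per-client half of the test fixture; flags are defined on the shared `TestData` object that creates it. A usage sketch, assuming the public `TestData` builder (`data_source`, `flag`, `variation_for_all_users`, `update`) matches LaunchDarkly's other server-side SDKs; it is not shown in this diff:

```ruby
require 'ldclient-rb'

td = LaunchDarkly::Integrations::TestData.data_source
# Define a flag before (or after) client startup; later updates reach
# running clients through the upsert method above.
td.update(td.flag('my-boolean-flag').variation_for_all_users(true))

config = LaunchDarkly::Config.new(data_source: td, send_events: false)
client = LaunchDarkly::LDClient.new('fake-sdk-key', config)
client.variation('my-boolean-flag', { key: 'user-1' }, false)  # => true
```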
@@ -0,0 +1,47 @@
+require "ldclient-rb/util"
+
+require "concurrent/atomics"
+
+module LaunchDarkly
+  module Impl
+    class RepeatingTask
+      def initialize(interval, start_delay, task, logger)
+        @interval = interval
+        @start_delay = start_delay
+        @task = task
+        @logger = logger
+        @stopped = Concurrent::AtomicBoolean.new(false)
+        @worker = nil
+      end
+
+      def start
+        @worker = Thread.new do
+          if @start_delay
+            sleep(@start_delay)
+          end
+          while !@stopped.value do
+            started_at = Time.now
+            begin
+              @task.call
+            rescue => e
+              LaunchDarkly::Util.log_exception(@logger, "Uncaught exception from repeating task", e)
+            end
+            delta = @interval - (Time.now - started_at)
+            if delta > 0
+              sleep(delta)
+            end
+          end
+        end
+      end
+
+      def stop
+        if @stopped.make_true
+          if @worker && @worker.alive? && @worker != Thread.current
+            @worker.run # causes the thread to wake up if it's currently in a sleep
+            @worker.join
+          end
+        end
+      end
+    end
+  end
+end
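The start/stop contract above (run the task, sleep out the remainder of the interval, wake early on stop) can be exercised directly. A minimal sketch with hypothetical values; the require path is assumed from the class's namespace:

```ruby
require 'logger'
require 'ldclient-rb/impl/repeating_task'  # assumed load path for the file above

# Run a callable roughly every 2 seconds, with no start delay.
task = LaunchDarkly::Impl::RepeatingTask.new(
  2,                    # interval, in seconds
  0,                    # start_delay; 0 still takes the sleep branch, but sleep(0) is a no-op
  -> { puts 'tick' },   # task can be any object responding to #call
  Logger.new($stdout)
)
task.start
sleep 5   # let the task fire a couple of times
task.stop # wakes the worker out of its sleep, then joins it
```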
@@ -1,7 +1,10 @@
-
 module LaunchDarkly
   module Impl
     module Util
+      def self.is_bool(aObject)
+        [true,false].include? aObject
+      end
+
       def self.current_time_millis
         (Time.now.to_f * 1000).to_i
       end
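A trivial helper, but worth noting that it tests for exactly `true`/`false` rather than Ruby truthiness:

```ruby
LaunchDarkly::Impl::Util.is_bool(true)   # => true
LaunchDarkly::Impl::Util.is_bool(false)  # => true
LaunchDarkly::Impl::Util.is_bool(1)      # => false (truthy, but not a Boolean)
LaunchDarkly::Impl::Util.is_bool(nil)    # => false
```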
@@ -3,6 +3,13 @@ require "ldclient-rb/integrations/util/store_wrapper"
 
 module LaunchDarkly
   module Integrations
+    #
+    # Integration with [Consul](https://www.consul.io/).
+    #
+    # Note that in order to use this integration, you must first install the gem `diplomat`.
+    #
+    # @since 5.5.0
+    #
     module Consul
       #
       # Default value for the `prefix` option for {new_feature_store}.
@@ -29,7 +36,7 @@ module LaunchDarkly
       # @option opts [Integer] :capacity (1000) maximum number of items in the cache
       # @return [LaunchDarkly::Interfaces::FeatureStore] a feature store object
       #
-      def self.new_feature_store(opts, &block)
+      def self.new_feature_store(opts = {})
         core = LaunchDarkly::Impl::Integrations::Consul::ConsulFeatureStoreCore.new(opts)
         return LaunchDarkly::Integrations::Util::CachingStoreWrapper.new(core, opts)
       end
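With the new `opts = {}` default, the Consul store can be created with no arguments. A wiring sketch (requires the `diplomat` gem, per the module docs; option names come from the documented `new_feature_store` options):

```ruby
require 'ldclient-rb'

# All options are now optional; :prefix and :capacity shown for illustration.
store = LaunchDarkly::Integrations::Consul.new_feature_store(
  prefix: 'my-app',
  capacity: 1000
)
config = LaunchDarkly::Config.new(feature_store: store)
client = LaunchDarkly::LDClient.new('my-sdk-key', config)
```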
@@ -3,6 +3,14 @@ require "ldclient-rb/integrations/util/store_wrapper"
 
 module LaunchDarkly
   module Integrations
+    #
+    # Integration with [DynamoDB](https://aws.amazon.com/dynamodb/).
+    #
+    # Note that in order to use this integration, you must first install one of the AWS SDK gems: either
+    # `aws-sdk-dynamodb`, or the full `aws-sdk`.
+    #
+    # @since 5.5.0
+    #
     module DynamoDB
       #
       # Creates a DynamoDB-backed persistent feature store. For more details about how and why you can
@@ -38,9 +46,46 @@ module LaunchDarkly
       # @option opts [Integer] :capacity (1000) maximum number of items in the cache
       # @return [LaunchDarkly::Interfaces::FeatureStore] a feature store object
       #
-      def self.new_feature_store(table_name, opts)
+      def self.new_feature_store(table_name, opts = {})
         core = LaunchDarkly::Impl::Integrations::DynamoDB::DynamoDBFeatureStoreCore.new(table_name, opts)
-        return LaunchDarkly::Integrations::Util::CachingStoreWrapper.new(core, opts)
+        LaunchDarkly::Integrations::Util::CachingStoreWrapper.new(core, opts)
+      end
+
+      #
+      # Creates a DynamoDB-backed Big Segment store.
+      #
+      # Big Segments are a specific type of user segment. For more information, read the LaunchDarkly
+      # documentation: https://docs.launchdarkly.com/home/users/big-segments
+      #
+      # To use this method, you must first install one of the AWS SDK gems: either `aws-sdk-dynamodb`, or
+      # the full `aws-sdk`. Then, put the object returned by this method into the `store` property of your
+      # Big Segments configuration (see `Config`).
+      #
+      # @example Configuring Big Segments
+      #   store = LaunchDarkly::Integrations::DynamoDB::new_big_segment_store("my-table-name")
+      #   config = LaunchDarkly::Config.new(big_segments: LaunchDarkly::BigSegmentsConfig.new(store: store))
+      #   client = LaunchDarkly::LDClient.new(my_sdk_key, config)
+      #
+      # Note that the specified table must already exist in DynamoDB. It must have a partition key called
+      # "namespace", and a sort key called "key" (both strings). The SDK does not create the table
+      # automatically because it has no way of knowing what additional properties (such as permissions
+      # and throughput) you would want it to have.
+      #
+      # By default, the DynamoDB client will try to get your AWS credentials and region name from
+      # environment variables and/or local configuration files, as described in the AWS SDK documentation.
+      # You can also specify any supported AWS SDK options in `dynamodb_opts` -- or, provide an
+      # already-configured DynamoDB client in `existing_client`.
+      #
+      # @param opts [Hash] the configuration options (these are all the same as for `new_feature_store`,
+      #   except that there are no caching parameters)
+      # @option opts [Hash] :dynamodb_opts options to pass to the DynamoDB client constructor (ignored if you specify `:existing_client`)
+      # @option opts [Object] :existing_client an already-constructed DynamoDB client for the feature store to use
+      # @option opts [String] :prefix namespace prefix to add to all keys used by LaunchDarkly
+      # @option opts [Logger] :logger a `Logger` instance; defaults to `Config.default_logger`
+      # @return [LaunchDarkly::Interfaces::BigSegmentStore] a Big Segment store object
+      #
+      def self.new_big_segment_store(table_name, opts)
+        LaunchDarkly::Impl::Integrations::DynamoDB::DynamoDBBigSegmentStore.new(table_name, opts)
       end
     end
   end
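A companion sketch for the feature-store side (the Big Segment case is covered by the `@example` above). `"my-table"` is a placeholder, the table must already exist, and the `:expiration` cache option name is assumed from the caching wrapper's documented defaults:

```ruby
require 'ldclient-rb'

store = LaunchDarkly::Integrations::DynamoDB.new_feature_store(
  'my-table',
  prefix: 'launchdarkly',
  expiration: 30  # cache TTL in seconds, passed through to CachingStoreWrapper (assumed option name)
)
config = LaunchDarkly::Config.new(feature_store: store)
client = LaunchDarkly::LDClient.new('my-sdk-key', config)
```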
@@ -0,0 +1,108 @@
+require 'ldclient-rb/impl/integrations/file_data_source'
+
+module LaunchDarkly
+  module Integrations
+    #
+    # Provides a way to use local files as a source of feature flag state. This allows using a
+    # predetermined feature flag state without an actual LaunchDarkly connection.
+    #
+    # Reading flags from a file is only intended for pre-production environments. Production
+    # environments should always be configured to receive flag updates from LaunchDarkly.
+    #
+    # To use this component, call {FileData#data_source}, and store its return value in the
+    # {Config#data_source} property of your LaunchDarkly client configuration. In the options
+    # to `data_source`, set `paths` to the file path(s) of your data file(s):
+    #
+    #     file_source = LaunchDarkly::Integrations::FileData.data_source(paths: [ myFilePath ])
+    #     config = LaunchDarkly::Config.new(data_source: file_source)
+    #
+    # This will cause the client not to connect to LaunchDarkly to get feature flags. The
+    # client may still make network connections to send analytics events, unless you have disabled
+    # this with {Config#send_events} or {Config#offline?}.
+    #
+    # Flag data files can be either JSON or YAML. They contain an object with three possible
+    # properties:
+    #
+    # - `flags`: Feature flag definitions.
+    # - `flagValues`: Simplified feature flags that contain only a value.
+    # - `segments`: User segment definitions.
+    #
+    # The format of the data in `flags` and `segments` is defined by the LaunchDarkly application
+    # and is subject to change. Rather than trying to construct these objects yourself, it is simpler
+    # to request existing flags directly from the LaunchDarkly server in JSON format, and use this
+    # output as the starting point for your file. In Linux you would do this:
+    #
+    # ```
+    # curl -H "Authorization: YOUR_SDK_KEY" https://sdk.launchdarkly.com/sdk/latest-all
+    # ```
+    #
+    # The output will look something like this (but with many more properties):
+    #
+    #     {
+    #       "flags": {
+    #         "flag-key-1": {
+    #           "key": "flag-key-1",
+    #           "on": true,
+    #           "variations": [ "a", "b" ]
+    #         }
+    #       },
+    #       "segments": {
+    #         "segment-key-1": {
+    #           "key": "segment-key-1",
+    #           "includes": [ "user-key-1" ]
+    #         }
+    #       }
+    #     }
+    #
+    # Data in this format allows the SDK to exactly duplicate all the kinds of flag behavior supported
+    # by LaunchDarkly. However, in many cases you will not need this complexity, but will just want to
+    # set specific flag keys to specific values. For that, you can use a much simpler format:
+    #
+    #     {
+    #       "flagValues": {
+    #         "my-string-flag-key": "value-1",
+    #         "my-boolean-flag-key": true,
+    #         "my-integer-flag-key": 3
+    #       }
+    #     }
+    #
+    # Or, in YAML:
+    #
+    #     flagValues:
+    #       my-string-flag-key: "value-1"
+    #       my-boolean-flag-key: true
+    #       my-integer-flag-key: 1
+    #
+    # It is also possible to specify both "flags" and "flagValues", if you want some flags
+    # to have simple values and others to have complex behavior. However, it is an error to use the
+    # same flag key or segment key more than once, either in a single file or across multiple files.
+    #
+    # If the data source encounters any error in any file -- malformed content, a missing file, or a
+    # duplicate key -- it will not load flags from any of the files.
+    #
+    module FileData
+      #
+      # Returns a factory for the file data source component.
+      #
+      # @param options [Hash] the configuration options
+      # @option options [Array] :paths The paths of the source files for loading flag data. These
+      #   may be absolute paths or relative to the current working directory.
+      # @option options [Boolean] :auto_update True if the data source should watch for changes to
+      #   the source file(s) and reload flags whenever there is a change. Auto-updating will only
+      #   work if all of the files you specified have valid directory paths at startup time.
+      #   Note that the default implementation of this feature is based on polling the filesystem,
+      #   which may not perform well. If you install the 'listen' gem (not included by default, to
+      #   avoid adding unwanted dependencies to the SDK), its native file watching mechanism will be
+      #   used instead. However, 'listen' will not be used in JRuby 9.1 due to a known instability.
+      # @option options [Float] :poll_interval The minimum interval, in seconds, between checks for
+      #   file modifications - used only if auto_update is true, and if the native file-watching
+      #   mechanism from 'listen' is not being used. The default value is 1 second.
+      # @return an object that can be stored in {Config#data_source}
+      #
+      def self.data_source(options={})
+        return lambda { |sdk_key, config|
+          Impl::Integrations::FileDataSourceImpl.new(config.feature_store, config.logger, options) }
+      end
+    end
+  end
+end
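Putting the documented options together, an end-to-end sketch of an auto-updating file source; `flags.yml` is a placeholder path:

```ruby
require 'ldclient-rb'

file_source = LaunchDarkly::Integrations::FileData.data_source(
  paths: ['flags.yml'],
  auto_update: true,   # uses the 'listen' gem if installed, otherwise polls
  poll_interval: 2     # seconds; only relevant to the polling fallback
)

# send_events: false keeps the client from making any network connections.
config = LaunchDarkly::Config.new(data_source: file_source, send_events: false)
client = LaunchDarkly::LDClient.new('fake-sdk-key', config)
```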