amplitude-experiment 1.3.1 → 1.5.0
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/amplitude-experiment.gemspec +2 -1
- data/lib/amplitude-experiment.rb +13 -1
- data/lib/experiment/cohort/cohort.rb +25 -0
- data/lib/experiment/cohort/cohort_download_api.rb +90 -0
- data/lib/experiment/cohort/cohort_loader.rb +39 -0
- data/lib/experiment/cohort/cohort_storage.rb +91 -0
- data/lib/experiment/cohort/cohort_sync_config.rb +27 -0
- data/lib/experiment/deployment/deployment_runner.rb +135 -0
- data/lib/experiment/error.rb +37 -0
- data/lib/experiment/{local/fetcher.rb → flag/flag_config_fetcher.rb} +20 -3
- data/lib/experiment/flag/flag_config_storage.rb +53 -0
- data/lib/experiment/local/assignment/assignment.rb +3 -1
- data/lib/experiment/local/assignment/assignment_service.rb +15 -14
- data/lib/experiment/local/client.rb +83 -39
- data/lib/experiment/local/config.rb +26 -2
- data/lib/experiment/local/evaluation/evaluation.rb +2 -2
- data/lib/experiment/local/evaluation/lib/linuxArm64/libevaluation_interop.so +0 -0
- data/lib/experiment/local/evaluation/lib/linuxArm64/libevaluation_interop_api.h +1 -1
- data/lib/experiment/local/evaluation/lib/linuxX64/libevaluation_interop.so +0 -0
- data/lib/experiment/local/evaluation/lib/linuxX64/libevaluation_interop_api.h +1 -1
- data/lib/experiment/local/evaluation/lib/macosArm64/libevaluation_interop.dylib +0 -0
- data/lib/experiment/local/evaluation/lib/macosArm64/libevaluation_interop_api.h +1 -1
- data/lib/experiment/local/evaluation/lib/macosX64/libevaluation_interop.dylib +0 -0
- data/lib/experiment/local/evaluation/lib/macosX64/libevaluation_interop_api.h +1 -1
- data/lib/experiment/remote/client.rb +2 -34
- data/lib/experiment/user.rb +53 -19
- data/lib/experiment/util/flag_config.rb +60 -0
- data/lib/experiment/util/poller.rb +24 -0
- data/lib/experiment/util/topological_sort.rb +39 -0
- data/lib/experiment/util/user.rb +41 -0
- data/lib/experiment/util/variant.rb +32 -0
- data/lib/experiment/variant.rb +3 -1
- data/lib/experiment/version.rb +1 -1
- metadata +31 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c79062c9e6a6f0877449f49dab03aa2445cf31700ecb92010f88d5779759d73a
+  data.tar.gz: 1cf7d73b53cb125ecfaa6987bb4569d7416c5b02e194ef9f90be65c8da0b2fe6
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 45e8a5540fc7a0e9a9213bdfe7c8828137d35aee0ab6d5dab50e79cac47bf858f11db626a545f131c7a5e48bf05b04b9b82944441d5c307d9562fd42973e5edb
+  data.tar.gz: ffebca0974025ed2e983b106468be3806eb724cb5f0a2723d0f2782b67005d679ca8cfec4edb1b5236578a855472838a1f20e81d7641e362301d35fc5bfec32c
data/amplitude-experiment.gemspec
CHANGED
@@ -24,10 +24,11 @@ Gem::Specification.new do |spec|
   spec.add_development_dependency 'rake', '~> 13.0'
   spec.add_development_dependency 'rdoc', '= 6.4'
   spec.add_development_dependency 'rspec', '~> 3.6'
-  spec.add_development_dependency 'rubocop', '= 1.
+  spec.add_development_dependency 'rubocop', '= 1.22.3'
   spec.add_development_dependency 'simplecov', '~> 0.21'
   spec.add_development_dependency 'webmock', '~> 3.14'
   spec.add_development_dependency 'yard', '~> 0.9'
+  spec.add_development_dependency 'dotenv', '~> 2.8.1'
   spec.metadata['rubygems_mfa_required'] = 'false'
   spec.add_runtime_dependency 'ffi', '~> 1.15'
 end
data/lib/amplitude-experiment.rb
CHANGED
@@ -8,14 +8,26 @@ require 'experiment/factory'
 require 'experiment/remote/client'
 require 'experiment/local/client'
 require 'experiment/local/config'
-require 'experiment/local/fetcher'
 require 'experiment/local/assignment/assignment'
 require 'experiment/local/assignment/assignment_filter'
 require 'experiment/local/assignment/assignment_service'
 require 'experiment/local/assignment/assignment_config'
 require 'experiment/util/lru_cache'
 require 'experiment/util/hash'
+require 'experiment/util/topological_sort'
+require 'experiment/util/user'
+require 'experiment/util/variant'
 require 'experiment/error'
+require 'experiment/util/flag_config'
+require 'experiment/flag/flag_config_fetcher'
+require 'experiment/flag/flag_config_storage'
+require 'experiment/cohort/cohort_download_api'
+require 'experiment/cohort/cohort'
+require 'experiment/cohort/cohort_loader'
+require 'experiment/cohort/cohort_storage'
+require 'experiment/cohort/cohort_sync_config'
+require 'experiment/deployment/deployment_runner'
+require 'experiment/util/poller'
 
 # Amplitude Experiment Module
 module AmplitudeExperiment
data/lib/experiment/cohort/cohort.rb
ADDED
@@ -0,0 +1,25 @@
+module AmplitudeExperiment
+  USER_GROUP_TYPE = 'User'.freeze
+  # Cohort
+  class Cohort
+    attr_accessor :id, :last_modified, :size, :member_ids, :group_type
+
+    def initialize(id, last_modified, size, member_ids, group_type = USER_GROUP_TYPE)
+      @id = id
+      @last_modified = last_modified
+      @size = size
+      @member_ids = member_ids.to_set
+      @group_type = group_type
+    end
+
+    def ==(other)
+      return false unless other.is_a?(Cohort)
+
+      @id == other.id &&
+        @last_modified == other.last_modified &&
+        @size == other.size &&
+        @member_ids == other.member_ids &&
+        @group_type == other.group_type
+    end
+  end
+end
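For orientation, a small usage sketch of the new Cohort value object (IDs, timestamps and member IDs below are made up for illustration and are not taken from the package itself):

require 'set'
require 'amplitude-experiment'

# member_ids is normalized to a Set, so duplicates collapse and ordering is ignored.
cohort = AmplitudeExperiment::Cohort.new('cohort1', 1_700_000_000, 2, %w[user-1 user-2 user-1])
same   = AmplitudeExperiment::Cohort.new('cohort1', 1_700_000_000, 2, Set.new(%w[user-2 user-1]))

puts cohort.member_ids.size # => 2
puts cohort == same         # => true: field-by-field equality, including group_type
puts cohort.group_type      # => "User" (the USER_GROUP_TYPE default)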
data/lib/experiment/cohort/cohort_download_api.rb
ADDED
@@ -0,0 +1,90 @@
+require 'base64'
+require 'json'
+require 'net/http'
+require 'uri'
+require 'set'
+
+module AmplitudeExperiment
+  # CohortDownloadApi
+  class CohortDownloadApi
+    COHORT_REQUEST_TIMEOUT_MILLIS = 5000
+    COHORT_REQUEST_RETRY_DELAY_MILLIS = 100
+
+    def get_cohort(cohort_id, cohort = nil)
+      raise NotImplementedError
+    end
+  end
+
+  # DirectCohortDownloadApi
+  class DirectCohortDownloadApi < CohortDownloadApi
+    def initialize(api_key, secret_key, max_cohort_size, server_url, logger)
+      super()
+      @api_key = api_key
+      @secret_key = secret_key
+      @max_cohort_size = max_cohort_size
+      @server_url = server_url
+      @logger = logger
+    end
+
+    def get_cohort(cohort_id, cohort = nil)
+      @logger.debug("getCohortMembers(#{cohort_id}): start")
+      errors = 0
+
+      loop do
+        begin
+          last_modified = cohort.nil? ? nil : cohort.last_modified
+          response = get_cohort_members_request(cohort_id, last_modified)
+          @logger.debug("getCohortMembers(#{cohort_id}): status=#{response.code}")
+
+          case response.code.to_i
+          when 200
+            cohort_info = JSON.parse(response.body)
+            @logger.debug("getCohortMembers(#{cohort_id}): end - resultSize=#{cohort_info['size']}")
+            return Cohort.new(
+              cohort_info['cohortId'],
+              cohort_info['lastModified'],
+              cohort_info['size'],
+              cohort_info['memberIds'].to_set,
+              cohort_info['groupType']
+            )
+          when 204
+            @logger.debug("getCohortMembers(#{cohort_id}): Cohort not modified")
+            return nil
+          when 413
+            raise CohortTooLargeError.new(cohort_id, "Cohort exceeds max cohort size: #{response.code}")
+          else
+            raise HTTPErrorResponseError.new(response.code, cohort_id, "Unexpected response code: #{response.code}") if response.code.to_i != 202
+
+          end
+        rescue StandardError => e
+          errors += 1 unless response && e.is_a?(HTTPErrorResponseError) && response.code.to_i == 429
+          @logger.debug("getCohortMembers(#{cohort_id}): request-status error #{errors} - #{e}")
+          raise e if errors >= 3 || e.is_a?(CohortTooLargeError)
+        end
+
+        sleep(COHORT_REQUEST_RETRY_DELAY_MILLIS / 1000.0)
+      end
+    end
+
+    private
+
+    def get_cohort_members_request(cohort_id, last_modified)
+      headers = {
+        'Authorization' => "Basic #{basic_auth}",
+        'Content-Type' => 'application/json;charset=utf-8',
+        'X-Amp-Exp-Library' => "experiment-ruby-server/#{VERSION}"
+      }
+      url = "#{@server_url}/sdk/v1/cohort/#{cohort_id}?maxCohortSize=#{@max_cohort_size}"
+      url += "&lastModified=#{last_modified}" if last_modified
+
+      request = Net::HTTP::Get.new(URI(url), headers)
+      http = PersistentHttpClient.get(@server_url, { read_timeout: COHORT_REQUEST_TIMEOUT_MILLIS }, basic_auth)
+      http.request(request)
+    end
+
+    def basic_auth
+      credentials = "#{@api_key}:#{@secret_key}"
+      Base64.strict_encode64(credentials)
+    end
+  end
+end
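The download API retries transient failures and supports incremental syncs: passing the previously stored cohort sends its lastModified timestamp, and a 204 response comes back as nil. A hedged usage sketch (credentials are placeholders; a real call needs valid project API and secret keys):

require 'logger'
require 'amplitude-experiment'

api = AmplitudeExperiment::DirectCohortDownloadApi.new(
  'api-key', 'secret-key', 10_000,
  AmplitudeExperiment::DEFAULT_COHORT_SYNC_URL, Logger.new($stdout)
)

cohort = api.get_cohort('cohort1')             # full download on the first request
refreshed = api.get_cohort('cohort1', cohort)  # sends lastModified; nil means "not modified"
cohort = refreshed || cohort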
data/lib/experiment/cohort/cohort_loader.rb
ADDED
@@ -0,0 +1,39 @@
+module AmplitudeExperiment
+  # CohortLoader
+  class CohortLoader
+    def initialize(cohort_download_api, cohort_storage)
+      @cohort_download_api = cohort_download_api
+      @cohort_storage = cohort_storage
+      @jobs = {}
+      @lock_jobs = Mutex.new
+    end
+
+    def load_cohort(cohort_id)
+      @lock_jobs.synchronize do
+        unless @jobs.key?(cohort_id)
+          future = Concurrent::Promises.future do
+            load_cohort_internal(cohort_id)
+          ensure
+            remove_job(cohort_id)
+          end
+          @jobs[cohort_id] = future
+        end
+        @jobs[cohort_id]
+      end
+    end
+
+    private
+
+    def load_cohort_internal(cohort_id)
+      stored_cohort = @cohort_storage.cohort(cohort_id)
+      updated_cohort = @cohort_download_api.get_cohort(cohort_id, stored_cohort)
+      @cohort_storage.put_cohort(updated_cohort) unless updated_cohort.nil?
+    end
+
+    def remove_job(cohort_id)
+      @lock_jobs.synchronize do
+        @jobs.delete(cohort_id)
+      end
+    end
+  end
+end
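CohortLoader wraps each download in a concurrent-ruby future and deduplicates requests: a second load_cohort call for the same ID returns the in-flight future instead of starting another download. A sketch, assuming the download API and storage objects from the files above (credentials are placeholders):

require 'logger'
require 'concurrent'
require 'amplitude-experiment'

storage = AmplitudeExperiment::InMemoryCohortStorage.new
api = AmplitudeExperiment::DirectCohortDownloadApi.new(
  'api-key', 'secret-key', 10_000,
  AmplitudeExperiment::DEFAULT_COHORT_SYNC_URL, Logger.new($stdout)
)
loader = AmplitudeExperiment::CohortLoader.new(api, storage)

f1 = loader.load_cohort('cohort1')
f2 = loader.load_cohort('cohort1')  # same future object while the first download is still in flight

f1.value!  # block until the cohort has been downloaded and written to storage
puts storage.cohort_ids.include?('cohort1')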
data/lib/experiment/cohort/cohort_storage.rb
ADDED
@@ -0,0 +1,91 @@
+module AmplitudeExperiment
+  # CohortStorage
+  class CohortStorage
+    def cohort(cohort_id)
+      raise NotImplementedError
+    end
+
+    def cohorts
+      raise NotImplementedError
+    end
+
+    def get_cohorts_for_user(user_id, cohort_ids)
+      raise NotImplementedError
+    end
+
+    def get_cohorts_for_group(group_type, group_name, cohort_ids)
+      raise NotImplementedError
+    end
+
+    def put_cohort(cohort_description)
+      raise NotImplementedError
+    end
+
+    def delete_cohort(group_type, cohort_id)
+      raise NotImplementedError
+    end
+
+    def cohort_ids
+      raise NotImplementedError
+    end
+  end
+
+  class InMemoryCohortStorage < CohortStorage
+    def initialize
+      super
+      @lock = Mutex.new
+      @group_to_cohort_store = {}
+      @cohort_store = {}
+    end
+
+    def cohort(cohort_id)
+      @lock.synchronize do
+        @cohort_store[cohort_id]
+      end
+    end
+
+    def cohorts
+      @lock.synchronize do
+        @cohort_store.dup
+      end
+    end
+
+    def get_cohorts_for_user(user_id, cohort_ids)
+      get_cohorts_for_group(USER_GROUP_TYPE, user_id, cohort_ids)
+    end
+
+    def get_cohorts_for_group(group_type, group_name, cohort_ids)
+      result = Set.new
+      @lock.synchronize do
+        group_type_cohorts = @group_to_cohort_store[group_type] || Set.new
+        group_type_cohorts.each do |cohort_id|
+          members = @cohort_store[cohort_id]&.member_ids || Set.new
+          result.add(cohort_id) if cohort_ids.include?(cohort_id) && members.include?(group_name)
+        end
+      end
+      result
+    end
+
+    def put_cohort(cohort)
+      @lock.synchronize do
+        @group_to_cohort_store[cohort.group_type] ||= Set.new
+        @group_to_cohort_store[cohort.group_type].add(cohort.id)
+        @cohort_store[cohort.id] = cohort
+      end
+    end
+
+    def delete_cohort(group_type, cohort_id)
+      @lock.synchronize do
+        group_cohorts = @group_to_cohort_store[group_type] || Set.new
+        group_cohorts.delete(cohort_id)
+        @cohort_store.delete(cohort_id)
+      end
+    end
+
+    def cohort_ids
+      @lock.synchronize do
+        @cohort_store.keys.to_set
+      end
+    end
+  end
+end
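InMemoryCohortStorage indexes cohorts by group type and answers membership queries; only cohorts that are both requested and actually contain the member are returned. A small sketch with made-up cohorts (the 'org' group type is purely illustrative):

require 'set'
require 'amplitude-experiment'

storage = AmplitudeExperiment::InMemoryCohortStorage.new
storage.put_cohort(AmplitudeExperiment::Cohort.new('c1', 1, 2, %w[user-1 user-2]))
storage.put_cohort(AmplitudeExperiment::Cohort.new('c2', 1, 1, %w[org-7], 'org'))

puts storage.get_cohorts_for_user('user-1', Set.new(%w[c1 c2])).to_a.inspect   # => ["c1"]
puts storage.get_cohorts_for_group('org', 'org-7', Set.new(%w[c2])).to_a.inspect  # => ["c2"]
puts storage.cohort_ids.to_a.sort.inspect  # => ["c1", "c2"]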
data/lib/experiment/cohort/cohort_sync_config.rb
ADDED
@@ -0,0 +1,27 @@
+module AmplitudeExperiment
+  DEFAULT_COHORT_SYNC_URL = 'https://cohort-v2.lab.amplitude.com'.freeze
+  EU_COHORT_SYNC_URL = 'https://cohort-v2.lab.eu.amplitude.com'.freeze
+
+  # Experiment Cohort Sync Configuration
+  class CohortSyncConfig
+    # This configuration is used to set up the cohort loader. The cohort loader is responsible for
+    # downloading cohorts from the server and storing them locally.
+    # Parameters:
+    #   api_key (str): The project API Key
+    #   secret_key (str): The project Secret Key
+    #   max_cohort_size (int): The maximum cohort size that can be downloaded
+    #   cohort_polling_interval_millis (int): The interval in milliseconds to poll for cohorts, the minimum value is 60000
+    #   cohort_server_url (str): The server endpoint from which to request cohorts
+
+    attr_accessor :api_key, :secret_key, :max_cohort_size, :cohort_polling_interval_millis, :cohort_server_url
+
+    def initialize(api_key, secret_key, max_cohort_size: 2_147_483_647, cohort_polling_interval_millis: 60_000,
+                   cohort_server_url: DEFAULT_COHORT_SYNC_URL)
+      @api_key = api_key
+      @secret_key = secret_key
+      @max_cohort_size = max_cohort_size
+      @cohort_polling_interval_millis = [cohort_polling_interval_millis, 60_000].max
+      @cohort_server_url = cohort_server_url
+    end
+  end
+end
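A construction sketch for the new CohortSyncConfig (keys are placeholders). The constructor clamps the polling interval to the documented 60-second floor; presumably this object is then attached to the local evaluation configuration and read as cohort_sync_config by the DeploymentRunner below, though that wiring is not visible in this diff:

require 'amplitude-experiment'

config = AmplitudeExperiment::CohortSyncConfig.new(
  'api-key', 'secret-key',
  max_cohort_size: 50_000,
  cohort_polling_interval_millis: 10_000,                     # deliberately below the minimum
  cohort_server_url: AmplitudeExperiment::EU_COHORT_SYNC_URL  # EU data-residency endpoint
)
puts config.cohort_polling_interval_millis  # => 60000 (clamped to the 60s minimum)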
data/lib/experiment/deployment/deployment_runner.rb
ADDED
@@ -0,0 +1,135 @@
+require 'set'
+
+module AmplitudeExperiment
+  # DeploymentRunner
+  class DeploymentRunner
+    def initialize(
+      config,
+      flag_config_fetcher,
+      flag_config_storage,
+      cohort_storage,
+      logger,
+      cohort_loader = nil
+    )
+      @config = config
+      @flag_config_fetcher = flag_config_fetcher
+      @flag_config_storage = flag_config_storage
+      @cohort_storage = cohort_storage
+      @cohort_loader = cohort_loader
+      @lock = Mutex.new
+      @logger = logger
+      @executor = Concurrent::ThreadPoolExecutor.new(
+        max_threads: 10,
+        name: 'DeploymentRunnerExecutor'
+      )
+    end
+
+    def start
+      @lock.synchronize do
+        update_flag_configs
+        @flag_poller = Poller.new(
+          @config.flag_config_polling_interval_millis / 1000.0,
+          method(:periodic_flag_update)
+        )
+        @flag_poller.start
+        if @config.cohort_sync_config
+          @cohort_poller = Poller.new(
+            @config.cohort_sync_config.cohort_polling_interval_millis / 1000.0,
+            method(:update_cohorts)
+          )
+          @cohort_poller.start
+        end
+      end
+    end
+
+    def stop
+      @flag_poller&.stop
+      @flag_poller = nil
+      @cohort_poller&.stop
+      @cohort_poller = nil
+    end
+
+    private
+
+    def periodic_flag_update
+      @logger.debug('Periodic flag update: start')
+      update_flag_configs
+    rescue StandardError => e
+      @logger.error("Error while updating flags: #{e}")
+    end
+
+    def update_flag_configs
+      flags = @flag_config_fetcher.fetch_v2
+      flag_configs = flags.each_with_object({}) { |flag, hash| hash[flag['key']] = flag }
+      flag_keys = flag_configs.values.map { |flag| flag['key'] }.to_set
+      @flag_config_storage.remove_if { |f| !flag_keys.include?(f['key']) }
+
+      unless @cohort_loader
+        flag_configs.each do |flag_key, flag_config|
+          @logger.debug("Putting non-cohort flag #{flag_key}")
+          @flag_config_storage.put_flag_config(flag_config)
+        end
+        return
+      end
+
+      new_cohort_ids = Set.new
+      flag_configs.each do |_, flag_config|
+        new_cohort_ids.merge(AmplitudeExperiment.get_all_cohort_ids_from_flag(flag_config))
+      end
+
+      existing_cohort_ids = @cohort_storage.cohort_ids
+      cohort_ids_to_download = new_cohort_ids - existing_cohort_ids
+
+      download_cohorts(cohort_ids_to_download)
+
+      updated_cohort_ids = @cohort_storage.cohort_ids
+
+      flag_configs.each do |flag_key, flag_config|
+        cohort_ids = AmplitudeExperiment.get_all_cohort_ids_from_flag(flag_config)
+        @logger.debug("Storing flag #{flag_key}")
+        @flag_config_storage.put_flag_config(flag_config)
+        missing_cohorts = cohort_ids - updated_cohort_ids
+
+        @logger.warn("Flag #{flag_key} - failed to load cohorts: #{missing_cohorts}") if missing_cohorts.any?
+      end
+
+      delete_unused_cohorts
+      @logger.debug("Refreshed #{flag_configs.size} flag configs.")
+    end
+
+    def download_cohorts(cohort_ids)
+      futures = cohort_ids.map do |cohort_id|
+        Concurrent::Promises.future_on(@executor) do
+          future = @cohort_loader.load_cohort(cohort_id)
+          future.value!
+        rescue StandardError => e
+          @logger.error("Failed to download cohort #{cohort_id}: #{e.message}")
+          nil
+        end
+      end
+
+      Concurrent::Promises.zip(*futures).value!
+    end
+
+    def update_cohorts
+      @logger.debug('Periodic cohort update: start')
+      cohort_ids = AmplitudeExperiment.get_all_cohort_ids_from_flags(@flag_config_storage.flag_configs)
+      download_cohorts(cohort_ids)
+    end
+
+    def delete_unused_cohorts
+      flag_cohort_ids = Set.new
+      @flag_config_storage.flag_configs.each do |_, flag|
+        flag_cohort_ids.merge(AmplitudeExperiment.get_all_cohort_ids_from_flag(flag))
+      end
+
+      storage_cohorts = @cohort_storage.cohorts
+      deleted_cohort_ids = storage_cohorts.keys.to_set - flag_cohort_ids
+
+      deleted_cohort_ids.each do |deleted_cohort_id|
+        deleted_cohort = storage_cohorts[deleted_cohort_id]
+        @cohort_storage.delete_cohort(deleted_cohort.group_type, deleted_cohort_id) if deleted_cohort
+      end
+    end
+  end
+end
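The runner's cohort bookkeeping is plain set arithmetic: download whatever the freshly fetched flags reference but storage lacks (update_flag_configs), and delete whatever storage holds but no flag references any more (delete_unused_cohorts). A standalone sketch of that logic with made-up IDs:

require 'set'

flag_cohort_ids     = Set.new(%w[c1 c2 c3])  # cohorts referenced by the fetched flags
existing_cohort_ids = Set.new(%w[c2 c4])     # cohorts already in storage

cohort_ids_to_download = flag_cohort_ids - existing_cohort_ids
deleted_cohort_ids     = existing_cohort_ids - flag_cohort_ids

puts cohort_ids_to_download.to_a.inspect  # => ["c1", "c3"]
puts deleted_cohort_ids.to_a.inspect      # => ["c4"]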
data/lib/experiment/error.rb
CHANGED
@@ -8,4 +8,41 @@ module AmplitudeExperiment
       @status_code = status_code
     end
   end
+
+  class CohortDownloadError < StandardError
+    attr_reader :cohort_id
+
+    def initialize(cohort_id, message)
+      super(message)
+      @cohort_id = cohort_id
+    end
+  end
+
+  # CohortTooLargeError
+  class CohortTooLargeError < CohortDownloadError
+  end
+
+  # HTTPErrorResponseError
+  class HTTPErrorResponseError < CohortDownloadError
+    attr_reader :status_code
+
+    def initialize(status_code, cohort_id, message)
+      super(cohort_id, message)
+      @status_code = status_code
+    end
+  end
+
+  class CycleError < StandardError
+    # Raised when topological sorting encounters a cycle between flag dependencies.
+    attr_reader :path
+
+    def initialize(path)
+      super
+      @path = path
+    end
+
+    def to_s
+      "Detected a cycle between flags #{@path}"
+    end
+  end
 end
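The new error classes carry the failing cohort ID (and, for HTTP failures, the status code), so a caller can separate "cohort too large, do not retry" from other download failures. A hedged handling sketch, assuming api is a DirectCohortDownloadApi instance:

require 'amplitude-experiment'

def fetch_cohort_or_nil(api, cohort_id, stored = nil)
  api.get_cohort(cohort_id, stored)
rescue AmplitudeExperiment::CohortTooLargeError => e
  # 413: the cohort exceeds max_cohort_size; retrying will not help.
  warn "cohort #{e.cohort_id} too large: #{e.message}"
  nil
rescue AmplitudeExperiment::HTTPErrorResponseError => e
  # Other non-2xx responses surface the status code on the error object.
  warn "cohort #{e.cohort_id} failed with status #{e.status_code}"
  nil
end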
data/lib/experiment/{local/fetcher.rb → flag/flag_config_fetcher.rb}
RENAMED
@@ -9,7 +9,6 @@ module AmplitudeExperiment
       @api_key = api_key
       @server_url = server_url
       @logger = logger
-      @http = PersistentHttpClient.get(server_url, { read_timeout: FLAG_CONFIG_TIMEOUT }, @api_key)
     end
 
     # Fetch local evaluation mode flag configs from the Experiment API server.
@@ -24,13 +23,30 @@ module AmplitudeExperiment
         'X-Amp-Exp-Library' => "experiment-ruby-server/#{VERSION}"
       }
       request = Net::HTTP::Get.new("#{@server_url}/sdk/v1/flags", headers)
-
+      http = PersistentHttpClient.get(@server_url, { read_timeout: FLAG_CONFIG_TIMEOUT }, @api_key)
+      response = http.request(request)
       raise "flagConfigs - received error response: #{response.code}: #{response.body}" unless response.is_a?(Net::HTTPOK)
 
       @logger.debug("[Experiment] Fetch flag configs: #{response.body}")
       response.body
     end
 
+    def fetch_v2
+      # fetch flag_configs
+      headers = {
+        'Authorization' => "Api-Key #{@api_key}",
+        'Content-Type' => 'application/json;charset=utf-8',
+        'X-Amp-Exp-Library' => "experiment-ruby-server/#{VERSION}"
+      }
+      request = Net::HTTP::Get.new("#{@server_url}/sdk/v2/flags?v=0", headers)
+      http = PersistentHttpClient.get(@server_url, { read_timeout: FLAG_CONFIG_TIMEOUT }, @api_key)
+      response = http.request(request)
+      raise "flagConfigs - received error response: #{response.code}: #{response.body}" unless response.is_a?(Net::HTTPOK)
+
+      @logger.debug("[Experiment] Fetch flag configs: #{response.body}")
+      JSON.parse(response.body)
+    end
+
     # Fetch local evaluation mode flag configs from the Experiment API server.
     # These flag configs can be used to perform local evaluation.
     #
@@ -43,7 +59,8 @@ module AmplitudeExperiment
         'X-Amp-Exp-Library' => "experiment-ruby-server/#{VERSION}"
      }
       request = Net::HTTP::Get.new("#{@server_url}/sdk/rules?eval_mode=local", headers)
-
+      http = PersistentHttpClient.get(@server_url, { read_timeout: FLAG_CONFIG_TIMEOUT }, @api_key)
+      response = http.request(request)
       raise "flagConfigs - received error response: #{response.code}: #{response.body}" unless response.is_a?(Net::HTTPOK)
 
       flag_configs = parse(response.body)
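Where fetch (v1) returns the raw response body as a string, the new fetch_v2 hits /sdk/v2/flags?v=0 and returns parsed JSON: an array of flag-config hashes that DeploymentRunner then keys by 'key'. A standalone sketch of that keying step (hash fields other than 'key' are illustrative):

flags = [
  { 'key' => 'flag-a', 'variants' => {}, 'segments' => [] },
  { 'key' => 'flag-b', 'variants' => {}, 'segments' => [] }
]
flag_configs = flags.each_with_object({}) { |flag, hash| hash[flag['key']] = flag }
puts flag_configs.keys.inspect  # => ["flag-a", "flag-b"]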
data/lib/experiment/flag/flag_config_storage.rb
ADDED
@@ -0,0 +1,53 @@
+module AmplitudeExperiment
+  # FlagConfigStorage
+  class FlagConfigStorage
+    def flag_config(key)
+      raise NotImplementedError
+    end
+
+    def flag_configs
+      raise NotImplementedError
+    end
+
+    def put_flag_config(flag_config)
+      raise NotImplementedError
+    end
+
+    def remove_if(&condition)
+      raise NotImplementedError
+    end
+  end
+
+  # InMemoryFlagConfigStorage
+  class InMemoryFlagConfigStorage < FlagConfigStorage
+    def initialize
+      super # Call the parent class's constructor with no arguments
+      @flag_configs = {}
+      @flag_configs_lock = Mutex.new
+    end
+
+    def flag_config(key)
+      @flag_configs_lock.synchronize do
+        @flag_configs[key]
+      end
+    end
+
+    def flag_configs
+      @flag_configs_lock.synchronize do
+        @flag_configs.dup
+      end
+    end
+
+    def put_flag_config(flag_config)
+      @flag_configs_lock.synchronize do
+        @flag_configs[flag_config['key']] = flag_config
+      end
+    end
+
+    def remove_if
+      @flag_configs_lock.synchronize do
+        @flag_configs.delete_if { |_key, value| yield(value) }
+      end
+    end
+  end
+end
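InMemoryFlagConfigStorage is a mutex-guarded hash keyed by each flag's 'key'; remove_if yields each stored config, which is how DeploymentRunner prunes flags that no longer come back from fetch_v2. A small sketch (flag fields besides 'key' are illustrative):

require 'set'
require 'amplitude-experiment'

storage = AmplitudeExperiment::InMemoryFlagConfigStorage.new
storage.put_flag_config('key' => 'flag-a', 'variants' => {})
storage.put_flag_config('key' => 'flag-b', 'variants' => {})

live_keys = Set.new(%w[flag-a])
storage.remove_if { |config| !live_keys.include?(config['key']) }
puts storage.flag_configs.keys.inspect  # => ["flag-a"]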
data/lib/experiment/local/assignment/assignment.rb
CHANGED
@@ -13,7 +13,9 @@ module AmplitudeExperiment
     def canonicalize
       sb = "#{@user&.user_id&.strip} #{@user&.device_id&.strip} "
       results.sort.to_h.each do |key, value|
-
+        next unless value.key
+
+        sb += "#{key.strip} #{value.key&.strip} "
       end
       sb
     end
data/lib/experiment/local/assignment/assignment_service.rb
CHANGED
@@ -8,10 +8,10 @@ module AmplitudeExperiment
     end
 
     def track(assignment)
-      @amplitude.track(to_event(assignment)) if @assignment_filter.should_track(assignment)
+      @amplitude.track(AssignmentService.to_event(assignment)) if @assignment_filter.should_track(assignment)
     end
 
-    def to_event(assignment)
+    def self.to_event(assignment)
      event = AmplitudeAnalytics::BaseEvent.new(
        '[Experiment] Assignment',
        user_id: assignment.user.user_id,
@@ -20,28 +20,29 @@ module AmplitudeExperiment
         user_properties: {}
       )
 
-      assignment.results.each do |results_key, result|
-        event.event_properties["#{results_key}.variant"] = result['variant']['key']
-      end
-
       set = {}
       unset = {}
 
-      assignment.results.each do |
-        next
+      assignment.results.sort.to_h.each do |flag_key, variant|
+        next unless variant.key
+
+        version = variant.metadata['flagVersion'] if variant.metadata
+        segment_name = variant.metadata['segmentName'] if variant.metadata
+        flag_type = variant.metadata['flagType'] if variant.metadata
+        default = variant.metadata ? variant.metadata.fetch('default', false) : false
+        event.event_properties["#{flag_key}.variant"] = variant.key
+        event.event_properties["#{flag_key}.details"] = "v#{version} rule:#{segment_name}" if version && segment_name
+        next if flag_type == FLAG_TYPE_MUTUAL_EXCLUSION_GROUP
 
-        if
-          unset["[Experiment] #{
+        if default
+          unset["[Experiment] #{flag_key}"] = '-'
         else
-          set["[Experiment] #{
+          set["[Experiment] #{flag_key}"] = variant.key
         end
       end
-
       event.user_properties['$set'] = set
       event.user_properties['$unset'] = unset
-
       event.insert_id = "#{event.user_id} #{event.device_id} #{AmplitudeExperiment.hash_code(assignment.canonicalize)} #{assignment.timestamp / DAY_MILLIS}"
-
       event
     end
   end
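For reference, the insert_id built above combines user, device, a hash of the canonicalized results, and a day bucket of the assignment timestamp, so identical assignments repeated within the same day collapse into one tracked event. A standalone sketch of that bucketing arithmetic (DAY_MILLIS is defined elsewhere in the SDK and not shown in this diff; 86,400,000 is assumed here, as the constant's name suggests, and the hash value is a placeholder):

DAY_MILLIS = 24 * 60 * 60 * 1000  # assumed value of the SDK constant

timestamp_millis = Time.now.to_i * 1000
day_bucket = timestamp_millis / DAY_MILLIS

# Same user/device/results hash within the same day => same insert_id => deduplicated by Amplitude.
insert_id = "user-1 device-1 #{12_345} #{day_bucket}"
puts insert_id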