bento-sdk 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,123 @@
1
+ require "time"
2
+
3
+ require "bento/sdk/defaults"
4
+ require "bento/sdk/logging"
5
+ require "bento/sdk/utils"
6
+ require "bento/sdk/worker"
7
+
8
module Bento
  class Analytics
    # Public entry point for enqueueing analytics events. Events are pushed
    # onto an in-memory Queue and drained asynchronously by a background
    # worker thread (Bento::Analytics::Worker).
    class Client
      include Bento::Analytics::Utils
      include Bento::Analytics::Logging

      # @param [Hash] opts
      # @option opts [String] :write_key Your project's write_key (required)
      # @option opts [Integer] :max_queue_size Maximum number of calls allowed
      #   to remain queued.
      # @option opts [Proc] :on_error Handles error calls from the API.

      def initialize(opts = {})
        symbolize_keys!(opts)

        # Thread-safe FIFO shared between this client and the worker thread.
        @queue = Queue.new
        @write_key = opts[:write_key]

        # NOTE(review): this logs the write key at debug level; consider
        # redacting if the key is treated as a secret.
        logger.debug("🍱Tracked events will be sent to: #{@write_key}")

        @max_queue_size = opts[:max_queue_size] || Defaults::Queue::MAX_SIZE
        # Guards lazy creation of the worker thread (see #ensure_worker_running).
        @worker_mutex = Mutex.new
        @worker = Bento::Analytics::Worker.new(@queue, @write_key, opts)
        @worker_thread = nil

        check_write_key!

        # On process exit, flag the worker thread (via its thread-local) so it
        # can finish up and stop.
        at_exit { @worker_thread && @worker_thread[:should_exit] = true }
      end

      # Synchronously waits until the worker has flushed the queue.
      #
      # Use only for scripts which are not long-running, and will specifically
      # exit.
      def flush
        # Busy-wait in 100ms polls until the queue is drained and the worker
        # has no request in flight.
        while !@queue.empty? || @worker.is_requesting?
          ensure_worker_running
          sleep(0.1)
        end
      end

      # @!macro common_attrs
      #   @option attrs [String] :anonymous_id ID for a user when you don't know
      #     who they are yet. (optional but you must provide either an
      #     `anonymous_id` or `user_id`)
      #   @option attrs [Hash] :context ({})
      #   @option attrs [Hash] :integrations What integrations this event
      #     goes to (optional)
      #   @option attrs [String] :message_id ID that uniquely
      #     identifies a message across the API. (optional)
      #   @option attrs [Time] :timestamp When the event occurred (optional)
      #   @option attrs [String] :user_id The ID for this user in your database
      #     (optional but you must provide either an `anonymous_id` or `user_id`)
      #   @option attrs [Hash] :options Options such as user traits (optional)

      # Tracks an event
      #
      # @param [Hash] attrs
      #
      # @option attrs [String] :event Event name
      # @option attrs [Hash] :properties Event properties (optional)
      # @macro common_attrs

      def track(attrs)
        symbolize_keys! attrs
        # Validate and normalize the payload before it is queued.
        track_but_parsed = FieldParser.parse_for_track(attrs, @write_key)
        enqueue(track_but_parsed)
      end

      # @return [Integer] number of messages in the queue
      def queued_messages
        @queue.length
      end

      private

      # private: Enqueues the action.
      #
      # returns Boolean of whether the item was added to the queue.
      def enqueue(action)
        if @queue.length < @max_queue_size
          @queue << action
          ensure_worker_running

          true
        else
          # Drop the event rather than block the caller when saturated.
          logger.warn(
            "Queue is full, dropping events. The :max_queue_size " \
            "configuration parameter can be increased to prevent this from " \
            "happening."
          )
          false
        end
      end

      # private: Checks that the write_key is properly initialized
      def check_write_key!
        raise ArgumentError, "Write key must be initialized" if @write_key.nil?
      end

      # private: Starts the background worker thread unless it is already
      # alive. Check–lock–recheck ensures only one thread is ever spawned
      # even when called concurrently.
      def ensure_worker_running
        return if worker_running?
        @worker_mutex.synchronize do
          return if worker_running?
          @worker_thread = Thread.new {
            @worker.run
          }
        end
      end

      # private: Whether the worker thread exists and is alive.
      def worker_running?
        @worker_thread && @worker_thread.alive?
      end
    end
  end
end
@@ -0,0 +1,85 @@
1
module Bento
  # Layered configuration object. Each instance holds one layer of settings
  # and an optional parent; lookups fall back through the parent chain.
  # Settings are readable/writable as methods (config.site_uuid,
  # config.site_uuid = ...) via method_missing.
  class Configuration
    # Known settings and their defaults. Each key can also be supplied via a
    # BENTO_<KEY> environment variable (see #config_from_environment).
    DEFAULT_CONFIGURATION = {
      site_uuid: nil,
      publishable_key: nil,
      secret_key: nil,
      dev_mode: false,
      sync_strategy: nil,
      sync_strategy_options: {},
      log_level: :warn,
    }.freeze

    # @param config_from_initialize [Hash] explicit setting overrides.
    # Precedence (last merge wins): environment > explicit args > defaults.
    def initialize(config_from_initialize = {})
      @config = default_config
        .merge(config_from_initialize)
        .merge(config_from_environment)
    end

    # Builds a child configuration whose lookups fall back to `parent`.
    # Uses allocate so #initialize (and its env/default merging) is NOT
    # re-run for the child layer.
    def self.inherit(parent, config_from_arguments)
      config = allocate
      config.instance_variable_set(:@parent, parent)
      config.instance_variable_set(:@config, config_from_arguments.to_hash)
      config
    end

    # Returns a new configuration layering `other_config` on top of self.
    def merge(other_config)
      self.class.inherit(self, other_config)
    end

    # Only this layer's settings (no parent fallback).
    def to_hash
      config
    end

    # Fully-flattened settings: parent chain merged bottom-up, with this
    # layer winning on conflicts.
    def to_h
      parent ? parent.to_h.merge(config) : to_hash
    end

    # Two configurations are equal when both their own layer and their
    # parent chains match.
    def ==(other)
      config == other.config && parent == other.parent
    end

    protected

    attr_reader :config, :parent

    def default_config
      DEFAULT_CONFIGURATION
    end

    # Collects BENTO_<KEY> environment overrides for every known setting.
    # Note: env values are always strings (no type coercion is applied).
    def config_from_environment
      default_config.keys.each_with_object({}) do |key, config|
        value = ENV.fetch("BENTO_#{key.to_s.upcase}", nil)
        config[key] = value if value
      end
    end

    # True when the key exists in this layer or anywhere up the chain.
    def key?(key)
      config.key?(key) || parent&.key?(key)
    end

    # Reads a key, falling back to the parent chain when absent here.
    def [](key)
      config.key?(key) ? config[key] : parent && parent[key]
    end

    # Writes always land in this layer; parents are never mutated.
    def []=(key, value)
      config[key] = value
    end

    # Treats both `name` and `name=` as present when `name` is a known key.
    def respond_to_missing?(name, include_private = false)
      name = name.to_s.sub(/=$/, '')
      key?(name.to_sym) || super
    end

    # Dispatches config.foo to #[] and config.foo = x to #[]=.
    def method_missing(name, *args, &block)
      if respond_to_missing?(name)
        name = name.to_s
        method = name =~ /=$/ ? :[]= : :[]
        name = name.sub(/=$/, '').to_sym
        send(method, name, *args, &block)
      else
        super
      end
    end
  end
end
@@ -0,0 +1,37 @@
1
module Bento
  class Analytics
    # Compile-time defaults used across the SDK, grouped by concern so
    # callers can reference (or `include`) just the namespace they need.
    module Defaults
      # HTTP endpoint settings for the tracking API.
      module Request
        HOST = "app.bentonow.com"
        PORT = 443
        PATH = "/tracking/events/"
        SSL = true

        # Headers sent with every request unless overridden.
        HEADERS = {
          "Accept" => "application/json",
          "Content-Type" => "application/json",
          "User-Agent" => "bento-ruby",
        }

        # Maximum delivery attempts per batch.
        RETRIES = 15
      end

      # Client-side event queue limit.
      module Queue
        MAX_SIZE = 10_000
      end

      # Per-message payload limit.
      module Message
        MAX_BYTES = 32_768 # 32Kb
      end

      # Limits for a single batched request.
      module MessageBatch
        MAX_BYTES = 512_000 # 500Kb
        MAX_SIZE = 100
      end

      # Exponential backoff tuning for retried requests.
      module BackoffPolicy
        MIN_TIMEOUT_MS = 100
        MAX_TIMEOUT_MS = 10_000
        MULTIPLIER = 1.5
        RANDOMIZATION_FACTOR = 0.5
      end
    end
  end
end
@@ -0,0 +1,104 @@
1
+ require 'digest'
+ require 'securerandom'
2
+
3
module Bento
  class Analytics
    # Validates and normalizes raw event hashes into the payload shape the
    # Bento tracking API expects.
    class FieldParser
      class << self
        include Bento::Analytics::Utils

        # Builds the full payload for a `track` call.
        #
        # @param fields [Hash] user-supplied event attributes
        # @param write_key [String] the site/write key the event belongs to
        # @return [Hash] the normalized event payload
        # @raise [ArgumentError] when required fields are missing or malformed
        def parse_for_track(fields, write_key)
          common = parse_common_fields(fields)

          event = fields[:event]

          custom_fields = fields[:custom_fields] || {}
          details = fields[:details] || {}
          identity = fields[:identity] || {}
          # Deterministic visitor id: the same identity + write_key always
          # hashes to the same value when no explicit :visitor_uuid is given.
          visitor_uuid = fields[:visitor_uuid] || Digest::SHA2.hexdigest("api" + identity.to_s + write_key)
          page = fields[:page] || {}

          check_presence!(event, "event")
          check_is_hash!(details, "details")
          check_is_hash!(page, "page")
          check_is_hash!(identity, "identity")
          check_is_hash!(custom_fields, "custom_fields")

          # Convert any Date/Time values in details to ISO8601 strings
          # (helper from Bento::Analytics::Utils).
          isoify_dates! details

          final_event = {
            id: SecureRandom.hex(10),
            site: write_key,
            identity: identity,
            # "visit" groups events by calendar day: the hash input includes
            # today's date, so it changes once per day per identity.
            visit: Digest::SHA2.hexdigest(Time.now.strftime("%B %e, %Y") + identity.to_s + write_key),
            visitor: visitor_uuid,
            type: event.to_s,
            date: Time.now,
            browser: {
              "user_agent" => "Bento/API (Rails)",
            },
            page: page,
            details: details,
            fields: custom_fields,
          }

          # Event-specific values win over the common fields on conflicts.
          common = common.merge(final_event)
          common
        end

        private

        # Extracts and validates the fields shared by all message types.
        # Returns a hash with :page, :date (ISO8601), :identity and,
        # when supplied, :visitor_uuid.
        def parse_common_fields(fields)
          timestamp = fields[:date] || Time.new
          message_id = fields[:message_id].to_s if fields[:message_id]
          page = fields[:page] || {}
          context = {}

          check_user_id! fields

          check_timestamp! timestamp

          add_context! context

          parsed = {
            page: page,
            date: datetime_in_iso8601(timestamp),
            identity: {email: nil},
          }

          parsed[:identity][:email] = fields[:identity][:email] if fields[:identity]
          parsed[:visitor_uuid] = fields[:visitor_uuid] if fields[:visitor_uuid]

          # NOTE(review): message_id and context are computed above but never
          # placed into the returned hash — confirm whether that is intended.
          parsed
        end

        # Every event must be attributable to someone: either an identity
        # hash or an explicit visitor_uuid.
        def check_user_id!(fields)
          unless fields[:identity] || fields[:visitor_uuid]
            raise ArgumentError, "Must supply either visitor_uuid or identity"
          end
        end

        # private: Rejects non-Time timestamps (a Date or String is an error).
        def check_timestamp!(timestamp)
          raise ArgumentError, "Timestamp must be a Time" unless timestamp.is_a? Time
        end

        # private: Tags the event with the producing library.
        def add_context!(context)
          context[:library] = {name: "bento-ruby"}
        end

        # private: Ensures that a string is non-empty
        #
        # obj - String|Number that must be non-blank
        # name - Name of the validated value
        def check_presence!(obj, name)
          if obj.nil? || (obj.is_a?(String) && obj.empty?)
            raise ArgumentError, "#{name} must be given"
          end
        end

        # private: Ensures the value is a Hash.
        def check_is_hash!(obj, name)
          raise ArgumentError, "#{name} must be a Hash" unless obj.is_a? Hash
        end
      end
    end
  end
end
@@ -0,0 +1,60 @@
1
+ require "logger"
2
+
3
module Bento
  class Analytics
    # Decorates another logger so every message is tagged with a fixed prefix.
    class PrefixedLogger
      # @param logger [#debug, #info, #warn, #error] the wrapped logger
      # @param prefix [String] text prepended to every message
      def initialize(logger, prefix)
        @logger = logger
        @prefix = prefix
      end

      # One forwarding method per severity; each delegates to the wrapped
      # logger with the prefix prepended.
      %i[debug info warn error].each do |severity|
        define_method(severity) do |msg|
          @logger.public_send(severity, "#{@prefix} #{msg}")
        end
      end
    end

    # Mixin that exposes a single shared, lazily-built logger at both the
    # class and instance level of any includer.
    module Logging
      class << self
        # Allows host applications to inject their own logger.
        attr_writer :logger

        # Shared logger, memoized after the first call. Uses Rails.logger
        # when Rails is loaded, otherwise a STDOUT Logger.
        def logger
          return @logger if @logger
          @logger = PrefixedLogger.new(base_logger, "[bento-sdk]")
        end

        private

        # Picks the underlying logger to wrap.
        def base_logger
          if defined?(Rails)
            Rails.logger
          else
            Logger.new(STDOUT).tap { |base| base.progname = "Bento::Analytics" }
          end
        end
      end

      # Gives the including class a class-level `logger` method.
      def self.included(base)
        base.define_singleton_method(:logger) { Logging.logger }
      end

      # Instance-level access to the shared logger.
      def logger
        Logging.logger
      end
    end
  end
end
@@ -0,0 +1,72 @@
1
+ require "forwardable"
2
+ require "bento/sdk/logging"
3
+
4
module Bento
  class Analytics
    # A batch of `Message`s to be sent to the API
    class MessageBatch
      # Raised when a message cannot be serialized to JSON.
      class JSONGenerationError < StandardError; end

      extend Forwardable
      include Bento::Analytics::Logging
      include Bento::Analytics::Defaults::MessageBatch

      # @param max_message_count [Integer] number of messages after which the
      #   batch reports itself #full?
      def initialize(max_message_count)
        @messages = []
        @max_message_count = max_message_count
        # Running byte total of serialized messages (plus separators),
        # compared against MAX_BYTES in #size_exhausted?.
        @json_size = 0
      end

      # Adds a message to the batch. Messages whose JSON form exceeds the
      # per-message size limit are logged and silently dropped.
      #
      # @raise [JSONGenerationError] if the message cannot be serialized
      def <<(message)
        begin
          message_json = message.to_json
        rescue => e
          raise JSONGenerationError, "Serialization error: #{e}"
        end

        message_json_size = message_json.bytesize
        if message_too_big?(message_json_size)
          logger.error("a message exceeded the maximum allowed size")
        else
          @messages << message
          @json_size += message_json_size + 1 # One byte for the comma
        end
      end

      # True when no more messages should be added (count or size budget hit).
      def full?
        item_count_exhausted? || size_exhausted?
      end

      # Empties the batch and resets the size accumulator for reuse.
      def clear
        @messages.clear
        @json_size = 0
      end

      # Serialization and collection queries delegate to the message array.
      def_delegators :@messages, :to_json
      def_delegators :@messages, :empty?
      def_delegators :@messages, :length

      private

      # True once the configured message-count cap is reached.
      def item_count_exhausted?
        @messages.length >= @max_message_count
      end

      # True when a single message exceeds the per-message byte limit.
      def message_too_big?(message_json_size)
        message_json_size > Bento::Analytics::Defaults::Message::MAX_BYTES
      end

      # We consider the max size here as just enough to leave room for one more
      # message of the largest size possible. This is a shortcut that allows us
      # to use a native Ruby `Queue` that doesn't allow peeking. The tradeoff
      # here is that we might fit in less messages than possible into a batch.
      #
      # The alternative is to use our own `Queue` implementation that allows
      # peeking, and to consider the next message size when calculating whether
      # the message can be accomodated in this batch.
      def size_exhausted?
        @json_size >= (MAX_BYTES - Bento::Analytics::Defaults::Message::MAX_BYTES)
      end
    end
  end
end
@@ -0,0 +1,15 @@
1
module Bento
  class Analytics
    # public: Simple value object wrapping a response from the API.
    class Response
      attr_reader :status, :error

      # @param status [Integer] HTTP-style status code (defaults to 200)
      # @param error [String, nil] error description; nil on success
      def initialize(status = 200, error = nil)
        @status, @error = status, error
      end
    end
  end
end
@@ -0,0 +1,144 @@
1
+ require "bento/sdk/defaults"
2
+ require "bento/sdk/utils"
3
+ require "bento/sdk/response"
4
+ require "bento/sdk/logging"
5
+ require "bento/sdk/backoff_policy"
6
+ require "net/http"
7
+ require "net/https"
8
+ require "json"
9
+
10
module Bento
  class Analytics
    # HTTP transport: delivers message batches to the Bento tracking API over
    # a persistent Net::HTTP connection, with bounded retries and backoff.
    class Transport
      include Bento::Analytics::Defaults::Request
      include Bento::Analytics::Utils
      include Bento::Analytics::Logging

      # @param [Hash] options
      # @option options [String] :host API host (default Defaults::Request::HOST)
      # @option options [Integer] :port API port
      # @option options [Boolean] :ssl whether to use TLS
      # @option options [Hash] :headers request headers
      # @option options [String] :path request path
      # @option options [Integer] :retries max delivery attempts
      # @option options [Object] :backoff_policy yields sleep intervals between retries
      def initialize(options = {})
        options[:host] ||= HOST
        options[:port] ||= PORT
        options[:ssl] ||= SSL
        @headers = options[:headers] || HEADERS
        @path = options[:path] || PATH
        @retries = options[:retries] || RETRIES
        @backoff_policy =
          options[:backoff_policy] || Bento::Analytics::BackoffPolicy.new

        # Timeouts are fixed: 8s read / 4s open.
        http = Net::HTTP.new(options[:host], options[:port])
        http.use_ssl = options[:ssl]
        http.read_timeout = 8
        http.open_timeout = 4

        @http = http
      end

      # Sends a batch of messages to the API
      #
      # NOTE(review): this shadows Object#send for Transport instances;
      # use __send__ for dynamic dispatch on this class.
      #
      # @return [Response] API response
      def send(write_key, batch)
        logger.debug("Sending request for #{batch.length} items")
        last_response, exception = retry_with_backoff(@retries) {
          status_code, body = send_request(write_key, batch)

          # NOTE(review): error is never populated from the response body,
          # and "" is truthy in Ruby so the `if error` guard below always
          # logs — confirm whether body should be parsed into error here.
          error = ""

          should_retry = should_retry_request?(status_code, body)
          logger.debug("🍱Response status code: #{status_code}")
          logger.debug("🍱Response error (if any): #{error}") if error

          [Bento::Analytics::Response.new(status_code, error), should_retry]
        }

        if exception
          # Retries exhausted with an exception: surface it as a -1 status.
          logger.error(exception.message)
          exception.backtrace.each { |line| logger.error(line) }
          Bento::Analytics::Response.new(-1, exception.to_s)
        else
          last_response
        end
      end

      # Closes a persistent connection if it exists
      def shutdown
        @http.finish if @http.started?
      end

      private

      # Retry on server errors (5xx) and rate limiting (429); log and give
      # up on any other client error (4xx); succeed otherwise.
      def should_retry_request?(status_code, body)
        if status_code >= 500
          true # Server error
        elsif status_code == 429
          true # Rate limited
        elsif status_code >= 400
          logger.error(body)
          false # Client error. Do not retry, but log
        else
          false
        end
      end

      # Takes a block that returns [result, should_retry].
      #
      # Retries upto `retries_remaining` times, if `should_retry` is true or
      # an exception is raised. `@backoff_policy` is used to determine the
      # duration to sleep between attempts
      #
      # Returns [last_result, raised_exception]
      def retry_with_backoff(retries_remaining, &block)
        result, caught_exception = nil
        should_retry = false

        begin
          result, should_retry = yield
          return [result, nil] unless should_retry
        rescue => e
          # Any exception (network, parsing) is treated as retryable.
          should_retry = true
          caught_exception = e
        end

        if should_retry && (retries_remaining > 1)
          logger.debug("Retrying request, #{retries_remaining} retries left")
          # NOTE(review): next_interval is presumably in milliseconds (see
          # BackoffPolicy MIN/MAX_TIMEOUT_MS); dividing by 500 instead of
          # 1000 sleeps twice the computed interval — confirm intended.
          sleep(@backoff_policy.next_interval.to_f / 500)
          retry_with_backoff(retries_remaining - 1, &block)
        else
          [result, caught_exception]
        end
      end

      # Sends a request for the batch, returns [status_code, body]
      def send_request(write_key, batch)
        # Re-shape the batch array into a hash keyed by index, as the events
        # endpoint expects: {"events" => {0 => {...}, 1 => {...}}}.
        batch_to_json = JSON.parse(batch.to_json)
        batch_to_hash = Hash[batch_to_json.each_with_index.map { |value, index| [index, value] }]

        payload = JSON.generate("events" => batch_to_hash)
        # NOTE(review): this parse/re-serialize round trip appears redundant;
        # payload is already a JSON string at this point.
        payload = JSON.parse(payload).to_json

        request = Net::HTTP::Post.new(@path, @headers)

        request.content_type = "application/json"
        request["Accept"] = "application/json"
        # The write key doubles as the basic-auth username; no password.
        request.basic_auth(write_key, nil)

        if self.class.stub
          # Stub mode (Transport.stub = true or ENV["STUB"]): log and fake a 200.
          logger.debug "stubbed request to #{@path}: " \
            "write key = #{write_key}, batch = #{JSON.generate(batch)}"

          [200, "{}"]
        else
          @http.start unless @http.started? # Maintain a persistent connection
          response = @http.request(request, payload)
          [response.code.to_i, response.body]
        end
      end

      class << self
        attr_writer :stub

        # Stubbing can also be forced via the STUB environment variable.
        def stub
          @stub || ENV["STUB"]
        end
      end
    end
  end
end