background_job 0.0.1.rc1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (41) hide show
  1. checksums.yaml +7 -0
  2. data/.github/workflows/specs.yml +34 -0
  3. data/.gitignore +11 -0
  4. data/.rspec +1 -0
  5. data/.tool-versions +1 -0
  6. data/CHANGELOG.md +10 -0
  7. data/Gemfile +12 -0
  8. data/Gemfile.lock +58 -0
  9. data/LICENSE.txt +21 -0
  10. data/README.md +247 -0
  11. data/Rakefile +2 -0
  12. data/background_job.gemspec +39 -0
  13. data/bin/console +14 -0
  14. data/bin/setup +6 -0
  15. data/docker-compose.yml +6 -0
  16. data/lib/background-job.rb +3 -0
  17. data/lib/background_job/configuration/base.rb +102 -0
  18. data/lib/background_job/configuration/faktory.rb +6 -0
  19. data/lib/background_job/configuration/middleware_chain.rb +109 -0
  20. data/lib/background_job/configuration/sidekiq.rb +23 -0
  21. data/lib/background_job/configuration.rb +63 -0
  22. data/lib/background_job/errors.rb +24 -0
  23. data/lib/background_job/jobs/faktory.rb +87 -0
  24. data/lib/background_job/jobs/job.rb +126 -0
  25. data/lib/background_job/jobs/sidekiq.rb +75 -0
  26. data/lib/background_job/jobs.rb +8 -0
  27. data/lib/background_job/lock.rb +141 -0
  28. data/lib/background_job/lock_digest.rb +36 -0
  29. data/lib/background_job/middleware/unique_job/faktory.rb +41 -0
  30. data/lib/background_job/middleware/unique_job/sidekiq.rb +48 -0
  31. data/lib/background_job/middleware/unique_job.rb +67 -0
  32. data/lib/background_job/mixin/faktory.rb +56 -0
  33. data/lib/background_job/mixin/shared_interface.rb +49 -0
  34. data/lib/background_job/mixin/sidekiq.rb +61 -0
  35. data/lib/background_job/mixin.rb +6 -0
  36. data/lib/background_job/redis_pool.rb +28 -0
  37. data/lib/background_job/testing.rb +76 -0
  38. data/lib/background_job/unique_job.rb +84 -0
  39. data/lib/background_job/version.rb +5 -0
  40. data/lib/background_job.rb +87 -0
  41. metadata +131 -0
@@ -0,0 +1,109 @@
1
+ # frozen_string_literal: true
2
+
3
module BackgroundJob
  # Reopened via nesting (instead of `class Configuration::MiddlewareChain`)
  # so this file does not depend on configuration.rb being loaded first.
  class Configuration
    # Middleware is code configured to run before/after pushing a new job.
    # It is patterned after Rack middleware: the middleware added FIRST wraps
    # (runs outermost around) the middleware added later.
    #
    # To add a middleware:
    #
    #   BackgroundJob.config_for(:sidekiq) do |config|
    #     config.middleware do |chain|
    #       chain.add MyMiddleware
    #     end
    #   end
    #
    # This is an example of a minimal middleware, note the method must return
    # the result of `yield` or the job will not be pushed to the server.
    #
    #   class MyMiddleware
    #     def call(job, conn_pool)
    #       puts "Before push"
    #       result = yield
    #       puts "After push"
    #       result
    #     end
    #   end
    class MiddlewareChain
      include Enumerable

      # @return [Array<Entry>] the ordered middleware entries
      attr_reader :entries

      def initialize
        @entries = []
        yield self if block_given?
      end

      # Keep copies isolated: `dup`/`clone` must not share the entries array.
      def initialize_copy(copy)
        copy.instance_variable_set(:@entries, entries.dup)
      end

      def each(&block)
        entries.each(&block)
      end

      # Append +klass+ to the chain; re-adding an existing class moves it to the end.
      def add(klass, *args)
        remove(klass) if exists?(klass)
        entries << Entry.new(klass, *args)
      end

      # Put +klass+ at the front of the chain (outermost position).
      def prepend(klass, *args)
        remove(klass) if exists?(klass)
        entries.insert(0, Entry.new(klass, *args))
      end

      def remove(klass)
        entries.delete_if { |entry| entry.klass == klass }
      end

      # Insert +newklass+ immediately before +oldklass+ (or at the front when
      # +oldklass+ is not in the chain). An existing +newklass+ entry is moved.
      def insert_before(oldklass, newklass, *args)
        i = entries.index { |entry| entry.klass == newklass }
        new_entry = i.nil? ? Entry.new(newklass, *args) : entries.delete_at(i)
        i = entries.index { |entry| entry.klass == oldklass } || 0
        entries.insert(i, new_entry)
      end

      # Insert +newklass+ immediately after +oldklass+ (or at the end when
      # +oldklass+ is not in the chain). An existing +newklass+ entry is moved.
      def insert_after(oldklass, newklass, *args)
        i = entries.index { |entry| entry.klass == newklass }
        new_entry = i.nil? ? Entry.new(newklass, *args) : entries.delete_at(i)
        i = entries.index { |entry| entry.klass == oldklass } || entries.count - 1
        entries.insert(i + 1, new_entry)
      end

      def exists?(klass)
        any? { |entry| entry.klass == klass }
      end

      # @return [Array] fresh middleware instances, in registration order
      def retrieve
        map(&:make_new)
      end

      def clear
        entries.clear
      end

      # Run the chain around the given block.
      # BUGFIX: was `chain.pop`, which ran middleware in REVERSE registration
      # order; `shift` restores the documented Rack ordering (first added is
      # outermost). The `.dup` was dropped because #retrieve already returns
      # a fresh array.
      def invoke(*args)
        chain = retrieve
        traverse_chain = lambda do
          if chain.empty?
            yield
          else
            chain.shift.call(*args, &traverse_chain)
          end
        end
        traverse_chain.call
      end

      # Entry stores a middleware class plus its constructor arguments.
      class Entry
        attr_reader :klass, :args

        def initialize(klass, *args)
          @klass = klass
          @args = args
        end

        def make_new
          @klass.new(*@args)
        end
      end
    end
  end
end
@@ -0,0 +1,23 @@
1
+ # frozen_string_literal: true
2
+
3
module BackgroundJob
  # Sidekiq-specific settings; inherits the option DSL from Configuration::Base.
  class Configuration::Sidekiq < Configuration::Base
    attribute_accessor :redis, write: false
    # It's recommended to not use the namespace option in Sidekiq.
    # @see http://www.mikeperham.com/2015/09/24/storing-data-with-redis/#namespaces
    attribute_accessor :namespace

    # Connection pool for this service. Falls back to the global pool when no
    # service-level redis settings were provided.
    def redis_pool
      @redis_pool ||= redis ? BackgroundJob::RedisPool.new(redis) : BackgroundJob.config.redis_pool
    end

    # Custom writer: invalidate the memoized pool whenever settings change.
    def redis=(value)
      @redis_pool = nil
      @redis = value
    end
  end
end
@@ -0,0 +1,63 @@
1
# frozen_string_literal: true

require 'set'

module BackgroundJob
  # Tracks which job services (:sidekiq, :faktory) have been configured.
  class ConfigService < Set
    # @return [Boolean] whether the :sidekiq service was registered
    def sidekiq?
      member?(:sidekiq)
    end

    # @return [Boolean] whether the :faktory service was registered
    def faktory?
      member?(:faktory)
    end
  end
end
15
+
16
module BackgroundJob
  # Top-level configuration holder: global redis settings plus the lazily
  # built per-service (sidekiq/faktory) configuration objects.
  class Configuration
    attr_reader :redis

    # Assign raw redis settings and drop the memoized pool so the next call
    # to #redis_pool rebuilds it from the new settings.
    def redis=(value)
      @redis_pool = nil
      @redis = value
    end

    def redis_pool
      @redis_pool ||= BackgroundJob::RedisPool.new(redis)
    end

    # Services registered so far; populated by #faktory / #sidekiq.
    def services
      @services ||= ConfigService.new
    end

    # Faktory service configuration. Yields it when a block is given.
    def faktory
      @faktory ||= register_service(:faktory) { Configuration::Faktory.new }
      block_given? ? yield(@faktory) : @faktory
    end

    # Sidekiq service configuration. Yields it when a block is given.
    def sidekiq
      @sidekiq ||= register_service(:sidekiq) { Configuration::Sidekiq.new }
      block_given? ? yield(@sidekiq) : @sidekiq
    end

    private

    # Record the service, load its job adapter, and build its config object.
    def register_service(name)
      services.add(name)
      require_relative "jobs/#{name}"
      yield
    end
  end
end
59
+
60
+ require_relative 'configuration/base'
61
+ require_relative 'configuration/faktory'
62
+ require_relative 'configuration/sidekiq'
63
+ require_relative 'configuration/middleware_chain'
@@ -0,0 +1,24 @@
1
+ # frozen_string_literal: true
2
+
3
module BackgroundJob
  # Base class for every error raised by this gem.
  class Error < StandardError
  end

  # Raised when configuration values fail validation.
  class InvalidConfigError < Error
  end

  # Raised in strict mode when a job class is pushed without having been
  # declared in the service configuration.
  class NotDefinedJobError < Error
    def initialize(job_class)
      @job_class = job_class
    end

    # Builds the full explanation, interpolating the offending class name.
    def message
      format(
        "The %<job_class>p is not defined and the BackgroundJob is configured to work on strict mode.\n" +
        "it's highly recommended to include this job class to the list of known jobs.\n" +
        "Example: `BackgroundJob.config_for(:sidekiq) { |config| config.jobs = { %<job_class>p => {} } }`\n" +
        'Another option is to set config.strict = false',
        job_class: @job_class,
      )
    end
  end
end
@@ -0,0 +1,87 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative '../mixin/faktory'
4
+
5
module BackgroundJob
  module Jobs
    # Builds and pushes a job payload to a Faktory server.
    class Faktory < Job
      def initialize(job_class, **options)
        super(
          job_class,
          **Mixin::Faktory.background_job_options(job_class, strict_check: true),
          **options
        )
        @options.slice(:queue, :reserve_for).each do |key, value|
          @payload[key.to_s] = value
        end
        @payload['jobtype'] = job_class.to_s
        @payload['retry'] = parse_retry(@options[:retry])
        @payload['created_at'] ||= Time.now.to_f
      end

      # Push job to Faktory
      # * If job has the 'at' key, then schedule it
      # * Otherwise enqueue for immediate execution
      #
      # @raise [BackgroundJob::Error] when the faktory client gem is not loaded
      # @return [Hash] Payload that was sent to server
      def push
        unless Object.const_defined?(:Faktory)
          # BUGFIX: the referenced repository was `faktory_job_ruby`, which does
          # not exist; the official client is contribsys/faktory_worker_ruby.
          raise BackgroundJob::Error, <<~ERR
            Faktory client for ruby is not loaded. You must install and require https://github.com/contribsys/faktory_worker_ruby.
          ERR
        end
        normalize_before_push!
        pool = Thread.current[:faktory_via_pool] || ::Faktory.server_pool
        BackgroundJob.config.faktory.middleware.invoke(self, :faktory) do
          ::Faktory.client_middleware.invoke(payload, pool) do
            pool.with do |c|
              c.push(payload)
            end
          end
          payload
        end
      end

      protected

      # Generate the jid and convert time fields into Faktory's wire format.
      # `past_remove: true` drops an 'at' timestamp that is now or in the past,
      # so the job is enqueued immediately instead of scheduled.
      def normalize_before_push!
        with_job_jid # Generate a unique job id
        payload['enqueued_at'] = Time.now.to_f
        {'created_at' => false, 'enqueued_at' => false, 'at' => true}.each do |field, past_remove|
          # Optimization to enqueue something now that is scheduled to go out now or in the past
          # (simplified: `past_remove && time > now` reduced to `time > now`).
          if (time = payload.delete(field)) && (!past_remove || time > Time.now.to_f)
            payload[field] = parse_time(time)
          end
        end
      end

      # Convert job retry value according to the Go struct datatype.
      #
      # * 25 is the default.
      # * 0 means the job is completely ephemeral. No matter if it fails or succeeds, it will be discarded.
      # * -1 means the job will go straight to the Dead set if it fails, no retries.
      def parse_retry(value)
        case value
        when Numeric then value.to_i
        when false then -1
        else
          25
        end
      end

      # Render a timestamp as RFC 3339 with nanosecond precision.
      # NOTE(review): returns nil for any other type — assumes payload time
      # fields are only Numeric/Time/DateTime (Job#in stores floats); confirm.
      def parse_time(value)
        case value
        when Numeric then Time.at(value).to_datetime.rfc3339(9)
        when Time then value.to_datetime.rfc3339(9)
        when DateTime then value.rfc3339(9)
        end
      end

      def to_json(value)
        MultiJson.dump(value, mode: :compat)
      end
    end
  end
end
@@ -0,0 +1,126 @@
1
# frozen_string_literal: true

require 'date'
require 'time'

require_relative '../unique_job'
5
+ # DSL used to create a job. It's generic so it can be used with any adapter.
6
+ module BackgroundJob
7
+ class Jobs::Job
8
+ attr_reader :options, :payload, :job_class, :unique_job
9
+
10
+ def initialize(job_class, **options)
11
+ @job_class = job_class
12
+ @options = options
13
+ @payload = {}
14
+ unique(@options.delete(:uniq)) if @options.key?(:uniq)
15
+ end
16
+
17
+ # Push the job to the service backend
18
+ #
19
+ # @abstract
20
+ def push
21
+ raise NotImplementedError
22
+ end
23
+
24
+ %i[created_at enqueued_at].each do |method_name|
25
+ define_method method_name do |value|
26
+ payload[method_name.to_s] = \
27
+ case value
28
+ when Numeric then value.to_f
29
+ when String then Time.parse(value).to_f
30
+ when Time, DateTime then value.to_f
31
+ else
32
+ raise ArgumentError, format('The %<v>p is not a valid value for %<m>s.', v: value, m: method_name)
33
+ end
34
+
35
+ self
36
+ end
37
+ end
38
+
39
+ # Adds arguments to the job
40
+ # @return self
41
+ def with_args(*args)
42
+ payload['args'] = args
43
+
44
+ self
45
+ end
46
+
47
+ # Schedule the time when a job will be executed. Jobs which are scheduled in the past are enqueued for immediate execution.
48
+ # @param timestamp [Numeric] timestamp, numeric or something that acts numeric.
49
+ # @return self
50
+ def in(timestamp)
51
+ now = Time.now.to_f
52
+ timestamp = Time.parse(timestamp) if timestamp.is_a?(String)
53
+ int = timestamp.respond_to?(:strftime) ? timestamp.to_f : now + timestamp.to_f
54
+ return self if int <= now
55
+
56
+ payload['at'] = int
57
+ payload['created_at'] = now
58
+
59
+ self
60
+ end
61
+ alias_method :at, :in
62
+
63
+ # Wrap uniq options
64
+ #
65
+ # @param value [Hash] Unique configurations with `across`, `timeout` and `unlock_policy`
66
+ # @return self
67
+ def unique(value)
68
+ value = {} if value == true
69
+ @unique_job = \
70
+ case value
71
+ when Hash then UniqueJob.coerce(value)
72
+ when UniqueJob then value
73
+ else
74
+ nil
75
+ end
76
+
77
+ self
78
+ end
79
+
80
+ def with_job_jid(jid = nil)
81
+ payload['jid'] ||= jid || BackgroundJob.jid
82
+
83
+ self
84
+ end
85
+
86
+ def eql?(other)
87
+ return false unless other.is_a?(self.class)
88
+
89
+ job_class == other.job_class && \
90
+ payload == other.payload &&
91
+ options == other.options &&
92
+ unique_job == other.unique_job
93
+ end
94
+ alias == eql?
95
+
96
+ def unique_job?
97
+ unique_job.is_a?(UniqueJob)
98
+ end
99
+
100
+ def to_s
101
+ # format(
102
+ # '#<%<c>s:0x%<o>x job_class=%<j>p, payload=%<p>p, options=%<o>p, unique_job=%<u>p>',
103
+ # c: self.class, o: object_id, j: job_class, p: payload, o: options, u: unique_job
104
+ # )
105
+ str = format(
106
+ '#<%<c>s:0x%<o>x job_class=%<j>p',
107
+ c: self.class, o: object_id, j: job_class
108
+ )
109
+ if (args = payload['args'])
110
+ str += format(', args=%<p>p', p: args)
111
+ end
112
+ str += format(', options=%<o>p', o: options) unless options.empty?
113
+ str += format(', unique_job=%<u>p', u: unique_job) if unique_job
114
+ str += '>'
115
+ str
116
+ end
117
+
118
+ private
119
+
120
+ # Normalize payload before pushing to the service
121
+ # @abstract
122
+ def normalize_before_push!
123
+ # noop
124
+ end
125
+ end
126
+ end
@@ -0,0 +1,75 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative '../mixin/sidekiq'
4
+
5
+
6
module BackgroundJob
  module Jobs
    # Builds and pushes a job payload to Sidekiq's Redis structures.
    class Sidekiq < Job
      OPTIONS_TO_PAYLOAD = %i[queue retry].freeze

      def initialize(job_class, **options)
        super(
          job_class,
          **Mixin::Sidekiq.background_job_options(job_class, strict_check: true),
          **options,
        )
        OPTIONS_TO_PAYLOAD.each do |key|
          @payload[key.to_s] = @options[key] if @options.key?(key)
        end
        @payload['class'] = job_class.to_s
        @payload['created_at'] ||= Time.now.to_f
      end

      # Push the job to Sidekiq (Redis, actually).
      # * A future 'at' timestamp puts the job into the schedule sorted set
      # * Otherwise it is enqueued for immediate execution
      #
      # @return [Hash] Payload that was sent to redis
      def push
        normalize_before_push!

        BackgroundJob.config.sidekiq.middleware.invoke(self, :sidekiq) do
          schedule_at = payload.delete('at')
          redis_pool.with do |redis|
            # Anything scheduled for now or the past is enqueued immediately.
            if schedule_at && schedule_at > Time.now.to_f
              redis.zadd(scheduled_queue_name, schedule_at.to_f.to_s, to_json(payload))
            else
              redis.lpush(immediate_queue_name, to_json(payload))
            end
          end
          payload
        end
      end

      protected

      # Assign the jid and stamp the enqueue time.
      def normalize_before_push!
        with_job_jid # Generate a unique job id
        payload['enqueued_at'] = Time.now.to_f
      end

      def redis_pool
        BackgroundJob.config.sidekiq.redis_pool
      end

      def namespace
        BackgroundJob.config.sidekiq.namespace
      end

      # Sorted-set key for scheduled jobs (namespace-prefixed when configured).
      def scheduled_queue_name
        [namespace, 'schedule'].compact.join(':')
      end

      # List key for the job's immediate queue (namespace-prefixed when configured).
      def immediate_queue_name
        [namespace, 'queue', payload.fetch('queue')].compact.join(':')
      end

      def to_json(value)
        MultiJson.dump(value, mode: :compat)
      end
    end
  end
end
@@ -0,0 +1,8 @@
1
# frozen_string_literal: true

module BackgroundJob
  # Namespace for the service-specific job builders (jobs/sidekiq.rb and
  # jobs/faktory.rb) and the generic Jobs::Job base class.
  module Jobs
  end
end

require_relative 'jobs/job'
@@ -0,0 +1,141 @@
1
+ # frozen_string_literal: true
2
+
3
module BackgroundJob
  # Class Lock provides access to the redis "sorted set" used to control unique jobs
  class Lock
    attr_reader :digest, :lock_id, :ttl

    # @param digest [String] It's the uniq string used to group similar jobs
    # @param lock_id [String] The uniq job id
    # @param ttl [Float] The timestamp-related lifetime of the lock before being discarded
    def initialize(digest:, lock_id:, ttl:)
      @digest = digest
      @lock_id = lock_id
      @ttl = ttl
    end

    # Initialize a Lock object from hash (string or symbol keys)
    #
    # @param value [Hash] Hash with lock properties
    # @return [BackgroundJob::Lock, nil] nil when any required property is missing
    def self.coerce(value)
      return unless value.is_a?(Hash)

      digest = value[:digest] || value['digest']
      lock_id = value[:lock_id] || value['lock_id']
      ttl = value[:ttl] || value['ttl']
      return if [digest, lock_id, ttl].any?(&:nil?)

      new(digest: digest, lock_id: lock_id, ttl: ttl)
    end

    # Remove expired locks from redis "sorted set"
    #
    # @param digest [String] It's the uniq string used to group similar jobs
    def self.flush_expired_members(digest, redis: nil)
      return unless digest

      # "(" makes the upper bound exclusive: removes scores strictly below now.
      with_redis(redis) { |conn| conn.zremrangebyscore(digest, '-inf', "(#{now}") }
    end

    # Remove all locks from redis "sorted set"
    #
    # @param digest [String] It's the uniq string used to group similar jobs
    def self.flush(digest, redis: nil)
      return unless digest

      with_redis(redis) { |conn| conn.del(digest) }
    end

    # Number of locks
    #
    # @param digest [String] It's the uniq string used to group similar jobs
    # @option [Number] from The begin of set. Default to 0
    # @option [Number] to The end of set. Default to now plus the default uniq timeout
    # @return Number the amount of entries within digest
    def self.count(digest, from: 0, to: nil, redis: nil)
      to ||= Time.now.to_f + BackgroundJob::UniqueJob::VALID_OPTIONS[:timeout]
      with_redis(redis) { |conn| conn.zcount(digest, from, to) }
    end

    # @return [Float] A float timestamp of current time
    def self.now
      Time.now.to_f
    end

    # Run the block with the given connection, or check one out of the global
    # pool. Extracted to remove the duplicated dispatch in the class methods
    # and the local lambdas that shadowed Kernel#caller.
    def self.with_redis(redis = nil, &block)
      return block.call(redis) if redis

      BackgroundJob.config.redis_pool.with(&block)
    end
    private_class_method :with_redis

    def to_hash
      {
        'ttl' => ttl,
        'digest' => (digest.to_s if digest),
        'lock_id' => (lock_id.to_s if lock_id),
      }
    end

    # Remove lock_id lock from redis
    # @return [Boolean] Returns true when it's locked or false when there is no lock
    def unlock
      redis_pool.with do |conn|
        conn.zrem(digest, lock_id)
      end
    end

    # Adds lock_id lock to redis
    # @return [Boolean] Returns true when it's a fresh lock or false when lock already exists
    def lock
      redis_pool.with do |conn|
        conn.zadd(digest, ttl, lock_id)
      end
    end

    # Check if the lock_id lock exists and has not expired; also prunes
    # expired members of the digest as a side effect.
    # @return [Boolean] true or false when lock exists or not
    def locked?
      locked = false

      redis_pool.with do |conn|
        timestamp = conn.zscore(digest, lock_id)
        return false unless timestamp

        locked = timestamp >= now
        self.class.flush_expired_members(digest, redis: conn)
      end

      locked
    end

    def eql?(other)
      return false unless other.is_a?(self.class)

      [digest, lock_id, ttl] == [other.digest, other.lock_id, other.ttl]
    end
    alias == eql?

    protected

    def now
      self.class.now
    end

    def redis_pool
      BackgroundJob.config.redis_pool
    end
  end
end
@@ -0,0 +1,36 @@
1
# frozen_string_literal: true

module BackgroundJob
  # LockDigest generates the uniq digest according to the uniq config.
  class LockDigest
    # Redundant `.freeze` calls removed for consistency: the magic comment
    # above already freezes every string literal in this file.
    NAMESPACE = 'bgjb'
    BASE = 'uniq'
    SEPARATOR = ':'

    # @param keys [Array<#to_s>] digest segments (e.g. service name, queue);
    #   each is stringified, stripped and downcased
    # @param across [Symbol, String] :systemwide or :queue
    def initialize(*keys, across:)
      @keys = keys.map { |k| k.to_s.strip.downcase }
      @across = across.to_sym
    end

    # @return [String] e.g. "bgjb:uniq:sidekiq:default"
    # @raise [Error] when across is neither :systemwide nor :queue
    def to_s
      case @across
      when :systemwide
        # Drop the last key (the queue name) so the lock spans all queues.
        build_name(*@keys.slice(0..-2))
      when :queue
        build_name(*@keys)
      else
        raise Error, format(
          'Could not resolve the lock digest using across %<across>p. ' +
          'Valid options are :systemwide and :queue',
          across: @across,
        )
      end
    end

    private

    def build_name(*segments)
      [NAMESPACE, BASE, *segments].compact.join(SEPARATOR)
    end
  end
end