restforce-db 3.5.0 → 4.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: a09f48ae39e990498c4277c8c538ddefa6299c64
4
- data.tar.gz: f908032e59d2f1fc53177e396e6d3166f5643b6d
3
+ metadata.gz: c63f46aaa750d8936499c83d7e6f135e962c9d2e
4
+ data.tar.gz: 2838e8434cef8bb652fb3aac1251e4ce287f412d
5
5
  SHA512:
6
- metadata.gz: 7264171810bbe421b589db829948ebf027487958ebdf4d2b29db5ce18842e0bb1c05c16b2d1567522e916f8d85041a892a507e23661f77b4bbb83d5eac66df9c
7
- data.tar.gz: 4a19e74fb51bf354a950c9c012995bf3b2b65053d9a7636fe147141587d9b78d2c4be1600db7ab2aaa17e405fdf0b73aabb88eac6518c2613a1ffac77301d654
6
+ metadata.gz: 0c5ac6edc50ab9ea35777bcfc677385feb7ea00734108b560012d249612e7c2f29dd65663d9390ceff16ec9272126f62014f282630f66d566128e0429cc0441c
7
+ data.tar.gz: 88c449e199a291dd8ea652d933dd51e0c07ed16e9cba034e297fd1cdeeb7f6aa69f7a9cde5cca5f93ea3125b1ee2b9fc17ab8f1e79d874f46adfc98ca020b139
data/lib/file_daemon.rb CHANGED
@@ -15,12 +15,21 @@ module FileDaemon
15
15
  # :nodoc:
16
16
  module ClassMethods
17
17
 
18
+ # Public: Force-reopen all files at their current paths. Allows for rotation
19
+ # of log files outside of the context of an actual process fork.
20
+ #
21
+ # Returns nothing.
22
+ def reopen_files
23
+ before_fork
24
+ after_fork
25
+ end
26
+
18
27
  # Public: Store the list of currently open file descriptors so that they
19
28
  # may be reopened when a new process is spawned.
20
29
  #
21
30
  # Returns nothing.
22
31
  def before_fork
23
- @files_to_reopen ||= ObjectSpace.each_object(File).reject(&:closed?)
32
+ @files_to_reopen = ObjectSpace.each_object(File).reject(&:closed?)
24
33
  end
25
34
 
26
35
  # Public: Reopen all file descriptors that have been stored through the
@@ -0,0 +1,45 @@
1
+ require "English"
2
+
3
+ # ForkedProcess exposes a small API for performing a block of code in a
4
+ # forked process, and relaying its output to another block.
5
+ class ForkedProcess
6
+
7
+ # Public: Define a callback which will be run in a forked process.
8
+ #
9
+ # Yields an IO object opened for writing when `run` is invoked.
10
+ # Returns nothing.
11
+ def write(&block)
12
+ @write_block = block
13
+ end
14
+
15
+ # Public: Define a callback which reads in the output from the forked
16
+ # process.
17
+ #
18
+ # Yields an IO object opened for reading when `run` is invoked.
19
+ # Returns nothing.
20
+ def read(&block)
21
+ @read_block = block
22
+ end
23
+
24
+ # Public: Fork a process, opening a pipe for IO and yielding the write and
25
+ # read components to the relevant blocks.
26
+ #
27
+ # Returns nothing.
28
+ def run
29
+ reader, writer = IO.pipe
30
+
31
+ pid = fork do
32
+ reader.close
33
+ @write_block.call(writer)
34
+ writer.close
35
+ exit!(0)
36
+ end
37
+
38
+ writer.close
39
+ @read_block.call(reader)
40
+ Process.wait(pid)
41
+
42
+ raise "Forked process did not exit successfully" unless $CHILD_STATUS.success?
43
+ end
44
+
45
+ end
@@ -7,6 +7,33 @@ module Restforce
7
7
  # not yet supported by the base gem.
8
8
  class Client < ::Restforce::Data::Client
9
9
 
10
+ # Public: Instantiate a new Restforce::DB::Client. Updates the middleware
11
+ # stack to account for some additional instrumentation and automatically
12
+ # retry timed out requests.
13
+ def initialize(**_)
14
+ super
15
+
16
+ # NOTE: By default, the Retry middleware will catch timeout exceptions,
17
+ # and retry up to two times. For more information, see:
18
+ # https://github.com/lostisland/faraday/blob/master/lib/faraday/request/retry.rb
19
+ middleware.insert(
20
+ -2,
21
+ Faraday::Request::Retry,
22
+ methods: [:get, :head, :options, :put, :patch, :delete],
23
+ )
24
+
25
+ middleware.insert_after(
26
+ Restforce::Middleware::InstanceURL,
27
+ FaradayMiddleware::Instrumentation,
28
+ name: "request.restforce_db",
29
+ )
30
+
31
+ middleware.insert_before(
32
+ FaradayMiddleware::Instrumentation,
33
+ Restforce::DB::Middleware::StoreRequestBody,
34
+ )
35
+ end
36
+
10
37
  # Public: Get a list of Salesforce records which have been deleted between
11
38
  # the specified times.
12
39
  #
@@ -15,7 +15,6 @@ module Restforce
15
15
  # args - A set of command line arguments to pass to the OptionParser.
16
16
  def initialize(args)
17
17
  @options = {
18
- verbose: false,
19
18
  pid_dir: Rails.root.join("tmp", "pids"),
20
19
  config: Rails.root.join("config", "restforce-db.yml"),
21
20
  tracker: Rails.root.join("config", ".restforce"),
@@ -104,9 +103,6 @@ module Restforce
104
103
  opt.on("-t FILE", "--tracker FILE", "The file where run characteristics should be logged.") do |file|
105
104
  @options[:tracker] = file
106
105
  end
107
- opt.on("-v", "--verbose", "Turn on noisy logging.") do
108
- @options[:verbose] = true
109
- end
110
106
  end
111
107
  end
112
108
 
@@ -10,19 +10,54 @@ module Restforce
10
10
  # specific field.
11
11
  RELATIONSHIP_MATCHER = /(.+)__r\./.freeze
12
12
 
13
- # Internal: Get a global cache with which to store/fetch the writable
14
- # fields for each Salesforce SObject Type.
15
- #
16
- # Returns a Hash.
17
- def self.field_cache
18
- @field_cache ||= {}
19
- end
13
+ class << self
14
+
15
+ # Public: Fetch the field metadata for all Salesforce models registered
16
+ # through mappings in the system. Useful to ensure that forked worker
17
+ # processes have access to all of the field metadata without the need
18
+ # for additional querying.
19
+ #
20
+ # Returns nothing.
21
+ def preload
22
+ Registry.each { |mapping| fetch(mapping.salesforce_model) }
23
+ end
24
+
25
+ # Public: Get a global cache with which to store/fetch the field
26
+ # metadata for each Salesforce Object Type.
27
+ #
28
+ # Returns a Hash.
29
+ def field_cache
30
+ @field_cache ||= {}
31
+ end
32
+
33
+ # Public: Get a collection of all fields for the passed Salesforce
34
+ # Object Type, with an indication of whether or not they are readable
35
+ # and writable for both create and update actions.
36
+ #
37
+ # sobject_type - A String name of an Object Type in Salesforce.
38
+ #
39
+ # Returns a Hash.
40
+ def fetch(sobject_type)
41
+ field_cache[sobject_type] ||= begin
42
+ fields = DB.client.describe(sobject_type).fields
43
+
44
+ fields.each_with_object({}) do |field, permissions|
45
+ permissions[field["name"]] = {
46
+ read: true,
47
+ create: field["createable"],
48
+ update: field["updateable"],
49
+ }
50
+ end
51
+ end
52
+ end
53
+
54
+ # Public: Clear out the global field cache.
55
+ #
56
+ # Returns nothing.
57
+ def reset
58
+ @field_cache = {}
59
+ end
20
60
 
21
- # Internal: Clear out the global field cache.
22
- #
23
- # Returns nothing.
24
- def self.reset
25
- @field_cache = {}
26
61
  end
27
62
 
28
63
  # Public: Get a list of valid fields for a specific action from the passed
@@ -69,7 +104,7 @@ module Restforce
69
104
  #
70
105
  # Returns a Boolean.
71
106
  def available?(sobject_type, field, action)
72
- permissions = field_metadata(sobject_type)[field]
107
+ permissions = self.class.fetch(sobject_type)[field]
73
108
  return false unless permissions
74
109
 
75
110
  permissions[action]
@@ -89,27 +124,6 @@ module Restforce
89
124
  field =~ RELATIONSHIP_MATCHER
90
125
  end
91
126
 
92
- # Internal: Get a collection of all fields for the passed Salesforce
93
- # SObject Type, with an indication of whether or not they are readable and
94
- # writable for both create and update actions.
95
- #
96
- # sobject_type - A String name of an SObject Type in Salesforce.
97
- #
98
- # Returns a Hash.
99
- def field_metadata(sobject_type)
100
- self.class.field_cache[sobject_type] ||= begin
101
- fields = Restforce::DB.client.describe(sobject_type).fields
102
-
103
- fields.each_with_object({}) do |field, permissions|
104
- permissions[field["name"]] = {
105
- read: true,
106
- create: field["createable"],
107
- update: field["updateable"],
108
- }
109
- end
110
- end
111
- end
112
-
113
127
  end
114
128
 
115
129
  end
@@ -0,0 +1,43 @@
1
+ module Restforce
2
+
3
+ module DB
4
+
5
+ # Restforce::DB::Loggable defines shared behaviors for objects which
6
+ # need access to generic logging functionality.
7
+ module Loggable
8
+
9
+ # Public: Add a `logger` attribute to the object including this module.
10
+ #
11
+ # base - The object which is including the `Loggable` module.
12
+ def self.included(base)
13
+ base.send :attr_accessor, :logger
14
+ end
15
+
16
+ private
17
+
18
+ # Internal: Log the passed text at the specified level.
19
+ #
20
+ # text - The piece of text which should be logged for this worker.
21
+ # level - The level at which the text should be logged. Defaults to :info.
22
+ #
23
+ # Returns nothing.
24
+ def log(text, level = :info)
25
+ return unless logger
26
+ logger.send(level, text)
27
+ end
28
+
29
+ # Internal: Log an error for the worker, outputting the entire error
30
+ # stacktrace and applying the appropriate log level.
31
+ #
32
+ # exception - An Exception object.
33
+ #
34
+ # Returns nothing.
35
+ def error(exception)
36
+ log exception, :error
37
+ end
38
+
39
+ end
40
+
41
+ end
42
+
43
+ end
@@ -15,6 +15,8 @@ module Restforce
15
15
  :@timestamp_cache,
16
16
  :cache_timestamp,
17
17
  :changed?,
18
+ :dump_timestamps,
19
+ :load_timestamps,
18
20
  )
19
21
 
20
22
  # Public: Initialize a new Restforce::DB::Runner.
@@ -0,0 +1,96 @@
1
+ require "restforce/db/loggable"
2
+ require "restforce/db/task"
3
+ require "restforce/db/accumulator"
4
+ require "restforce/db/attacher"
5
+ require "restforce/db/associator"
6
+ require "restforce/db/cleaner"
7
+ require "restforce/db/collector"
8
+ require "restforce/db/initializer"
9
+ require "restforce/db/synchronizer"
10
+
11
+ module Restforce
12
+
13
+ # :nodoc:
14
+ module DB
15
+
16
+ # TaskMapping is a small data structure used to pass top-level task
17
+ # information through to a SynchronizationError when necessary.
18
+ TaskMapping = Struct.new(:id, :mapping)
19
+
20
+ # Restforce::DB::TaskManager defines the run sequence and invocation of each
21
+ # of the Restforce::DB::Task subclasses during a single processing loop for
22
+ # the top-level Worker object.
23
+ class TaskManager
24
+
25
+ include Loggable
26
+
27
+ # Public: Initialize a new Restforce::DB::TaskManager for a given runner
28
+ # state.
29
+ #
30
+ # runner - A Restforce::DB::Runner for a specific period of time.
31
+ # logger - A Logger object (optional).
32
+ def initialize(runner, logger: nil)
33
+ @runner = runner
34
+ @logger = logger
35
+ @changes = Hash.new { |h, k| h[k] = Accumulator.new }
36
+ end
37
+
38
+ # Public: Run each of the sync tasks in a defined order for the supplied
39
+ # runner's current state.
40
+ #
41
+ # Returns nothing.
42
+ def perform
43
+ Registry.each do |mapping|
44
+ run("CLEANING RECORDS", Cleaner, mapping)
45
+ run("ATTACHING RECORDS", Attacher, mapping)
46
+ run("PROPAGATING RECORDS", Initializer, mapping)
47
+ run("COLLECTING CHANGES", Collector, mapping)
48
+ end
49
+
50
+ # NOTE: We can only perform the synchronization after all record changes
51
+ # have been aggregated, so this second loop is necessary.
52
+ Registry.each do |mapping|
53
+ run("UPDATING ASSOCIATIONS", Associator, mapping)
54
+ run("APPLYING CHANGES", Synchronizer, mapping)
55
+ end
56
+ end
57
+
58
+ private
59
+
60
+ # Internal: Log a description and response time for a specific named task.
61
+ #
62
+ # name - A String task name.
63
+ # task_class - A Restforce::DB::Task subclass.
64
+ # mapping - A Restforce::DB::Mapping.
65
+ #
66
+ # Returns a Boolean.
67
+ def run(name, task_class, mapping)
68
+ log " #{name} between #{mapping.database_model.name} and #{mapping.salesforce_model}"
69
+ runtime = Benchmark.realtime { task task_class, mapping }
70
+ log format(" FINISHED #{name} after %.4f", runtime)
71
+
72
+ true
73
+ rescue => e
74
+ error(e)
75
+
76
+ false
77
+ end
78
+
79
+ # Internal: Run the passed mapping through the supplied Task class.
80
+ #
81
+ # task_class - A Restforce::DB::Task subclass.
82
+ # mapping - A Restforce::DB::Mapping.
83
+ #
84
+ # Returns nothing.
85
+ def task(task_class, mapping)
86
+ task_class.new(mapping, @runner).run(@changes)
87
+ rescue Faraday::Error::ClientError => e
88
+ task_mapping = TaskMapping.new(task_class, mapping)
89
+ error SynchronizationError.new(e, task_mapping)
90
+ end
91
+
92
+ end
93
+
94
+ end
95
+
96
+ end
@@ -71,6 +71,26 @@ module Restforce
71
71
  @cache = {}
72
72
  end
73
73
 
74
+ # Public: Load the previous collection of cached timestamps from the
75
+ # passed readable object.
76
+ #
77
+ # io - An IO object opened for reading.
78
+ #
79
+ # Returns nothing.
80
+ def load_timestamps(io)
81
+ @cache = YAML.load(io.read) || {}
82
+ end
83
+
84
+ # Public: Dump the currently cached timestamps into the specified
85
+ # writable object.
86
+ #
87
+ # io - An IO object opened for writing.
88
+ #
89
+ # Returns nothing.
90
+ def dump_timestamps(io)
91
+ io.write(YAML.dump(@cache))
92
+ end
93
+
74
94
  private
75
95
 
76
96
  # Internal: Get a unique cache key for the passed instance.
@@ -3,7 +3,7 @@ module Restforce
3
3
  # :nodoc:
4
4
  module DB
5
5
 
6
- VERSION = "3.5.0"
6
+ VERSION = "4.0.0"
7
7
 
8
8
  end
9
9
 
@@ -1,23 +1,29 @@
1
1
  require "file_daemon"
2
+ require "forked_process"
3
+ require "restforce/db/task_manager"
4
+ require "restforce/db/loggable"
2
5
 
3
6
  module Restforce
4
7
 
5
- # :nodoc:
6
8
  module DB
7
9
 
8
- # TaskMapping is a small data structure used to pass top-level task
9
- # information through to a SynchronizationError when necessary.
10
- TaskMapping = Struct.new(:id, :mapping)
11
-
12
10
  # Restforce::DB::Worker represents the primary polling loop through which
13
11
  # all record synchronization occurs.
14
12
  class Worker
15
13
 
16
14
  include FileDaemon
15
+ include Loggable
17
16
 
18
17
  DEFAULT_INTERVAL = 5
19
18
  DEFAULT_DELAY = 1
20
19
 
20
+ # TERM and INT signals should trigger a graceful shutdown.
21
+ GRACEFUL_SHUTDOWN_SIGNALS = %w(TERM INT).freeze
22
+
23
+ # HUP and USR1 will reopen all files at their original paths, to
24
+ # accommodate log rotation.
25
+ ROTATION_SIGNALS = %w(HUP USR1).freeze
26
+
21
27
  attr_accessor :logger, :tracker
22
28
 
23
29
  # Public: Initialize a new Restforce::DB::Worker.
@@ -27,7 +33,6 @@ module Restforce
27
33
  # interval - The maximum polling loop rest time.
28
34
  # delay - The amount of time by which to offset queries.
29
35
  # config - The path to a client configuration file.
30
- # verbose - Display command line output? Defaults to false.
31
36
  def initialize(options = {})
32
37
  @options = options
33
38
  @interval = @options.fetch(:interval) { DEFAULT_INTERVAL }
@@ -45,7 +50,10 @@ module Restforce
45
50
  config.logger = logger
46
51
  end
47
52
 
48
- %w(TERM INT).each { |signal| trap(signal) { stop } }
53
+ GRACEFUL_SHUTDOWN_SIGNALS.each { |signal| trap(signal) { stop } }
54
+ ROTATION_SIGNALS.each { |signal| trap(signal) { Worker.reopen_files } }
55
+
56
+ preload
49
57
 
50
58
  loop do
51
59
  runtime = Benchmark.realtime { perform }
@@ -66,28 +74,58 @@ module Restforce
66
74
 
67
75
  private
68
76
 
77
+ # Internal: Populate the field cache for each Salesforce object in the
78
+ # defined mappings.
79
+ #
80
+ # NOTE: To work around thread-safety issues with Typhoeus (and possibly
81
+ # some other HTTP adapters), we need to fork our preloading to prevent
82
+ # initialization of our Client object in the context of the master Worker
83
+ # process.
84
+ #
85
+ # Returns a Hash.
86
+ def preload
87
+ forked = ForkedProcess.new
88
+
89
+ forked.write do |writer|
90
+ log "INITIALIZING..."
91
+ FieldProcessor.preload
92
+ YAML.dump(FieldProcessor.field_cache, writer)
93
+ end
94
+
95
+ forked.read do |reader|
96
+ FieldProcessor.field_cache.merge!(YAML.load(reader.read))
97
+ end
98
+
99
+ forked.run
100
+ end
101
+
69
102
  # Internal: Perform the synchronization loop, recording the time that the
70
103
  # run is performed so that future runs can pick up where the last run
71
104
  # left off.
72
105
  #
106
+ # NOTE: In order to keep our long-term memory usage in check, we fork a
107
+ # task manager to process the tasks for each synchronization loop. Once
108
+ # the subprocess dies, its memory can be reclaimed by the OS.
109
+ #
73
110
  # Returns nothing.
74
111
  def perform
112
+ reset!
113
+
75
114
  track do
76
- reset!
115
+ forked = ForkedProcess.new
116
+
117
+ forked.write do |writer|
118
+ Worker.after_fork
119
+ task_manager.perform
77
120
 
78
- Restforce::DB::Registry.each do |mapping|
79
- run("CLEANING RECORDS", Cleaner, mapping)
80
- run("ATTACHING RECORDS", Attacher, mapping)
81
- run("PROPAGATING RECORDS", Initializer, mapping)
82
- run("COLLECTING CHANGES", Collector, mapping)
121
+ runner.dump_timestamps(writer)
83
122
  end
84
123
 
85
- # NOTE: We can only perform the synchronization after all record
86
- # changes have been aggregated, so this second loop is necessary.
87
- Restforce::DB::Registry.each do |mapping|
88
- run("UPDATING ASSOCIATIONS", Associator, mapping)
89
- run("APPLYING CHANGES", Synchronizer, mapping)
124
+ forked.read do |reader|
125
+ runner.load_timestamps(reader)
90
126
  end
127
+
128
+ forked.run
91
129
  end
92
130
  end
93
131
 
@@ -97,7 +135,15 @@ module Restforce
97
135
  # Returns nothing.
98
136
  def reset!
99
137
  runner.tick!
100
- @changes = Hash.new { |h, k| h[k] = Accumulator.new }
138
+ Worker.before_fork
139
+ end
140
+
141
+ # Internal: Get a new TaskManager instance, which reflects the current
142
+ # runner state.
143
+ #
144
+ # Returns a Restforce::DB::TaskManager.
145
+ def task_manager
146
+ TaskManager.new(runner, logger: logger)
101
147
  end
102
148
 
103
149
  # Internal: Run the passed block, updating the tracker with the time at
@@ -115,9 +161,9 @@ module Restforce
115
161
  log "SYNCHRONIZING"
116
162
  end
117
163
 
118
- yield
164
+ duration = Benchmark.realtime { yield }
165
+ log format("DONE after %.4f", duration)
119
166
 
120
- log "DONE"
121
167
  tracker.track(runtime)
122
168
  else
123
169
  yield
@@ -132,38 +178,6 @@ module Restforce
132
178
  @runner ||= Runner.new(@options.fetch(:delay) { DEFAULT_DELAY })
133
179
  end
134
180
 
135
- # Internal: Log a description and response time for a specific named task.
136
- #
137
- # name - A String task name.
138
- # task_class - A Restforce::DB::Task subclass.
139
- # mapping - A Restforce::DB::Mapping.
140
- #
141
- # Returns a Boolean.
142
- def run(name, task_class, mapping)
143
- log " #{name} between #{mapping.database_model.name} and #{mapping.salesforce_model}"
144
- runtime = Benchmark.realtime { task task_class, mapping }
145
- log format(" COMPLETE after %.4f", runtime)
146
-
147
- true
148
- rescue => e
149
- error(e)
150
-
151
- false
152
- end
153
-
154
- # Internal: Run the passed mapping through the supplied Task class.
155
- #
156
- # task_class - A Restforce::DB::Task subclass.
157
- # mapping - A Restforce::DB::Mapping.
158
- #
159
- # Returns nothing.
160
- def task(task_class, mapping)
161
- task_class.new(mapping, runner).run(@changes)
162
- rescue Faraday::Error::ClientError => e
163
- task_mapping = TaskMapping.new(task_class, mapping)
164
- error SynchronizationError.new(e, task_mapping)
165
- end
166
-
167
181
  # Internal: Has this worker been instructed to stop?
168
182
  #
169
183
  # Returns a boolean.
@@ -171,29 +185,6 @@ module Restforce
171
185
  @exit == true
172
186
  end
173
187
 
174
- # Internal: Log the passed text at the specified level.
175
- #
176
- # text - The piece of text which should be logged for this worker.
177
- # level - The level at which the text should be logged. Defaults to :info.
178
- #
179
- # Returns nothing.
180
- def log(text, level = :info)
181
- puts text if @options[:verbose]
182
-
183
- return unless logger
184
- logger.send(level, text)
185
- end
186
-
187
- # Internal: Log an error for the worker, outputting the entire error
188
- # stacktrace and applying the appropriate log level.
189
- #
190
- # exception - An Exception object.
191
- #
192
- # Returns nothing.
193
- def error(exception)
194
- logger.error(exception)
195
- end
196
-
197
188
  end
198
189
 
199
190
  end
data/lib/restforce/db.rb CHANGED
@@ -39,18 +39,10 @@ require "restforce/db/record_cache"
39
39
  require "restforce/db/timestamp_cache"
40
40
  require "restforce/db/runner"
41
41
 
42
- require "restforce/db/task"
43
- require "restforce/db/accumulator"
44
- require "restforce/db/attacher"
45
42
  require "restforce/db/adapter"
46
- require "restforce/db/associator"
47
43
  require "restforce/db/attribute_map"
48
- require "restforce/db/cleaner"
49
- require "restforce/db/collector"
50
- require "restforce/db/initializer"
51
44
  require "restforce/db/mapping"
52
45
  require "restforce/db/model"
53
- require "restforce/db/synchronizer"
54
46
  require "restforce/db/tracker"
55
47
  require "restforce/db/worker"
56
48
 
@@ -89,7 +81,7 @@ module Restforce
89
81
  # Returns a Restforce::Data::Client instance.
90
82
  def self.client
91
83
  @client ||= begin
92
- client = DB::Client.new(
84
+ DB::Client.new(
93
85
  username: configuration.username,
94
86
  password: configuration.password,
95
87
  security_token: configuration.security_token,
@@ -100,36 +92,9 @@ module Restforce
100
92
  timeout: configuration.timeout,
101
93
  adapter: configuration.adapter,
102
94
  )
103
- setup_middleware(client)
104
- client
105
95
  end
106
96
  end
107
97
 
108
- # Internal: Sets up the Restforce client's middleware handlers.
109
- #
110
- # Returns nothing.
111
- def self.setup_middleware(client)
112
- # NOTE: By default, the Retry middleware will catch timeout exceptions,
113
- # and retry up to two times. For more information, see:
114
- # https://github.com/lostisland/faraday/blob/master/lib/faraday/request/retry.rb
115
- client.middleware.insert(
116
- -2,
117
- Faraday::Request::Retry,
118
- methods: [:get, :head, :options, :put, :patch, :delete],
119
- )
120
-
121
- client.middleware.insert_after(
122
- Restforce::Middleware::InstanceURL,
123
- FaradayMiddleware::Instrumentation,
124
- name: "request.restforce_db",
125
- )
126
-
127
- client.middleware.insert_before(
128
- FaradayMiddleware::Instrumentation,
129
- Restforce::DB::Middleware::StoreRequestBody,
130
- )
131
- end
132
-
133
98
  # Public: Get the ID of the Salesforce user which is being used to access
134
99
  # the Salesforce API.
135
100
  #
@@ -84,4 +84,34 @@ describe Restforce::DB::TimestampCache do
84
84
  end
85
85
  end
86
86
 
87
+ describe "I/O operations" do
88
+ let(:io) { IO.pipe }
89
+ let(:reader) { io.first }
90
+ let(:writer) { io.last }
91
+
92
+ describe "#dump_timestamps" do
93
+ before do
94
+ cache.cache_timestamp instance
95
+ cache.dump_timestamps(writer)
96
+ writer.close
97
+ end
98
+
99
+ it "writes a YAML dump of the cache to the passed I/O object" do
100
+ expect(YAML.load(reader.read)).to_equal [record_type, id] => timestamp
101
+ end
102
+ end
103
+
104
+ describe "#load_timestamps" do
105
+ before do
106
+ YAML.dump({ [record_type, id] => timestamp }, writer)
107
+ writer.close
108
+ cache.load_timestamps(reader)
109
+ end
110
+
111
+ it "reloads its internal cache from the passed I/O object" do
112
+ expect(cache.timestamp(instance)).to_equal timestamp
113
+ end
114
+ end
115
+ end
116
+
87
117
  end
@@ -20,7 +20,8 @@ describe Restforce::DB::Worker do
20
20
 
21
21
  ## 1b. The record is synced to Salesforce.
22
22
  worker.send :reset!
23
- worker.send :task, Restforce::DB::Initializer, mapping
23
+ manager = worker.send(:task_manager)
24
+ manager.send :task, Restforce::DB::Initializer, mapping
24
25
 
25
26
  expect(database_record.reload).to_be :salesforce_id?
26
27
  Salesforce.records << [salesforce_model, database_record.salesforce_id]
@@ -44,8 +45,9 @@ describe Restforce::DB::Worker do
44
45
  # We sleep here to ensure we pick up our manual changes.
45
46
  sleep 1 if VCR.current_cassette.recording?
46
47
  worker.send :reset!
47
- worker.send :task, Restforce::DB::Collector, mapping
48
- worker.send :task, Restforce::DB::Synchronizer, mapping
48
+ manager = worker.send(:task_manager)
49
+ manager.send :task, Restforce::DB::Collector, mapping
50
+ manager.send :task, Restforce::DB::Synchronizer, mapping
49
51
  end
50
52
  end
51
53
 
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: restforce-db
3
3
  version: !ruby/object:Gem::Version
4
- version: 3.5.0
4
+ version: 4.0.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Andrew Horner
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2015-08-11 00:00:00.000000000 Z
11
+ date: 2015-08-25 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: activerecord
@@ -201,6 +201,7 @@ files:
201
201
  - bin/setup
202
202
  - circle.yml
203
203
  - lib/file_daemon.rb
204
+ - lib/forked_process.rb
204
205
  - lib/generators/restforce/install_generator.rb
205
206
  - lib/generators/restforce/migration_generator.rb
206
207
  - lib/generators/templates/config.yml
@@ -231,6 +232,7 @@ files:
231
232
  - lib/restforce/db/instances/active_record.rb
232
233
  - lib/restforce/db/instances/base.rb
233
234
  - lib/restforce/db/instances/salesforce.rb
235
+ - lib/restforce/db/loggable.rb
234
236
  - lib/restforce/db/mapping.rb
235
237
  - lib/restforce/db/middleware/store_request_body.rb
236
238
  - lib/restforce/db/model.rb
@@ -248,6 +250,7 @@ files:
248
250
  - lib/restforce/db/synchronization_error.rb
249
251
  - lib/restforce/db/synchronizer.rb
250
252
  - lib/restforce/db/task.rb
253
+ - lib/restforce/db/task_manager.rb
251
254
  - lib/restforce/db/timestamp_cache.rb
252
255
  - lib/restforce/db/tracker.rb
253
256
  - lib/restforce/db/version.rb