eco-helpers 2.7.12 → 2.7.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/Gemfile CHANGED
@@ -4,3 +4,5 @@ git_source(:github) {|repo_name| "https://github.com/#{repo_name}" }
 
  # Specify your gem's dependencies in eco-helpers.gemspec
  gemspec
+
+ # gem 'rubocop-rake', require: false
data/eco-helpers.gemspec CHANGED
@@ -10,13 +10,13 @@ Gem::Specification.new do |spec|
  spec.authors = ["Oscar Segura"]
  spec.email = ["oscar@ecoportal.co.nz"]
  spec.date = %q{2018-09-05}
- spec.summary = %q{eco-helpers to manage people api cases}
+ spec.summary = %q(eco-helpers to manage people api cases)
  spec.homepage = "https://www.ecoportal.com"
  spec.licenses = %w[MIT]
 
  spec.required_ruby_version = '>= 2.7.2'
 
- spec.files = `git ls-files -z`.split("\x0").reject do |f|
+ spec.files = `git ls-files -z`.split("\x0").reject do |f|
  f.match(%r{^(test|spec|features)/})
  end
 
@@ -25,29 +25,31 @@ Gem::Specification.new do |spec|
  spec.require_paths = ["lib"]
 
  spec.add_development_dependency "bundler", ">= 2.4.12", "< 3"
- spec.add_development_dependency "rspec", ">= 3.12.0", "< 4"
  spec.add_development_dependency "rake", ">= 13.0.3", "< 14"
- spec.add_development_dependency "yard", ">= 0.9.34", "< 1"
  spec.add_development_dependency "redcarpet", ">= 3.6.0", "< 4"
+ spec.add_development_dependency "rspec", ">= 3.12.0", "< 4"
+ spec.add_development_dependency "rubocop", "~> 1"
+ spec.add_development_dependency "rubocop-rake", "~> 0"
+ spec.add_development_dependency "yard", ">= 0.9.34", "< 1"
 
- spec.add_dependency 'ecoportal-api', '>= 0.9.8', '< 0.10'
- spec.add_dependency 'ecoportal-api-v2', '>= 1.1.8', '< 1.2'
- spec.add_dependency 'ecoportal-api-graphql', '>= 0.3.18', '< 0.4'
- spec.add_dependency 'ed25519', '>= 1.2'
- spec.add_dependency 'bcrypt_pbkdf', '>= 1.0'
+ spec.add_dependency 'amatch', '>= 0.4.1', '< 0.5'
  spec.add_dependency 'aws-sdk-s3', '>= 1.142.0', '< 2'
  spec.add_dependency 'aws-sdk-ses', '>= 1.58.0', '< 2'
+ spec.add_dependency 'bcrypt_pbkdf', '>= 1.0'
+ spec.add_dependency 'docx', '>= 0.8.0', '< 0.9'
  spec.add_dependency 'dotenv', '>= 2.8.1', '< 3'
- spec.add_dependency 'net-ssh', '>= 7.2.1', '< 8'
- spec.add_dependency 'net-sftp', '>= 4.0.0', '< 5'
- spec.add_dependency 'hashdiff', '>= 1.1.0', '< 1.2'
+ spec.add_dependency 'ecoportal-api', '>= 0.9.8', '< 0.10'
+ spec.add_dependency 'ecoportal-api-graphql', '>= 0.3.18', '< 0.4'
+ spec.add_dependency 'ecoportal-api-v2', '>= 1.1.8', '< 1.2'
+ spec.add_dependency 'ed25519', '>= 1.2'
+ spec.add_dependency 'fast_excel', '>= 0.5.0', '< 0.6'
  spec.add_dependency 'fuzzy_match', '>= 2.1.0', '< 2.2'
- spec.add_dependency 'amatch', '>= 0.4.1', '< 0.5'
+ spec.add_dependency 'hashdiff', '>= 1.1.0', '< 1.2'
  spec.add_dependency 'jaro_winkler', '>= 1.5.6', '< 1.6'
+ spec.add_dependency 'net-sftp', '>= 4.0.0', '< 5'
+ spec.add_dependency 'net-ssh', '>= 7.2.1', '< 8'
  spec.add_dependency 'nokogiri', '>= 1.13', '< 1.17'
  spec.add_dependency 'roo', '>= 2.10.1', '< 2.11'
  spec.add_dependency 'roo-xls', '>= 1.2.0', '< 1.3'
- spec.add_dependency 'fast_excel', '>= 0.5.0', '< 0.6'
- spec.add_dependency 'docx', '>= 0.8.0', '< 0.9'
  spec.add_dependency 'rubyzip', '>= 2.3.2', '< 2.4'
  end
@@ -3,17 +3,20 @@ module Eco
  module Common
  module Session
  class Logger < Eco::Language::BasicLogger
- TIMESTAMP_PATTERN = '%Y-%m-%dT%H:%M:%S'
+ TIMESTAMP_PATTERN = '%Y-%m-%dT%H:%M:%S'.freeze
 
  attr_reader :cache
 
  def initialize(file_level: ::Logger::DEBUG, log_file: nil, enviro: nil, **kargs)
  super(**kargs)
- raise "Required Environment object (enviro:). Given: #{enviro}" if enviro && !enviro.is_a?(Eco::API::Common::Session::Environment)
+
+ msg = "Required Environment object (enviro:). Given: #{enviro.class}"
+ raise ArgumentError, msg if enviro && !enviro.is_a?(Eco::API::Common::Session::Environment)
+
  @enviro = enviro
  @cache = Logger::Cache.new
 
- if log_file = fetch_log_file(log_file)
+ if (log_file = fetch_log_file(log_file))
  loggers[:file] = ::Logger.new(log_file).tap do |logger|
  logger.formatter = format_proc(console: false) do |severity, datetime, msg, formatted_msg|
  cache.add(severity, datetime, msg, formatted_msg)
@@ -34,15 +37,16 @@ module Eco
  private
 
  def config(attr)
- return nil unless cnf = @enviro&.config&.logger
+ return unless (cnf = @enviro&.config&.logger)
+
  cnf.send(attr) if cnf.respond_to?(attr)
  end
 
  def if_config(attr)
- unless (value = config(attr)).nil?
- yield(value) if block_given?
- value
- end
+ return if (value = config(attr)).nil?
+
+ yield(value) if block_given?
+ value
  end
 
  def fetch_log_file(log_file)
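Most of the changes in the two Logger hunks above are RuboCop-style cleanups rather than behaviour changes: a frozen string constant, guard clauses, and parenthesised assignments used as conditions. The parentheses are the conventional signal, to both the reader and RuboCop's Lint/AssignmentInCondition cop, that the assignment is intentional. A minimal sketch of the idiom (fetch_value and use are hypothetical):

    # `if value = fetch_value` draws a warning that `=` might be a mistyped `==`;
    # the extra parentheses mark the assignment as deliberate.
    if (value = fetch_value)
      use(value)
    end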
@@ -24,7 +24,7 @@ module Eco
  end
 
  def empty?
- count == 0
+ count.zero?
  end
 
  def each(&block)
@@ -90,15 +90,16 @@ module Eco
  # @return [Hash<Eco::API::Session::Batch::Job, Eco::API::Session::Batch::Status>]
  def launch(simulate: false)
  each_with_index do |job, idx|
- if job.pending?
- status[job] = job_status = job.launch(simulate: simulate)
- callback = @callbacks[job]
- callback.call(job, job_status) if callback
- Eco::API::Session::Batch::JobsGroups.counter(delay_between_jobs) if !simulate && idx < self.length - 1
- end
+ next unless job.pending?
+
+ status[job] = job_status = job.launch(simulate: simulate)
+ callback = @callbacks[job]
+ callback&.call(job, job_status)
+ Eco::API::Session::Batch::JobsGroups.counter(delay_between_jobs) if !simulate && idx < length - 1
  end
  launch(simulate: simulate) if pending?
- return status
+
+ status
  end
 
  def find_jobs(type:)
@@ -113,7 +114,7 @@ module Eco
  yield(job, job_status)
  end
  self
- else
+ else # rubocop:disable Naming/MemoizedInstanceVariableName
  @jobs_status ||= {}
  end
  end
@@ -0,0 +1,23 @@
+ class Eco::API::UseCases::Default::People::Migrate::RemapTags
+ class Cli < Eco::API::UseCases::Cli
+ desc 'Maps filter_tags and default_tag based on input file'
+
+ callback do |_people, _session, options, _usecase|
+ if (file = SCR.get_file('-remap-tags', required: true, should_exist: true))
+ options.deep_merge!(input: {file: {name: file}})
+ end
+ end
+
+ add_option('-remove-source-tag', 'Whether source mapped tags should be removed') do |options|
+ options.deep_merge!(usecase: {remove_source_tag: true})
+ end
+
+ add_option('-clear-unknown-tag', 'Whether unknown tags should be cleared') do |options|
+ options.deep_merge!(usecase: {clear_unknown_tags: true})
+ end
+
+ add_option('-clear-archived-nodes', 'Whether archived nodes should be cleared') do |options|
+ options.deep_merge!(usecase: {clear_archived_nodes: true})
+ end
+ end
+ end
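For orientation, the callback and options above deep-merge their settings into the case's options hash. A run with `-remap-tags map.csv -remove-source-tag` (filename hypothetical) would yield roughly:

    {
      input:   {file: {name: 'map.csv'}},
      usecase: {remove_source_tag: true}
    }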
@@ -0,0 +1,269 @@
+ class Eco::API::UseCases::Default::People::Migrate::RemapTags < Eco::API::Common::Loaders::UseCase
+ name 'remap-tags'
+ type :transform
+
+ require_relative 'cli/remap_tags_cli'
+
+ REGISTER_TAGS = %w[
+ EVENT INJURY RISK CONTRACTOR PERMIT
+ AUDIT JSEA
+ TRAINING INDUCTION
+ MEETING PPE CHEMICAL
+ PLANT ASSET
+ POLICY IDEA REPORTS
+ ].freeze
+
+ def main(*_args)
+ tune_options!
+
+ people.each do |person|
+ update_job.add(person)
+
+ update_filter_tags!(person)
+ update_default_tag!(person)
+ clear_unknown_tags!(person) if clear_unknown_tags?
+ clear_archived_nodes!(person) if clear_archived_nodes?
+ end
+ end
+
+ def update_job
+ @update_job ||= session.new_job('main', 'update', :update, usecase) do |_job, _job_status|
+ report_removed_src_tags
+ report_archived_cleared
+ report_unknown_cleared
+ end
+ end
+
+ private
+
+ # If the map on `value` exists, it returns the new value,
+ # otherwise mirrors the `value` (to prevent data loss)
+ def apply_map(value)
+ case value
+ when Array
+ value.map {|val| apply_map(val)}
+ when String
+ if (new_tag = id_maps[value])
+ new_tag
+ else
+ value
+ end
+ end
+ end
+
+ def update_filter_tags!(person)
+ mapped_tags = apply_map(person.filter_tags)
+
+ if remove_source_tag?
+ removed = person.filter_tags - mapped_tags
+ removed_src_tag!(*removed)
+ person.filter_tags = mapped_tags
+ else
+ person.filter_tags |= mapped_tags
+ end
+ end
+
+ # We always map the default_tag
+ def update_default_tag!(person)
+ return unless (account = person.account)
+
+ default_tag = apply_map(account.default_tag)
+ return unless default_tag.nil? || active_node?(default_tag)
+
+ account.default_tag = default_tag
+ end
+
+ # @note it still keeps archived nodes
+ def clear_unknown_tags!(person)
+ person.filter_tags = person.filter_tags.select do |tag|
+ known_tag?(tag).tap do |known|
+ cleared_unknown_tag!(tag) unless known
+ end
+ end
+ end
+
+ # @note it only targets archived nodes
+ def clear_archived_nodes!(person)
+ person.filter_tags = person.filter_tags.reject do |tag|
+ archived_node?(tag).tap do |archived|
+ cleared_archived_node!(tag) if archived
+ end
+ end
+ end
+
+ def report_removed_src_tags
+ return if removed_src_tags.empty?
+
+ log(:info) {
+ msg = "Here is the list of the #{removed_src_tags.count} removed source-mapped tags..."
+ msg << "\n • "
+ msg << removed_src_tags.sort.join("\n • ")
+ msg
+ }
+ end
+
+ # Clean OLD Tags
+ def report_unknown_cleared
+ report_cleared(cleared_unknown_tags, msg: 'Cleared up the following non existing tags:')
+ end
+
+ def report_archived_cleared
+ report_cleared(cleared_archived_nodes, msg: 'Cleared up the following archived nodes:')
+ end
+
+ # Whether it should remove the original tag
+ # @note we might want to preserve it at the moment, and remove them
+ # at a later stage (i.e. after migrating the pages)
+ def remove_source_tag?
+ options.dig(:usecase, :remove_source_tag) || false
+ end
+
+ # Whether it should clear out any unknown tag
+ def clear_unknown_tags?
+ options.dig(:usecase, :clear_unknown_tags) || false
+ end
+
+ # Whether it should clear out any archived node
+ def clear_archived_nodes?
+ options.dig(:usecase, :clear_archived_nodes) || false
+ end
+
+ def report_cleared(data, msg:)
+ return if data.empty?
+
+ cleared_sorted = data.sort_by {|tag, count| [count * -1, tag]}
+
+ msg << "\n • "
+ msg += cleared_sorted.map do |tag, count|
+ "'#{tag}' (#{count})"
+ end.join("\n • ")
+
+ log(:info) { msg }
+ data.clear
+ end
+
+ def removed_src_tag!(*tags)
+ @removed_src_tags = removed_src_tags | tags
+ end
+
+ def removed_src_tags
+ @removed_src_tags ||= []
+ end
+
+ def cleared_unknown_tag!(tag)
+ cleared_unknown_tags[tag] ||= 0
+ cleared_unknown_tags[tag] += 1
+ end
+
+ def cleared_unknown_tags
+ @cleared_unknown_tags ||= {}
+ end
+
+ def cleared_archived_node!(tag)
+ cleared_archived_nodes[tag] ||= 0
+ cleared_archived_nodes[tag] += 1
+ end
+
+ def cleared_archived_nodes
+ @cleared_archived_nodes ||= {}
+ end
+
+ # @note validations on src -> dst location ids aren't done because
+ # we might be in a case where we are moving from old locations to
+ # location ids, but it might be a case where we are mapping an actual
+ # location id to a custom tag as well... so trying to keep this standard.
+ def id_maps
+ @id_maps ||= input_csv.each.with_object({}) do |row, out|
+ from, to = row.fields.to_a.first(2)
+ froms = from.split('|').map(&:strip).map(&:upcase).reject(&:empty?)
+ tos = to.split('|').map(&:strip).map(&:upcase).reject(&:empty?)
+ from = froms.first
+ to = tos.first
+ next if from == to
+ next unless from && to
+
+ out[from] = to
+ end
+ end
+
+ def input_csv
+ @input_csv ||= Eco::CSV.read(input_file, encoding: input_encoding)
+ end
+
+ def input_encoding
+ options.dig(:input, :file, :encoding) || 'utf-8'
+ end
+
+ def input_file
+ @input_file ||= options.dig(:input, :file, :name).tap do |file|
+ if file.nil?
+ log(:warn) { "No input file specified" }
+ exit(1)
+ elsif File.exist?(file)
+ log(:info) { "Using input file '#{file}'" }
+ else
+ log(:error) { "File not found '#{file}'" }
+ exit(1)
+ end
+ end
+ end
+
+ def default_tag(person)
+ return unless (account = person.account)
+ account.default_tag
+ end
+
+ def known_tag?(value)
+ register_tag?(value) || tag?(value)
+ end
+
+ def unknown_tag?(value)
+ !known_tag?(value)
+ end
+
+ def register_tag?(value)
+ register_tags.include?(value)
+ end
+
+ def archived_tag?(value)
+ return false if register_tag?(value)
+ return false unless (node = tagtree.node(value))
+ node.archived
+ end
+
+ def tag?(value)
+ return false if value.nil?
+ return true if tagtree.tag?(value)
+ register_tags.any? { |reg| value.upcase == reg }
+ end
+
+ def active_node?(value)
+ return false if value.nil?
+ return false unless tagtree.tag?(value)
+ !archived_node?(value)
+ end
+
+ def archived_node?(value)
+ return false unless (node = tagtree.node(value))
+ node.archived
+ end
+
+ # Get all the location structures merged into a single one
+ def tagtree
+ @tagtree ||= session.tagtree(
+ live: true,
+ include_archived: true,
+ merge: true
+ )
+ end
+
+ def register_tags
+ @register_tags ||= self.class::REGISTER_TAGS.compact.map(&:upcase)
+ end
+
+ def tune_options!
+ # options.deep_merge!(include: {excluded: true})
+ options.deep_merge!(skip: {api_policies: true})
+ options.deep_merge!(skip: {batch_policy: true})
+ end
+ end
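As a worked example of the id_maps parsing above (row values hypothetical): only the first two columns of each row are read, each cell may carry several |-separated candidates, and only the first candidate on each side survives:

    row = ['old tag | legacy alias', 'new tag']   # first two fields of a CSV row
    row[0].split('|').map(&:strip).map(&:upcase).reject(&:empty?)
    # => ["OLD TAG", "LEGACY ALIAS"]              # from = "OLD TAG"
    # resulting map entry: {"OLD TAG" => "NEW TAG"}
    # rows where from == to, or with a missing side, are skipped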
@@ -0,0 +1,6 @@
+ module Eco::API::UseCases::Default::People
+ module Migrate
+ end
+ end
+
+ require_relative 'migrate/remap_tags_case'
@@ -12,4 +12,4 @@ end
  require_relative 'people/treat'
  require_relative 'people/utils'
  require_relative 'people/amend'
-
+ require_relative 'people/migrate'
@@ -0,0 +1,15 @@
+ class Eco::API::UseCases::Default::People::Utils::SplitCsv
+ class Cli < Eco::API::UseCases::Cli
+ desc "Splits an input file into multiple ones"
+
+ callback do |_sess, options, _case|
+ file = SCR.get_file(cli_name, required: true, should_exist: true)
+ options.deep_merge!(source: {file: file})
+ end
+
+ add_option("-max-rows", "The max number of rows per output file") do |options|
+ count = SCR.get_arg("-max-rows", with_param: true)
+ options.deep_merge!(output: {file: {max_rows: count}})
+ end
+ end
+ end
@@ -0,0 +1,34 @@
+ class Eco::API::UseCases::Default::People::Utils::SplitCsv < Eco::API::Common::Loaders::UseCase
+ require_relative 'cli/split_csv_cli'
+
+ MAX_ROWS = 15_000
+
+ name "split-csv"
+ type :other
+
+ def main(*_args)
+ Eco::CSV.split(input_file, max_rows: max_rows).each do |file|
+ log(:info) {
+ "Generated file '#{file}'"
+ }
+ end
+ end
+
+ private
+
+ def input_file
+ options.dig(:source, :file)
+ end
+
+ def max_rows
+ max_rows_options || self.class::MAX_ROWS
+ end
+
+ def max_rows_options
+ return nil unless (num = options.dig(:output, :file, :max_rows))
+
+ num = num.to_i
+ num = nil if num.zero?
+ num
+ end
+ end
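Note how max_rows_options sanitises the CLI input above: -max-rows arrives as a string, so to_i is applied and anything that coerces to zero falls back to the MAX_ROWS default:

    '500'.to_i   # => 500  -> used as max_rows
    'abc'.to_i   # => 0    -> treated as nil, so MAX_ROWS (15_000) applies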
@@ -0,0 +1,12 @@
+ module Eco
+ module API
+ class UseCases
+ class Default
+ module Utils
+ end
+ end
+ end
+ end
+ end
+
+ require_relative 'utils/split_csv_case'
@@ -14,3 +14,4 @@ end
 
  require_relative 'default/people'
  require_relative 'default/locations'
+ require_relative 'default/utils'
@@ -69,8 +69,7 @@ class Eco::API::UseCases::GraphQL::Samples::Location
  end
 
  # Generates the file and pushes to the SFTP folder
- # @note this method can only work if we can run cummulative dry-runs to the back-end.
- # This is only possible using a draft, which is not that desired.
+ # @note it also displays the mappings on screen
  # @note the SFTP push only happens if `remote_subfolder` is defined, via:
  # 1. `options.dig(:sftp, :remote_subfolder)`
  # 2. `REMOTE_FOLDER` const
@@ -25,10 +25,10 @@ class Eco::API::UseCases::GraphQL::Samples::Location
  end
 
  # Generates the file
- # @note this method can only work if we can run cummulative dry-runs to the back-end.
- # This is only possible using a draft, which is not that desired.
+ # @note this method used to only work if we could run cumulative dry-runs to the back-end.
+ # However, after RS P3, as mappings are one-to-one (not many-to-many per row),
+ # we can just display the mappings in dry-run as well.
  def close_handling_tags_remap_csv
- return false if simulate?
  if tags_remap_table.any?
  puts "REMAP LOC IDs CSV (content):"
  puts tags_remap_table
@@ -50,7 +50,9 @@ class Eco::API::UseCases::GraphQL::Samples::Location
  msg = "Expecting CommandResults object. Given: #{results.class}"
  raise msg unless results.is_a?(request_results_class)
 
- results.applied.each do |result|
+ target = simulate? ? results.results : results.applied
+
+ target.each do |result|
  prev_id, new_id = result.command_input_data.values_at(:nodeId, :newId)
  next if new_id.nil? # not an id change
  next if prev_id == new_id
@@ -0,0 +1,114 @@
+ module Eco
+ class CSV
+ class Split
+ include Eco::Language::AuxiliarLogger
+
+ attr_reader :filename
+
+ def initialize(filename, max_rows:, **kargs)
+ raise ArgumentError, "File '#{filename}' does not exist" unless ::File.exist?(filename)
+ @filename = filename
+ @max_rows = max_rows
+ @params = kargs
+ init
+ end
+
+ # @yield [idx, file] a block to set the filename
+ # @yieldparam idx [Integer] the number of the file
+ # @yieldparam file [String] the default name of the file
+ # @yieldreturn [String] the filename of the file `idx`.
+ # - If `nil` it will create its own filename convention
+ # @return [Array<String>] names of the generated files
+ def call(&block)
+ stream.for_each do |row, ridx|
+ copy_row(row, ridx, &block)
+ end
+ out_files
+ ensure
+ puts "Close at row #{row_idx}"
+ @csv&.close
+ end
+
+ private
+
+ attr_reader :params
+ attr_reader :idx, :max_rows
+ attr_reader :headers, :row_idx
+
+ attr_accessor :exception
+
+ def copy_row(row, ridx, &block)
+ @headers ||= row.headers
+ @row_idx = ridx
+ current_csv(ridx, &block) << row.fields
+ end
+
+ def current_csv(ridx)
+ if split?(ridx) || @csv.nil?
+ puts "Split at row #{row_idx}"
+ @csv&.close
+ out_filename = generate_name(nidx = next_idx)
+ # keep the generated name when the block returns `nil` (see @yieldreturn above)
+ out_filename = yield(nidx, out_filename) || out_filename if block_given?
+ @csv = ::CSV.open(out_filename, "w")
+ @csv << headers
+ out_files << out_filename
+ end
+ @csv
+ end
+
+ def split?(ridx)
+ ((ridx + 1) % max_rows).zero?
+ end
+
+ def next_idx
+ idx.tap { @idx += 1 }
+ end
+
+ def init
+ @idx ||= 0 # rubocop:disable Naming/MemoizedInstanceVariableName
+ end
+
+ def stream
+ @stream ||= Eco::CSV::Stream.new(filename, **params)
+ end
+
+ def generate_name(fidx)
+ File.join(input_dir, "#{input_name}_#{file_number(fidx)}#{input_ext}")
+ end
+
+ def file_number(num)
+ "#{zeroed}#{num}"[-5..]
+ end
+
+ def zeroed
+ "0" * 5
+ end
+
+ def out_files
+ @out_files ||= []
+ end
+
+ def input_name
+ @input_name ||= File.basename(input_basename, input_ext)
+ end
+
+ def input_ext
+ @input_ext ||= input_basename.split('.')[1..].join('.').then do |name|
+ ".#{name}"
+ end
+ end
+
+ def input_basename
+ @input_basename ||= File.basename(input_full_filename)
+ end
+
+ def input_dir
+ @input_dir ||= File.dirname(input_full_filename)
+ end
+
+ def input_full_filename
+ @input_full_filename ||= File.expand_path(filename)
+ end
+ end
+ end
+ end
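A minimal usage sketch of the new splitter (input filename hypothetical; the split-csv case above reaches it through the Eco::CSV.split wrapper):

    splitter = Eco::CSV::Split.new('people.csv', max_rows: 10_000)
    files = splitter.call do |idx, default_name|
      "people_batch_#{idx}.csv"   # optional: override the generated name
    end
    # files => ["people_batch_0.csv", "people_batch_1.csv", ...]

Without a block, output files take the input name plus a zero-padded index, e.g. people_00000.csv, people_00001.csv.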