darrell-activewarehouse-etl 0.9.1.4

Sign up to get free protection for your applications and to get access to all the features.
Files changed (83) hide show
  1. data/CHANGELOG +198 -0
  2. data/LICENSE +7 -0
  3. data/README +99 -0
  4. data/Rakefile +175 -0
  5. data/TODO +28 -0
  6. data/bin/etl +28 -0
  7. data/bin/etl.cmd +8 -0
  8. data/examples/database.example.yml +16 -0
  9. data/lib/etl/batch/batch.rb +111 -0
  10. data/lib/etl/batch/directives.rb +55 -0
  11. data/lib/etl/batch.rb +2 -0
  12. data/lib/etl/builder/date_dimension_builder.rb +96 -0
  13. data/lib/etl/builder/time_dimension_builder.rb +31 -0
  14. data/lib/etl/builder.rb +2 -0
  15. data/lib/etl/commands/etl.rb +89 -0
  16. data/lib/etl/control/control.rb +405 -0
  17. data/lib/etl/control/destination/database_destination.rb +97 -0
  18. data/lib/etl/control/destination/file_destination.rb +126 -0
  19. data/lib/etl/control/destination.rb +448 -0
  20. data/lib/etl/control/source/database_source.rb +220 -0
  21. data/lib/etl/control/source/enumerable_source.rb +11 -0
  22. data/lib/etl/control/source/file_source.rb +90 -0
  23. data/lib/etl/control/source/model_source.rb +39 -0
  24. data/lib/etl/control/source.rb +109 -0
  25. data/lib/etl/control.rb +3 -0
  26. data/lib/etl/core_ext/time/calculations.rb +42 -0
  27. data/lib/etl/core_ext/time.rb +5 -0
  28. data/lib/etl/core_ext.rb +1 -0
  29. data/lib/etl/engine.rb +556 -0
  30. data/lib/etl/execution/base.rb +9 -0
  31. data/lib/etl/execution/batch.rb +8 -0
  32. data/lib/etl/execution/job.rb +8 -0
  33. data/lib/etl/execution/migration.rb +85 -0
  34. data/lib/etl/execution.rb +19 -0
  35. data/lib/etl/generator/generator.rb +20 -0
  36. data/lib/etl/generator/surrogate_key_generator.rb +39 -0
  37. data/lib/etl/generator.rb +2 -0
  38. data/lib/etl/http_tools.rb +139 -0
  39. data/lib/etl/parser/apache_combined_log_parser.rb +49 -0
  40. data/lib/etl/parser/delimited_parser.rb +74 -0
  41. data/lib/etl/parser/fixed_width_parser.rb +65 -0
  42. data/lib/etl/parser/parser.rb +41 -0
  43. data/lib/etl/parser/sax_parser.rb +218 -0
  44. data/lib/etl/parser/xml_parser.rb +65 -0
  45. data/lib/etl/parser.rb +11 -0
  46. data/lib/etl/processor/block_processor.rb +14 -0
  47. data/lib/etl/processor/bulk_import_processor.rb +83 -0
  48. data/lib/etl/processor/check_exist_processor.rb +80 -0
  49. data/lib/etl/processor/check_unique_processor.rb +35 -0
  50. data/lib/etl/processor/copy_field_processor.rb +26 -0
  51. data/lib/etl/processor/encode_processor.rb +55 -0
  52. data/lib/etl/processor/hierarchy_exploder_processor.rb +55 -0
  53. data/lib/etl/processor/print_row_processor.rb +12 -0
  54. data/lib/etl/processor/processor.rb +25 -0
  55. data/lib/etl/processor/rename_processor.rb +24 -0
  56. data/lib/etl/processor/require_non_blank_processor.rb +26 -0
  57. data/lib/etl/processor/row_processor.rb +17 -0
  58. data/lib/etl/processor/sequence_processor.rb +23 -0
  59. data/lib/etl/processor/surrogate_key_processor.rb +53 -0
  60. data/lib/etl/processor/truncate_processor.rb +35 -0
  61. data/lib/etl/processor.rb +11 -0
  62. data/lib/etl/row.rb +20 -0
  63. data/lib/etl/screen/row_count_screen.rb +20 -0
  64. data/lib/etl/screen.rb +14 -0
  65. data/lib/etl/transform/block_transform.rb +13 -0
  66. data/lib/etl/transform/date_to_string_transform.rb +20 -0
  67. data/lib/etl/transform/decode_transform.rb +51 -0
  68. data/lib/etl/transform/default_transform.rb +20 -0
  69. data/lib/etl/transform/foreign_key_lookup_transform.rb +169 -0
  70. data/lib/etl/transform/hierarchy_lookup_transform.rb +49 -0
  71. data/lib/etl/transform/ordinalize_transform.rb +12 -0
  72. data/lib/etl/transform/sha1_transform.rb +13 -0
  73. data/lib/etl/transform/string_to_date_transform.rb +16 -0
  74. data/lib/etl/transform/string_to_datetime_transform.rb +14 -0
  75. data/lib/etl/transform/string_to_time_transform.rb +11 -0
  76. data/lib/etl/transform/transform.rb +61 -0
  77. data/lib/etl/transform/trim_transform.rb +26 -0
  78. data/lib/etl/transform/type_transform.rb +35 -0
  79. data/lib/etl/transform.rb +2 -0
  80. data/lib/etl/util.rb +59 -0
  81. data/lib/etl/version.rb +9 -0
  82. data/lib/etl.rb +83 -0
  83. metadata +245 -0
@@ -0,0 +1,97 @@
1
module ETL #:nodoc:
  module Control #:nodoc:
    # Destination which writes directly to a database. This is useful when you are dealing with
    # a small amount of data. For larger amounts of data you should probably use the bulk
    # loader if it is supported with your target database as it will use a much faster load
    # method.
    class DatabaseDestination < Destination
      # The target connection
      attr_reader :target

      # The table
      attr_reader :table

      # Specify the order from the source
      attr_reader :order

      # Set to true to truncate the destination table first
      attr_reader :truncate

      # Initialize the database destination
      #
      # * <tt>control</tt>: The ETL::Control::Control instance
      # * <tt>configuration</tt>: The configuration Hash
      # * <tt>mapping</tt>: The mapping
      #
      # Configuration options:
      # * <tt>:database</tt>: The database name (REQUIRED)
      # * <tt>:target</tt>: The target connection (REQUIRED)
      # * <tt>:table</tt>: The table to write to (REQUIRED)
      # * <tt>:truncate</tt>: Set to true to truncate before writing (defaults to false)
      # * <tt>:unique</tt>: Set to true to only insert unique records (defaults to false)
      # * <tt>:append_rows</tt>: Array of rows to append
      #
      # Mapping options:
      # * <tt>:order</tt>: The order of fields to write (REQUIRED)
      def initialize(control, configuration, mapping={})
        super
        @target = configuration[:target]
        @table = configuration[:table]
        @truncate = configuration[:truncate] ||= false
        # Extend the unique compound key with the SCD effective date field.
        # When SCD is not configured scd_effective_date_field returns nil, so
        # compact is required to keep a nil member out of the compound-key
        # constraint (uniq! alone would leave the nil in place).
        @unique = configuration[:unique] ? (configuration[:unique] + [scd_effective_date_field]).compact : configuration[:unique]
        @unique.uniq! unless @unique.nil?
        @order = mapping[:order] ? mapping[:order] + scd_required_fields : order_from_source
        @order.uniq! unless @order.nil?
        raise ControlError, "Order required in mapping" unless @order
        raise ControlError, "Table required" unless @table
        raise ControlError, "Target required" unless @target
      end

      # Flush the currently buffered data. All buffered rows are written inside
      # a single transaction; rows rejected by the unique compound-key check
      # are skipped, and the buffer is cleared afterwards.
      def flush
        conn.transaction do
          buffer.flatten.each do |row|
            # check to see if this row's compound key constraint already exists
            # note that the compound key constraint may not utilize virtual fields
            next unless row_allowed?(row)

            # add any virtual fields
            add_virtuals!(row)

            # quote each column name and value so the generated SQL is safe
            # regardless of the row contents
            names = []
            values = []
            order.each do |name|
              names << conn.quote_column_name(name)
              values << conn.quote(row[name])
            end
            q = "INSERT INTO #{conn.quote_table_name(table_name)} (#{names.join(',')}) VALUES (#{values.join(',')})"
            ETL::Engine.logger.debug("Executing insert: #{q}")
            conn.insert(q, "Insert row #{current_row}")
            @current_row += 1
          end
          buffer.clear
        end
      end

      # Close the destination: append any configured extra rows, then flush
      # whatever remains in the buffer.
      def close
        buffer << append_rows if append_rows
        flush
      end

      private
      # Lazily acquire the database connection for the target. On first use the
      # destination table is truncated if :truncate was requested.
      def conn
        @conn ||= begin
          conn = ETL::Engine.connection(target)
          conn.truncate(table_name) if truncate
          conn
        end
      end

      # Resolve the physical table name through the engine (which may remap it,
      # e.g. for batch processing).
      def table_name
        ETL::Engine.table(table, ETL::Engine.connection(target))
      end

    end
  end
end
@@ -0,0 +1,126 @@
1
# This source file contains the ETL::Control::FileDestination

module ETL #:nodoc:
  module Control #:nodoc:
    # File as the final destination.
    class FileDestination < Destination
      # The File to write to
      attr_reader :file

      # The output order
      attr_reader :order

      # Flag which indicates to append (default is to overwrite)
      attr_accessor :append

      # The separator
      attr_accessor :separator

      # The end of line marker
      attr_accessor :eol

      # The enclosure character
      attr_accessor :enclose

      # Initialize the object.
      # * <tt>control</tt>: The Control object
      # * <tt>configuration</tt>: The configuration map
      # * <tt>mapping</tt>: The output mapping
      #
      # Configuration options:
      # * <tt>:file</tt>: The file to write to (REQUIRED)
      # * <tt>:append</tt>: Set to true to append to the file (default is to overwrite)
      # * <tt>:separator</tt>: Record separator (default is a comma)
      # * <tt>:eol</tt>: End of line marker (default is \n)
      # * <tt>:enclose</tt>: Enclosure character (default is none)
      # * <tt>:unique</tt>: Set to true to only write unique records
      # * <tt>:append_rows</tt>: Array of rows to append
      #
      # Mapping options:
      # * <tt>:order</tt>: The order array
      def initialize(control, configuration, mapping={})
        super
        path = Pathname.new(configuration[:file])
        # Relative paths are resolved against the directory of the control file
        @file = path.absolute? ? path : Pathname.new(File.dirname(File.expand_path(control.file))) + path
        @append = configuration[:append] ||= false
        @separator = configuration[:separator] ||= ','
        @eol = configuration[:eol] ||= "\n"
        @enclose = configuration[:enclose]
        @unique = configuration[:unique] ? configuration[:unique] + scd_required_fields : configuration[:unique]
        @unique.uniq! unless @unique.nil?
        @order = mapping[:order] ? mapping[:order] + scd_required_fields : order_from_source
        @order.uniq! unless @order.nil?
        raise ControlError, "Order required in mapping" unless @order
      end

      # Close the destination. This will flush the buffer and close the underlying stream or connection.
      def close
        buffer << append_rows if append_rows
        flush
        f.close
      end

      # Flush the destination buffer: serialize each allowed row in the
      # configured field order, escape separator/backslash characters, and
      # write it followed by the end-of-line marker.
      def flush
        #puts "Flushing buffer (#{file}) with #{buffer.length} rows"
        buffer.flatten.each do |row|
          #puts "row change type: #{row.change_type}"
          # check to see if this row's compound key constraint already exists
          # note that the compound key constraint may not utilize virtual fields
          next unless row_allowed?(row)

          # add any virtual fields
          add_virtuals!(row)

          # collect all of the values using the order designated in the configuration
          values = order.collect do |name|
            value = row[name]
            case value
            when Date, Time, DateTime
              value.to_s(:db)
            else
              value.to_s
            end
          end

          # escape backslashes first so later escapes are not double-escaped,
          # then escape embedded separators and strip raw newlines
          values.collect! { |v| v.gsub(/\\/, '\\\\\\\\')}
          values.collect! { |v| v.gsub(separator, "\\#{separator}")}
          values.collect! { |v| v.gsub(/\n|\r/, '')}

          # enclose the value if required
          if !enclose.nil?
            values.collect! { |v| enclose + v.gsub(/(#{enclose})/, '\\\\\1') + enclose }
          end

          # write the values joined by the separator defined in the configuration
          f.write(values.join(separator))

          # write the end-of-line
          f.write(eol)
        end
        f.flush
        buffer.clear
        #puts "After flush there are #{buffer.length} rows"
      end

      private
      # Get the open file stream. Uses File.open rather than Kernel#open so a
      # configured path beginning with "|" can never be executed as a command.
      def f
        @f ||= File.open(file, mode)
      end

      # CSV-style options hash derived from the configuration (used by
      # subclasses/other writers that expect the standard CSV option keys).
      def options
        @options ||= {
          :col_sep => separator,
          :row_sep => eol,
          :force_quotes => !enclose.nil?
        }
      end

      # Get the appropriate mode to open the file stream
      def mode
        append ? 'a' : 'w'
      end
    end
  end
end
@@ -0,0 +1,448 @@
1
module ETL #:nodoc:
  module Control #:nodoc:
    # Base class for destinations. Provides row buffering, unique compound-key
    # filtering, virtual-field generation, and Slowly Changing Dimension (SCD)
    # type 1/2 change processing. Subclasses must implement #flush and #close.
    class Destination
      # Read-only accessor for the ETL::Control::Control instance
      attr_reader :control

      # Read-only accessor for the configuration Hash
      attr_reader :configuration

      # Read-only accessor for the destination mapping Hash
      attr_reader :mapping

      # Accessor to the buffer size
      attr_accessor :buffer_size

      # Unique flag.
      attr_accessor :unique

      # A condition for writing
      attr_accessor :condition

      # An array of rows to append to the destination
      attr_accessor :append_rows

      class << self
        # Get the destination class for the specified name.
        #
        # For example if name is :database or 'database' then the
        # DatabaseDestination class is returned
        def class_for_name(name)
          ETL::Control.const_get("#{name.to_s.camelize}Destination")
        end
      end

      # Initialize the destination
      #
      # Arguments:
      # * <tt>control</tt>: The ETL::Control::Control instance
      # * <tt>configuration</tt>: The configuration Hash
      # * <tt>mapping</tt>: The mapping Hash
      #
      # Options:
      # * <tt>:buffer_size</tt>: The output buffer size (default 100 records)
      # * <tt>:condition</tt>: A conditional proc that must return true for the
      #   row to be written
      # * <tt>:append_rows</tt>: An array of rows to append
      def initialize(control, configuration, mapping)
        @control = control
        @configuration = configuration
        @mapping = mapping
        # NOTE: ||= also writes the default back into the configuration Hash
        @buffer_size = configuration[:buffer_size] ||= 100
        @condition = configuration[:condition]
        @append_rows = configuration[:append_rows]
      end

      # Get the current row number (1-based, lazily initialized)
      def current_row
        @current_row ||= 1
      end

      # Write the given row. The row passes through SCD change processing (or
      # straight into the buffer), and the buffer is flushed once it reaches
      # buffer_size.
      def write(row)
        if @condition.nil? || @condition.call(row)
          process_change(row)
        end
        flush if buffer.length >= buffer_size
      end

      # Abstract method
      def flush
        raise NotImplementedError, "flush method must be implemented by subclasses"
      end

      # Abstract method
      def close
        raise NotImplementedError, "close method must be implemented by subclasses"
      end

      # Accumulated error messages (lazily initialized)
      def errors
        @errors ||= []
      end

      protected
      # Access the buffer
      def buffer
        @buffer ||= []
      end

      # Access the generators map
      def generators
        @generators ||= {}
      end

      # Get the order of elements from the source order
      def order_from_source
        order = []
        control.sources.first.definition.each do |item|
          case item
          when Hash
            order << item[:name]
          else
            order << item
          end
        end
        order
      end

      # Return true if the row is allowed. The row will not be allowed if the
      # :unique option is specified in the configuration and the compound key
      # already exists
      def row_allowed?(row)
        if unique
          # build the compound key by joining the unique field values with '|'
          key = (unique.collect { |k| row[k] }).join('|')
          return false if compound_key_constraints[key]
          compound_key_constraints[key] = 1
        end
        return true
      end

      # Get a hash of compound key contraints. This is used to determine if a
      # row can be written when the unique option is specified
      def compound_key_constraints
        @compound_key_constraints ||= {}
      end

      # Return fields which are Slowly Changing Dimension fields.
      # Uses the scd_fields specified in the configuration. If that's
      # missing, uses all of the row's fields (memoized from the first row seen).
      def scd_fields(row)
        @scd_fields ||= configuration[:scd_fields] || row.keys
        ETL::Engine.logger.debug "@scd_fields is: #{@scd_fields.inspect}"
        @scd_fields
      end

      # returns the fields that are required to identify an SCD
      # (empty when SCD processing is not configured)
      def scd_required_fields
        if scd?
          [scd_effective_date_field, scd_end_date_field, scd_latest_version_field]
        else
          []
        end
      end

      # Fields that are neither part of the natural key, the SCD fields, the
      # primary key, nor the SCD bookkeeping fields (memoized from the first row).
      def non_scd_fields(row)
        @non_scd_fields ||= row.keys - natural_key - scd_fields(row) - [primary_key] - scd_required_fields
        ETL::Engine.logger.debug "@non_scd_fields is: #{@non_scd_fields.inspect}"
        @non_scd_fields
      end

      # Fields copied verbatim from the existing record on a type-1 change;
      # always includes the primary key.
      def non_evolving_fields
        (Array(configuration[:scd][:non_evolving_fields]) << primary_key).uniq
      end

      # True when SCD processing is configured for this destination
      def scd?
        !configuration[:scd].nil?
      end

      # The configured SCD type (1 or 2), or nil when SCD is not configured
      def scd_type
        scd? ? configuration[:scd][:type] : nil
      end

      # Get the Slowly Changing Dimension effective date field. Defaults to
      # 'effective_date'. Returns nil when SCD is not configured.
      def scd_effective_date_field
        configuration[:scd][:effective_date_field] || :effective_date if scd?
      end

      # Get the Slowly Changing Dimension end date field. Defaults to
      # 'end_date'. Returns nil when SCD is not configured.
      def scd_end_date_field
        configuration[:scd][:end_date_field] || :end_date if scd?
      end

      # Get the Slowly Changing Dimension latest version field. Defaults to
      # 'latest_version'. Returns nil when SCD is not configured.
      def scd_latest_version_field
        configuration[:scd][:latest_version_field] || :latest_version if scd?
      end

      # Return the natural key field names, defaults to []
      def natural_key
        @natural_key ||= determine_natural_key
      end

      # Get the dimension table if specified
      def dimension_table
        @dimension_table ||= if scd?
          ETL::Engine.table(configuration[:scd][:dimension_table], dimension_target) or raise ConfigurationError, "dimension_table setting required"
        end
      end

      # Get the dimension target if specified
      def dimension_target
        @dimension_target ||= if scd?
          configuration[:scd][:dimension_target] or raise ConfigurationError, "dimension_target setting required"
        end
      end

      # Process a row to determine the change type
      def process_change(row)
        ETL::Engine.logger.debug "Processing row: #{row.inspect}"
        return unless row

        # Change processing can only occur if the natural key exists in the row
        ETL::Engine.logger.debug "Checking for natural key existence"
        unless has_natural_key?(row)
          buffer << row
          return
        end

        # single timestamp shared by the expiry of the old record and the
        # effective date of the new one
        @timestamp = Time.now

        # See if the scd_fields of the current record have changed
        # from the last time this record was loaded into the data
        # warehouse. If they match then throw away this row (no need
        # to process). If they do not match then the record is an
        # 'update'. If the record doesn't exist then it is an 'insert'
        ETL::Engine.logger.debug "Checking record for SCD change"
        if @existing_row = preexisting_row(row)
          if has_scd_field_changes?(row)
            process_scd_change(row)
          else
            process_scd_match(row)
          end
        else
          schedule_new_record(row)
        end
      end

      # Add any virtual fields to the row. Virtual rows will get their value
      # from one of the following:
      # * If the mapping is a Class, then an object which implements the next
      #   method
      # * If the mapping is a Symbol, then the XGenerator where X is the
      #   classified symbol
      # * If the mapping is a Proc, then it will be called with the row
      # * Otherwise the value itself will be assigned to the field
      def add_virtuals!(row)
        if mapping[:virtual]
          mapping[:virtual].each do |key,value|
            # If the row already has the virtual set, assume that's correct
            next if row[key]
            # Engine.logger.debug "Mapping virtual #{key}/#{value} for row #{row}"
            case value
            when Class
              generator = generators[key] ||= value.new
              row[key] = generator.next
            when Symbol
              generator = generators[key] ||= ETL::Generator::Generator.class_for_name(value).new(options)
              row[key] = generator.next
            when Proc
              row[key] = value.call(row)
            else
              if value.is_a?(ETL::Generator::Generator)
                row[key] = value.next
              else
                row[key] = value
              end
            end
          end
        end
      end

      private

      # Determine the natural key. This method will always return an array
      # of symbols. The default value is [].
      def determine_natural_key
        Array(configuration[:natural_key]).collect(&:to_sym)
      end

      # Check whether a natural key has been defined, and if so, whether
      # this row has enough information to do searches based on that natural
      # key.
      #
      # TODO: This should be factored out into
      # ETL::Row#has_all_fields?(field_array) But that's not possible
      # until *all* sources cast to ETL::Row, instead of sometimes
      # using Hash
      def has_natural_key?(row)
        natural_key.any? && natural_key.all? { |key| row.has_key?(key) }
      end

      # Helper for generating the SQL where clause that allows searching
      # by a natural key. NULL values are matched with IS NULL; all other
      # values are bound via sanitize_sql_array.
      def natural_key_equality_for_row(row)
        statement = []
        values = []
        natural_key.each do |nk|
          if row[nk].nil?
            statement << "#{nk} IS NULL"
          else
            statement << "#{nk} = ?"
            values << row[nk]
          end
        end
        statement = statement.join(" AND ")
        x=ETL::Execution::Base.send(:sanitize_sql_array, [statement, *values])
        return x
      end

      # Do all the steps required when a SCD *has* changed. Exact steps
      # depend on what type of SCD we're handling.
      def process_scd_change(row)
        ETL::Engine.logger.debug "SCD fields do not match"

        if scd_type == 2
          # SCD Type 2: new row should be added and old row should be updated
          ETL::Engine.logger.debug "type 2 SCD"

          # To update the old row, we delete the version in the database
          # and insert a new expired version

          # If there is no truncate then the row will exist twice in the database
          delete_outdated_record

          ETL::Engine.logger.debug "expiring original record"
          @existing_row[scd_end_date_field] = @timestamp
          @existing_row[scd_latest_version_field] = false

          buffer << @existing_row

        elsif scd_type == 1
          # SCD Type 1: only the new row should be added
          ETL::Engine.logger.debug "type 1 SCD"

          # Copy primary key, and other non-evolving fields over from
          # original version of record
          non_evolving_fields.each do |non_evolving_field|
            row[non_evolving_field] = @existing_row[non_evolving_field]
          end

          # If there is no truncate then the row will exist twice in the database
          delete_outdated_record
        else
          # SCD Type 3: not supported
          ETL::Engine.logger.debug "SCD type #{scd_type} not supported"
        end

        # In all cases, the latest, greatest version of the record
        # should go into the load
        schedule_new_record(row)
      end

      # Do all the steps required when a SCD has *not* changed. Exact
      # steps depend on what type of SCD we're handling.
      def process_scd_match(row)
        ETL::Engine.logger.debug "SCD fields match"

        if scd_type == 2 && has_non_scd_field_changes?(row)
          ETL::Engine.logger.debug "Non-SCD field changes"
          # Copy important data over from original version of record
          row[primary_key] = @existing_row[primary_key]
          row[scd_end_date_field] = @existing_row[scd_end_date_field]
          row[scd_effective_date_field] = @existing_row[scd_effective_date_field]
          row[scd_latest_version_field] = @existing_row[scd_latest_version_field]

          # If there is no truncate then the row will exist twice in the database
          delete_outdated_record

          buffer << row
        else
          # The record is totally the same, so skip it
        end
      end

      # Find the version of this row that already exists in the datawarehouse.
      # For type-2 SCDs only the latest version is considered.
      def preexisting_row(row)
        q = "SELECT * FROM #{dimension_table} WHERE #{natural_key_equality_for_row(row)}"
        q << " AND #{scd_latest_version_field}" if scd_type == 2

        ETL::Engine.logger.debug "looking for original record"
        result = connection.select_one(q)

        ETL::Engine.logger.debug "Result: #{result.inspect}"

        result ? ETL::Row[result.symbolize_keys!] : nil
      end

      # Check whether the SCD fields have changed since the last
      # load of this record. (Compares via string coercion.)
      def has_scd_field_changes?(row)
        scd_fields(row).any? { |csd_field|
          ETL::Engine.logger.debug "Row: #{row.inspect}"
          ETL::Engine.logger.debug "Existing Row: #{@existing_row.inspect}"
          ETL::Engine.logger.debug "comparing: #{row[csd_field].to_s} != #{@existing_row[csd_field].to_s}"
          if row[csd_field].to_s != @existing_row[csd_field].to_s
            x=true
          else
            x=false
          end
          ETL::Engine.logger.debug "Fields differ?: #{x}"
          x
        }
      end

      # Check whether non-scd fields have changed since the last
      # load of this record.
      def has_non_scd_field_changes?(row)
        non_scd_fields(row).any? { |non_csd_field| row[non_csd_field].to_s != @existing_row[non_csd_field].to_s }
      end

      # Grab, or re-use, a database connection for running queries directly
      # during the destination processing.
      def connection
        @conn ||= ETL::Engine.connection(dimension_target)
      end

      # Utility for removing a row that has outdated information. Note
      # that this deletes directly from the database, even if this is a file
      # destination. It needs to do this because you can't do deletes in a
      # bulk load.
      def delete_outdated_record
        ETL::Engine.logger.debug "deleting old row"

        q = "DELETE FROM #{dimension_table} WHERE #{primary_key} = #{@existing_row[primary_key]}"
        connection.delete(q)
      end

      # Schedule the latest, greatest version of the row for insertion
      # into the database
      def schedule_new_record(row)
        ETL::Engine.logger.debug "writing new record"
        if scd_type == 2
          row[scd_effective_date_field] = @timestamp
          row[scd_end_date_field] = '9999-12-31 00:00:00'
          row[scd_latest_version_field] = true
        end
        buffer << row
      end

      # Get the name of the primary key for this table. Asks the dimension
      # model class for this information, but if that class hasn't been
      # defined, just defaults to :id.
      def primary_key
        return @primary_key if @primary_key
        @primary_key = dimension_table.to_s.camelize.constantize.primary_key.to_sym
      rescue NameError => e
        ETL::Engine.logger.debug "couldn't get primary_key from dimension model class, using default :id"
        @primary_key = :id
      end

    end
  end
end

# Load all concrete destination implementations shipped alongside this file
Dir[File.dirname(__FILE__) + "/destination/*.rb"].each { |file| require(file) }