burner 1.0.0.pre.alpha → 1.0.0.pre.alpha.5

Files changed (39)
  1. checksums.yaml +4 -4
  2. data/README.md +320 -2
  3. data/burner.gemspec +3 -0
  4. data/lib/burner.rb +10 -0
  5. data/lib/burner/cli.rb +7 -7
  6. data/lib/burner/job.rb +4 -2
  7. data/lib/burner/jobs.rb +30 -10
  8. data/lib/burner/jobs/collection/arrays_to_objects.rb +43 -0
  9. data/lib/burner/jobs/collection/graph.rb +43 -0
  10. data/lib/burner/jobs/collection/objects_to_arrays.rb +54 -0
  11. data/lib/burner/jobs/collection/shift.rb +43 -0
  12. data/lib/burner/jobs/collection/transform.rb +64 -0
  13. data/lib/burner/jobs/collection/transform/attribute.rb +33 -0
  14. data/lib/burner/jobs/collection/transform/attribute_renderer.rb +36 -0
  15. data/lib/burner/jobs/collection/unpivot.rb +45 -0
  16. data/lib/burner/jobs/collection/values.rb +50 -0
  17. data/lib/burner/jobs/deserialize/csv.rb +28 -0
  18. data/lib/burner/jobs/deserialize/json.rb +1 -1
  19. data/lib/burner/jobs/deserialize/yaml.rb +1 -1
  20. data/lib/burner/jobs/dummy.rb +1 -1
  21. data/lib/burner/jobs/echo.rb +2 -2
  22. data/lib/burner/jobs/io/base.rb +3 -16
  23. data/lib/burner/jobs/io/exist.rb +43 -0
  24. data/lib/burner/jobs/io/read.rb +12 -2
  25. data/lib/burner/jobs/io/write.rb +25 -3
  26. data/lib/burner/jobs/serialize/csv.rb +38 -0
  27. data/lib/burner/jobs/serialize/json.rb +1 -1
  28. data/lib/burner/jobs/serialize/yaml.rb +1 -1
  29. data/lib/burner/jobs/set.rb +1 -1
  30. data/lib/burner/jobs/sleep.rb +1 -1
  31. data/lib/burner/modeling.rb +10 -0
  32. data/lib/burner/modeling/key_index_mapping.rb +29 -0
  33. data/lib/burner/payload.rb +19 -4
  34. data/lib/burner/pipeline.rb +10 -3
  35. data/lib/burner/step.rb +5 -3
  36. data/lib/burner/string_template.rb +6 -5
  37. data/lib/burner/version.rb +1 -1
  38. data/lib/burner/written_file.rb +28 -0
  39. metadata +59 -2
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 920f3b0f0027e21b30da75f86335d75d2d351f49c8b68662de7226ade43474c6
- data.tar.gz: b7f4189e37023f1d627ea5222df0f397a24a69c7ce3721b13ce714d2e274ee94
+ metadata.gz: 57b5b4290b72962e10ce7ea7244a1dcfb43894cff1d72ef4e0684de4cdea4a35
+ data.tar.gz: 2936c408e7ffa3e1e9883510d890329870d5c1a4c129fef1fb283103568c9508
  SHA512:
- metadata.gz: d488edf32a95e5da64190c512f365d76e2414d16712cee9b56a400bd62e11ed7824a316a95923af15f0ecac3390e4be359d2e31ffc33fb13129309c4efce9403
- data.tar.gz: 8640cfdb2fa4241fb2a493c2ac2aed62e08e1826446907f4b16d043276fffee656c787d2079545fe03d52b8f45756240a523ad878fc89063884868c3f94bfecd
+ metadata.gz: f514870aa2b12cc4fc34f3952cabf8c738985dac1a6602c7f41aec3f28b83738991d5cf82d429b62d76b0f3a96b85907e51d0e06db0ce33579d1dc74dc55409e
+ data.tar.gz: 2e9ee10f91bb28eb4d79091ae7106f6da640daaf6b5ac2f77e828e81743dfd55d6baca12a0e376c3be709483fe7a94e0f840f47ed2ecb5031233f4ff9ae7db3a
data/README.md CHANGED
@@ -2,7 +2,7 @@
 
  [![Gem Version](https://badge.fury.io/rb/burner.svg)](https://badge.fury.io/rb/burner) [![Build Status](https://travis-ci.org/bluemarblepayroll/burner.svg?branch=master)](https://travis-ci.org/bluemarblepayroll/burner) [![Maintainability](https://api.codeclimate.com/v1/badges/dbc3757929b67504f6ca/maintainability)](https://codeclimate.com/github/bluemarblepayroll/burner/maintainability) [![Test Coverage](https://api.codeclimate.com/v1/badges/dbc3757929b67504f6ca/test_coverage)](https://codeclimate.com/github/bluemarblepayroll/burner/test_coverage) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
 
- TODO
+ This library serves as the skeleton for a processing engine. It allows you to organize your code into jobs, then stitch those jobs together as steps.
 
  ## Installation
 
@@ -20,7 +20,325 @@ bundle add burner
 
  ## Examples
 
- TODO
+ The purpose of this library is to provide a framework for creating highly de-coupled functions (known as jobs), and then allow for stitching them back together in any arbitrary order (known as steps). Although our example will be somewhat specific and contrived, the jobs, and the order of the jobs, are limited only by your imagination.
+
+ ### JSON-to-YAML File Converter
+
+ All the jobs for this example are shipped with this library. In this example, we will write a pipeline that can read a JSON file and convert it to YAML. Pipelines are data-first, so we can represent a pipeline using a hash:
+
+ ````ruby
+ pipeline = {
+   jobs: [
+     {
+       name: :read,
+       type: 'io/read',
+       path: '{input_file}'
+     },
+     {
+       name: :output_id,
+       type: :echo,
+       message: 'The job id is: {__id}'
+     },
+     {
+       name: :output_value,
+       type: :echo,
+       message: 'The current value is: {__value}'
+     },
+     {
+       name: :parse,
+       type: 'deserialize/json'
+     },
+     {
+       name: :convert,
+       type: 'serialize/yaml'
+     },
+     {
+       name: :write,
+       type: 'io/write',
+       path: '{output_file}'
+     }
+   ],
+   steps: %i[
+     read
+     output_id
+     output_value
+     parse
+     convert
+     output_value
+     write
+   ]
+ }
+
+ params = {
+   input_file: 'input.json',
+   output_file: 'output.yaml'
+ }
+
+ payload = Burner::Payload.new(params: params)
+ ````
+
+ Assuming we are running this script from a directory where an `input.json` file exists, we can then programmatically process the pipeline:
+
+ ````ruby
+ Burner::Pipeline.make(pipeline).execute(payload: payload)
+ ````
+
+ We should now see an `output.yaml` file created.
+
+ Some notes:
+
+ * Some values can be string-interpolated using the provided `Payload#params`. This allows for passing runtime configuration/data into pipelines/jobs (see the sketch after this list).
+ * The job's ID can be accessed using the `__id` key.
+ * The current job's payload value can be accessed using the `__value` key.
+ * Jobs can be re-used (just like the `output_id` and `output_value` jobs).
+
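+ Conceptually, the interpolation works like this (a sketch based on this release's `job.rb` change further below, not a public API):
+
+ ````ruby
+ # Each job merges __id and __value into the params before evaluating a template:
+ templatable = params.merge(__id: 'some-uuid', __value: 'raw file contents')
+
+ # string_template.evaluate('The job id is: {__id}', templatable)
+ #   => "The job id is: some-uuid"
+ ````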
+ ### Capturing Feedback / Output
+
+ By default, output will be emitted to `$stdout`. You can add or change listeners by passing optional values into `Pipeline#execute`. For example, say we wanted to capture the output from our JSON-to-YAML example:
+
+ ````ruby
+ class StringOut
+   def initialize
+     @io = StringIO.new
+   end
+
+   def puts(msg)
+     tap { io.write("#{msg}\n") }
+   end
+
+   def read
+     io.rewind
+     io.read
+   end
+
+   private
+
+   attr_reader :io
+ end
+
+ string_out = StringOut.new
+ output     = Burner::Output.new(outs: string_out)
+ payload    = Burner::Payload.new(params: params)
+
+ Burner::Pipeline.make(pipeline).execute(output: output, payload: payload)
+
+ log = string_out.read
+ ````
+
+ The value of `log` should now look similar to:
+
+ ````bash
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] Pipeline started with 7 step(s)
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] Parameters:
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] - input_file: input.json
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] - output_file: output.yaml
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] --------------------------------------------------------------------------------
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] [1] Burner::Jobs::IO::Read::read
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] - Reading: spec/fixtures/input.json
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] - Completed in: 0.0 second(s)
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] [2] Burner::Jobs::Echo::output_id
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] - The job id is:
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] - Completed in: 0.0 second(s)
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] [3] Burner::Jobs::Echo::output_value
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] - The current value is:
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] - Completed in: 0.0 second(s)
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] [4] Burner::Jobs::Deserialize::Json::parse
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] - Completed in: 0.0 second(s)
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] [5] Burner::Jobs::Serialize::Yaml::convert
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] - Completed in: 0.0 second(s)
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] [6] Burner::Jobs::Echo::output_value
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] - The current value is:
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] - Completed in: 0.0 second(s)
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] [7] Burner::Jobs::IO::Write::write
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] - Writing: output.yaml
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] - Completed in: 0.0 second(s)
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] --------------------------------------------------------------------------------
+ [8bdc394e-7047-4a1a-87ed-6c54ed690ed5 | 2020-10-14 13:49:59 UTC] Pipeline ended, took 0.001 second(s) to complete
+ ````
+
+ Notes:
+
+ * The Job ID is specified as the leading UUID in each line.
+ * `outs` can be provided an array of listeners, as long as each listener responds to `puts(msg)` (see the sketch after this list).
+
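+ As a minimal sketch of that note (assuming only the listener contract above; `string_out` is the instance from the earlier example), output can fan out to several listeners at once:
+
+ ````ruby
+ # Any object responding to puts(msg) can act as a listener, so we can
+ # broadcast to both the console and the in-memory StringOut at once.
+ output = Burner::Output.new(outs: [$stdout, string_out])
+ ````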
+ ### Command Line Pipeline Processing
+
+ This library also ships with a built-in script, `exe/burner`, that illustrates using the `Burner::Cli` API. This class can take in an array of arguments (similar to a command line) and execute a pipeline. The first argument is the path to a YAML file with the pipeline's configuration, and each subsequent argument is a param in `key=value` form. Here is how the JSON-to-YAML example can utilize this interface:
+
+ #### Create YAML Pipeline Configuration File
+
+ Write the following `json_to_yaml_pipeline.yaml` file to disk:
+
+ ````yaml
+ jobs:
+   - name: read
+     type: io/read
+     path: '{input_file}'
+
+   - name: output_id
+     type: echo
+     message: 'The job id is: {__id}'
+
+   - name: output_value
+     type: echo
+     message: 'The current value is: {__value}'
+
+   - name: parse
+     type: deserialize/json
+
+   - name: convert
+     type: serialize/yaml
+
+   - name: write
+     type: io/write
+     path: '{output_file}'
+
+ steps:
+   - read
+   - output_id
+   - output_value
+   - parse
+   - convert
+   - output_value
+   - write
+ ````
+
+ #### Run Using Script
+
+ From the command line, run:
+
+ ````bash
+ bundle exec burner json_to_yaml_pipeline.yaml input_file=input.json output_file=output.yaml
+ ````
+
+ The pipeline should be processed and `output.yaml` should be created.
+
+ #### Run Using Programmatic API
+
+ Instead of the script, you can invoke it using code:
+
+ ````ruby
+ args = %w[
+   json_to_yaml_pipeline.yaml
+   input_file=input.json
+   output_file=output.yaml
+ ]
+
+ Burner::Cli.new(args).execute
+ ````
+
+ ### Core Job Library
+
+ This library ships with only very basic, rudimentary jobs that are meant to serve as a baseline:
+
+ #### Collection
+
+ * **collection/arrays_to_objects** [mappings]: Convert an array of arrays to an array of objects.
+ * **collection/graph** [config, key]: Use [Hashematics](https://github.com/bluemarblepayroll/hashematics) to turn a flat array of objects into a deeply nested object tree.
+ * **collection/objects_to_arrays** [mappings]: Convert an array of objects to an array of arrays.
+ * **collection/shift** [amount]: Remove the first N elements from an array (see the sketch after this list).
+ * **collection/transform** [attributes, exclusive, separator]: Iterate over all objects and transform each key per the attribute transformers' specifications. If exclusive is set to false, then the current object will be overridden/merged. Separator can also be set for key-path support. This job uses [Realize](https://github.com/bluemarblepayroll/realize), which provides its own extendable value-transformation pipeline.
+ * **collection/unpivot** [pivot_set]: Take an array of objects and unpivot specific sets of keys into rows. Under the hood it uses [HashMath's Unpivot class](https://github.com/bluemarblepayroll/hash_math#unpivot-hash-key-coalescence-and-row-extrapolation).
+ * **collection/values** [include_keys]: Take an array of objects and call `#values` on each object. If include_keys is true (it is false by default), then call `#keys` on the first object and inject that as a "header" object.
+
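+ As a quick, hypothetical sketch of two of these jobs (job names invented here; parameters exactly as documented above), here is a pipeline fragment that drops a header row and reduces the remaining objects to raw rows:
+
+ ````yaml
+ jobs:
+   # Remove the first element (e.g. a header row) from the array.
+   - name: drop_header
+     type: collection/shift
+     amount: 1
+
+   # Call #values on each remaining object, without injecting a header object.
+   - name: rows
+     type: collection/values
+     include_keys: false
+
+ steps:
+   - drop_header
+   - rows
+ ````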
+ #### De-serialization
+
+ * **deserialize/csv** []: Take a CSV string and de-serialize it into object(s). Currently it will return an array of arrays, with each nested array representing one row.
+ * **deserialize/json** []: Treat input as a string and de-serialize it to JSON.
+ * **deserialize/yaml** [safe]: Treat input as a string and de-serialize it to YAML. By default it will try to [safely de-serialize](https://ruby-doc.org/stdlib-2.6.1/libdoc/psych/rdoc/Psych.html#method-c-safe_load) it (only using core classes). If you wish to de-serialize it to any class type, pass in `safe: false` (see the sketch after this list).
+
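+ For example, a minimal sketch (job name invented here) of a job definition that opts out of safe de-serialization; use with care, since arbitrary class types can then be instantiated:
+
+ ````yaml
+ - name: parse
+   type: deserialize/yaml
+   safe: false
+ ````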
+ #### IO
+
+ * **io/exist** [path, short_circuit]: Check to see if a file exists. The path parameter can be interpolated using `Payload#params`. If short_circuit is set to true (defaults to false) and the file does not exist, then the pipeline will be short-circuited (see the sketch after this list).
+ * **io/read** [binary, path]: Read in a local file. The path parameter can be interpolated using `Payload#params`. If the contents are binary, pass in `binary: true` to open it in binary+read mode.
+ * **io/write** [binary, path]: Write to a local file. The path parameter can be interpolated using `Payload#params`. If the contents are binary, pass in `binary: true` to open it in binary+write mode.
+
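+ For instance, a sketch (job name invented here; parameters as documented above) of a guard job that short-circuits the rest of the pipeline when the input file is missing:
+
+ ````yaml
+ - name: guard
+   type: io/exist
+   path: '{input_file}'
+   short_circuit: true
+ ````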
+ #### Serialization
+
+ * **serialize/csv** []: Take an array of arrays and create a CSV.
+ * **serialize/json** []: Convert value to JSON.
+ * **serialize/yaml** []: Convert value to YAML.
+
+ #### General
+
+ * **dummy** []: Do nothing.
+ * **echo** [message]: Write a message to the output. The message parameter can be interpolated using `Payload#params`.
+ * **set** [value]: Set the value to any arbitrary value (see the sketch after this list).
+ * **sleep** [seconds]: Sleep the thread for X number of seconds.
+
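+ As one last sketch (hypothetical job names; parameters as documented above), a fragment that seeds an arbitrary value and then pauses briefly:
+
+ ````yaml
+ - name: seed
+   type: set
+   value: [1, 2, 3]
+
+ - name: wait
+   type: sleep
+   seconds: 2
+ ````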
+ ### Adding & Registering Jobs
+
+ Where this library shines is when additional jobs are plugged in. Burner uses its `Burner::Jobs` class as its class-level registry, built with [acts_as_hashable](https://github.com/bluemarblepayroll/acts_as_hashable)'s `acts_as_hashable_factory` directive.
+
+ Let's say we would like to register a job to parse a CSV:
+
+ ````ruby
+ class ParseCsv < Burner::Job
+   def perform(output, payload)
+     payload.value = CSV.parse(payload.value, headers: true).map(&:to_h)
+
+     nil
+   end
+ end
+
+ Burner::Jobs.register('parse_csv', ParseCsv)
+ ````
+
+ `parse_csv` is now recognized as a valid job and we can use it:
+
+ ````ruby
+ pipeline = {
+   jobs: [
+     {
+       name: :read,
+       type: 'io/read',
+       path: '{input_file}'
+     },
+     {
+       name: :output_id,
+       type: :echo,
+       message: 'The job id is: {__id}'
+     },
+     {
+       name: :output_value,
+       type: :echo,
+       message: 'The current value is: {__value}'
+     },
+     {
+       name: :parse,
+       type: :parse_csv
+     },
+     {
+       name: :convert,
+       type: 'serialize/yaml'
+     },
+     {
+       name: :write,
+       type: 'io/write',
+       path: '{output_file}'
+     }
+   ],
+   steps: %i[
+     read
+     output_id
+     output_value
+     parse
+     convert
+     output_value
+     write
+   ]
+ }
+
+ params = {
+   input_file: File.join('spec', 'fixtures', 'cars.csv'),
+   output_file: File.join(TEMP_DIR, "#{SecureRandom.uuid}.yaml")
+ }
+
+ payload = Burner::Payload.new(params: params)
+
+ Burner::Pipeline.make(pipeline).execute(output: output, payload: payload)
+ ````
 
  ## Contributing
 
data/burner.gemspec CHANGED
@@ -29,7 +29,10 @@ Gem::Specification.new do |s|
   s.required_ruby_version = '>= 2.5'
 
   s.add_dependency('acts_as_hashable', '~>1.2')
+  s.add_dependency('hashematics', '~>1.1')
+  s.add_dependency('hash_math', '~>1.2')
   s.add_dependency('objectable', '~>1.0')
+  s.add_dependency('realize', '~>1.2')
   s.add_dependency('stringento', '~>2.1')
 
   s.add_development_dependency('guard-rspec', '~>4.7')
data/lib/burner.rb CHANGED
@@ -9,11 +9,21 @@
 
  require 'acts_as_hashable'
  require 'benchmark'
+ require 'csv'
+ require 'forwardable'
+ require 'hash_math'
+ require 'hashematics'
  require 'json'
  require 'objectable'
+ require 'realize'
  require 'securerandom'
  require 'singleton'
  require 'stringento'
+ require 'time'
  require 'yaml'
 
+ # Common/Shared
+ require_relative 'burner/modeling'
+
+ # Main Entrypoint(s)
  require_relative 'burner/cli'
data/lib/burner/cli.rb CHANGED
@@ -12,18 +12,18 @@ require_relative 'pipeline'
  module Burner
    # Process a single string as a Pipeline. This is mainly to back the command-line interface.
    class Cli
-     attr_reader :params, :pipeline
+     attr_reader :payload, :pipeline
 
      def initialize(args)
-       path       = args.first
-       cli_params = extract_cli_params(args)
-       config     = read_yaml(path)
-       @pipeline  = Burner::Pipeline.make(jobs: config['jobs'], steps: config['steps'])
-       @params    = (config['params'] || {}).merge(cli_params)
+       path      = args.first
+       params    = extract_cli_params(args)
+       config    = read_yaml(path)
+       @pipeline = Burner::Pipeline.make(jobs: config['jobs'], steps: config['steps'])
+       @payload  = Payload.new(params: params)
      end
 
      def execute
-       pipeline.execute(params: params)
+       pipeline.execute(payload: payload)
      end
 
      private
data/lib/burner/job.rb CHANGED
@@ -28,8 +28,10 @@ module Burner
 
      private
 
-     def eval_string_template(expression, input)
-       string_template.evaluate(expression, input)
+     def job_string_template(expression, output, payload)
+       templatable_params = payload.params.merge(__id: output.id, __value: payload.value)
+
+       string_template.evaluate(expression, templatable_params)
      end
    end
  end
data/lib/burner/jobs.rb CHANGED
@@ -8,12 +8,22 @@
 
  require_relative 'job'
+ require_relative 'jobs/collection/arrays_to_objects'
+ require_relative 'jobs/collection/graph'
+ require_relative 'jobs/collection/objects_to_arrays'
+ require_relative 'jobs/collection/shift'
+ require_relative 'jobs/collection/transform'
+ require_relative 'jobs/collection/unpivot'
+ require_relative 'jobs/collection/values'
+ require_relative 'jobs/deserialize/csv'
  require_relative 'jobs/deserialize/json'
  require_relative 'jobs/deserialize/yaml'
  require_relative 'jobs/dummy'
  require_relative 'jobs/echo'
+ require_relative 'jobs/io/exist'
  require_relative 'jobs/io/read'
  require_relative 'jobs/io/write'
+ require_relative 'jobs/serialize/csv'
  require_relative 'jobs/serialize/json'
  require_relative 'jobs/serialize/yaml'
  require_relative 'jobs/set'
@@ -26,15 +36,25 @@ module Burner
    class Jobs
      acts_as_hashable_factory
 
-     register 'deserialize/json', Deserialize::Json
-     register 'deserialize/yaml', Deserialize::Yaml
-     register 'dummy', '',        Dummy
-     register 'echo',             Echo
-     register 'io/read',          IO::Read
-     register 'io/write',         IO::Write
-     register 'serialize/json',   Serialize::Json
-     register 'serialize/yaml',   Serialize::Yaml
-     register 'set',              Set
-     register 'sleep',            Sleep
+     register 'collection/arrays_to_objects', Collection::ArraysToObjects
+     register 'collection/graph',             Collection::Graph
+     register 'collection/objects_to_arrays', Collection::ObjectsToArrays
+     register 'collection/shift',             Collection::Shift
+     register 'collection/transform',         Collection::Transform
+     register 'collection/unpivot',           Collection::Unpivot
+     register 'collection/values',            Collection::Values
+     register 'deserialize/csv',              Deserialize::Csv
+     register 'deserialize/json',             Deserialize::Json
+     register 'deserialize/yaml',             Deserialize::Yaml
+     register 'dummy', '',                    Dummy
+     register 'echo',                         Echo
+     register 'io/exist',                     IO::Exist
+     register 'io/read',                      IO::Read
+     register 'io/write',                     IO::Write
+     register 'serialize/csv',                Serialize::Csv
+     register 'serialize/json',               Serialize::Json
+     register 'serialize/yaml',               Serialize::Yaml
+     register 'set',                          Set
+     register 'sleep',                        Sleep
    end
  end