daru 0.1.3.1 → 0.1.4

Sign up to get free protection for your applications and to get access to all the features.
Files changed (92)
  1. checksums.yaml +4 -4
  2. data/.gitignore +2 -0
  3. data/.rspec +2 -1
  4. data/.rspec_formatter.rb +33 -0
  5. data/.rubocop.yml +26 -2
  6. data/History.md +38 -0
  7. data/README.md +22 -13
  8. data/Rakefile +50 -2
  9. data/benchmarks/csv_reading.rb +22 -0
  10. data/daru.gemspec +9 -2
  11. data/lib/daru.rb +36 -4
  12. data/lib/daru/accessors/array_wrapper.rb +6 -1
  13. data/lib/daru/accessors/dataframe_by_row.rb +10 -2
  14. data/lib/daru/accessors/gsl_wrapper.rb +1 -3
  15. data/lib/daru/accessors/nmatrix_wrapper.rb +9 -0
  16. data/lib/daru/category.rb +935 -0
  17. data/lib/daru/core/group_by.rb +29 -38
  18. data/lib/daru/core/merge.rb +186 -145
  19. data/lib/daru/core/query.rb +22 -11
  20. data/lib/daru/dataframe.rb +976 -885
  21. data/lib/daru/date_time/index.rb +166 -166
  22. data/lib/daru/date_time/offsets.rb +66 -77
  23. data/lib/daru/formatters/table.rb +54 -0
  24. data/lib/daru/helpers/array.rb +40 -0
  25. data/lib/daru/index.rb +476 -73
  26. data/lib/daru/io/io.rb +66 -45
  27. data/lib/daru/io/sql_data_source.rb +33 -62
  28. data/lib/daru/iruby/helpers.rb +38 -0
  29. data/lib/daru/iruby/templates/dataframe.html.erb +52 -0
  30. data/lib/daru/iruby/templates/dataframe_mi.html.erb +58 -0
  31. data/lib/daru/iruby/templates/multi_index.html.erb +12 -0
  32. data/lib/daru/iruby/templates/vector.html.erb +27 -0
  33. data/lib/daru/iruby/templates/vector_mi.html.erb +36 -0
  34. data/lib/daru/maths/arithmetic/dataframe.rb +16 -18
  35. data/lib/daru/maths/arithmetic/vector.rb +4 -6
  36. data/lib/daru/maths/statistics/dataframe.rb +8 -15
  37. data/lib/daru/maths/statistics/vector.rb +120 -98
  38. data/lib/daru/monkeys.rb +12 -40
  39. data/lib/daru/plotting/gruff.rb +3 -0
  40. data/lib/daru/plotting/gruff/category.rb +49 -0
  41. data/lib/daru/plotting/gruff/dataframe.rb +91 -0
  42. data/lib/daru/plotting/gruff/vector.rb +57 -0
  43. data/lib/daru/plotting/nyaplot.rb +3 -0
  44. data/lib/daru/plotting/nyaplot/category.rb +34 -0
  45. data/lib/daru/plotting/nyaplot/dataframe.rb +187 -0
  46. data/lib/daru/plotting/nyaplot/vector.rb +46 -0
  47. data/lib/daru/vector.rb +694 -421
  48. data/lib/daru/version.rb +1 -1
  49. data/profile/_base.rb +23 -0
  50. data/profile/df_to_a.rb +10 -0
  51. data/profile/filter.rb +13 -0
  52. data/profile/joining.rb +13 -0
  53. data/profile/sorting.rb +12 -0
  54. data/profile/vector_each_with_index.rb +9 -0
  55. data/spec/accessors/wrappers_spec.rb +2 -4
  56. data/spec/categorical_spec.rb +1734 -0
  57. data/spec/core/group_by_spec.rb +52 -2
  58. data/spec/core/merge_spec.rb +63 -2
  59. data/spec/core/query_spec.rb +236 -80
  60. data/spec/dataframe_spec.rb +1373 -79
  61. data/spec/date_time/data_spec.rb +3 -5
  62. data/spec/date_time/index_spec.rb +154 -17
  63. data/spec/date_time/offsets_spec.rb +3 -4
  64. data/spec/fixtures/empties.dat +2 -0
  65. data/spec/fixtures/strings.dat +2 -0
  66. data/spec/formatters/table_formatter_spec.rb +99 -0
  67. data/spec/helpers_spec.rb +8 -0
  68. data/spec/index/categorical_index_spec.rb +168 -0
  69. data/spec/index/index_spec.rb +283 -0
  70. data/spec/index/multi_index_spec.rb +570 -0
  71. data/spec/io/io_spec.rb +31 -4
  72. data/spec/io/sql_data_source_spec.rb +0 -1
  73. data/spec/iruby/dataframe_spec.rb +172 -0
  74. data/spec/iruby/helpers_spec.rb +49 -0
  75. data/spec/iruby/multi_index_spec.rb +37 -0
  76. data/spec/iruby/vector_spec.rb +107 -0
  77. data/spec/math/arithmetic/dataframe_spec.rb +71 -13
  78. data/spec/math/arithmetic/vector_spec.rb +8 -10
  79. data/spec/math/statistics/dataframe_spec.rb +3 -5
  80. data/spec/math/statistics/vector_spec.rb +45 -55
  81. data/spec/monkeys_spec.rb +32 -9
  82. data/spec/plotting/dataframe_spec.rb +386 -0
  83. data/spec/plotting/vector_spec.rb +230 -0
  84. data/spec/shared/vector_display_spec.rb +215 -0
  85. data/spec/spec_helper.rb +23 -0
  86. data/spec/vector_spec.rb +905 -138
  87. metadata +143 -11
  88. data/.rubocop_todo.yml +0 -44
  89. data/lib/daru/plotting/dataframe.rb +0 -104
  90. data/lib/daru/plotting/vector.rb +0 -38
  91. data/spec/daru_spec.rb +0 -58
  92. data/spec/index_spec.rb +0 -375
@@ -11,20 +11,21 @@ module Daru
11
11
  end
12
12
  end
13
13
 
14
+ TUPLE_SORTER = lambda do |a, b|
15
+ if a && b
16
+ a.compact <=> b.compact
17
+ else
18
+ a ? 1 : -1
19
+ end
20
+ end
21
+
14
22
  def initialize context, names
15
23
  @groups = {}
16
24
  @non_group_vectors = context.vectors.to_a - names
17
25
  @context = context
18
26
  vectors = names.map { |vec| context[vec].to_a }
19
27
  tuples = vectors[0].zip(*vectors[1..-1])
20
- keys =
21
- tuples.uniq.sort do |a,b|
22
- if a && b
23
- a.compact <=> b.compact
24
- else
25
- a ? 1 : -1
26
- end
27
- end
28
+ keys = tuples.uniq.sort(&TUPLE_SORTER)
28
29
 
29
30
  keys.each do |key|
30
31
  @groups[key] = all_indices_for(tuples, key)
@@ -189,17 +190,9 @@ module Daru
189
190
  # # 5 bar two 6 66
190
191
  def get_group group
191
192
  indexes = @groups[group]
192
- elements = []
193
-
194
- @context.each_vector do |vector|
195
- elements << vector.to_a
196
- end
197
- rows = []
193
+ elements = @context.each_vector.map(&:to_a)
198
194
  transpose = elements.transpose
199
-
200
- indexes.each do |idx|
201
- rows << transpose[idx]
202
- end
195
+ rows = indexes.each.map { |idx| transpose[idx] }
203
196
 
204
197
  new_index =
205
198
  begin
@@ -207,6 +200,7 @@ module Daru
207
200
  rescue IndexError
208
201
  indexes
209
202
  end
203
+
210
204
  Daru::DataFrame.rows(
211
205
  rows, index: new_index, order: @context.vectors
212
206
  )
@@ -224,7 +218,7 @@ module Daru
224
218
  # })
225
219
  # df.group_by([:a]).reduce('') { |result, row| result += row[:c]; result }
226
220
  # # =>
227
- # # #<Daru::Vector:70343147159900 @name = nil @metadata = {} @size = 2 >
221
+ # # #<Daru::Vector:70343147159900 @name = nil @size = 2 >
228
222
  # # nil
229
223
  # # a ACE
230
224
  # # b BDF
@@ -268,33 +262,30 @@ module Daru
268
262
  end
269
263
 
270
264
  def apply_method method_type, method
271
- multi_index = multi_indexed_grouping?
272
- rows, order = [], []
273
-
274
- @groups.each do |_group, indexes|
275
- single_row = []
276
- @non_group_vectors.each do |ngvector|
277
- vec = @context[ngvector]
278
- if method_type == :numeric && vec.type == :numeric
279
- slice = vec[*indexes]
280
- single_row << (slice.is_a?(Daru::Vector) ? slice.send(method) : slice)
281
- end
282
- end
283
-
284
- rows << single_row
265
+ order = @non_group_vectors.select do |ngvec|
266
+ method_type == :numeric && @context[ngvec].type == :numeric
285
267
  end
286
268
 
287
- @non_group_vectors.each do |ngvec|
288
- order << ngvec if
289
- method_type == :numeric && @context[ngvec].type == :numeric
269
+ rows = @groups.map do |_group, indexes|
270
+ order.map do |ngvector|
271
+ slice = @context[ngvector][*indexes]
272
+ slice.is_a?(Daru::Vector) ? slice.send(method) : slice
273
+ end
290
274
  end
291
275
 
292
- index = @groups.keys
293
- index = multi_index ? Daru::MultiIndex.from_tuples(index) : Daru::Index.new(index.flatten)
276
+ index = apply_method_index
294
277
  order = Daru::Index.new(order)
295
278
  Daru::DataFrame.new(rows.transpose, index: index, order: order)
296
279
  end
297
280
 
281
+ def apply_method_index
282
+ if multi_indexed_grouping?
283
+ Daru::MultiIndex.from_tuples(@groups.keys)
284
+ else
285
+ Daru::Index.new(@groups.keys.flatten)
286
+ end
287
+ end
288
+
298
289
  def all_indices_for arry, element
299
290
  found, index, indexes = -1, -1, []
300
291
  while found
@@ -1,210 +1,251 @@
1
1
  module Daru
2
2
  module Core
3
- module MergeHelper
4
- class << self
5
- def replace_keys_if_duplicates hash, matcher
6
- matched = nil
7
- hash.keys.each { |d|
8
- if matcher.match(Regexp.new(d.to_s))
9
- matched = d
10
- break
11
- end
12
- }
13
-
14
- return unless matched
15
-
16
- hash[matcher] = hash[matched]
17
- hash.delete matched
18
- end
3
+ class MergeFrame
4
+ class NilSorter
5
+ include Comparable
19
6
 
20
- def resolve_duplicates df_hash1, df_hash2, on
21
- hk = df_hash1.keys + df_hash2.keys - on
22
- recoded = hk.recode_repeated.map(&:to_sym)
23
- diff = (recoded - hk).sort
7
+ def nil?
8
+ true
9
+ end
24
10
 
25
- diff.each_slice(2) do |a|
26
- replace_keys_if_duplicates df_hash1, a[0]
27
- replace_keys_if_duplicates df_hash2, a[1]
28
- end
11
+ def ==(_other)
12
+ false
29
13
  end
30
14
 
31
- def hashify df
32
- hsh = df.to_h
33
- hsh.each { |k,v| hsh[k] = v.to_a }
34
- hsh
15
+ def <=>(other)
16
+ other.nil? ? 0 : -1
35
17
  end
18
+ end
36
19
 
37
- def arrayify df
38
- arr = df.to_a
39
- col_names = arr[0][0].keys
40
- values = arr[0].map(&:values)
20
+ def initialize left_df, right_df, opts={}
21
+ @on = opts[:on]
22
+ @keep_left, @keep_right = extract_left_right(opts[:how])
41
23
 
42
- [col_names, values]
43
- end
24
+ validate_on!(left_df, right_df)
44
25
 
45
- def arrayify_with_sort_keys(size, df_hash, on)
46
- # Converting to a hash and then to an array is more complex
47
- # than using df.to_a or df.map(:row). However, it's
48
- # substantially faster this way.
49
-
50
- # idx_keys = on.map { |key| df_hash.keys.index(key) }
51
-
52
- (0...size).reduce([]) do |r, idx|
53
- key_values = on.map { |col| df_hash[col][idx] }
54
- row_values = df_hash.map { |_col, val| val[idx] }
55
- r << [key_values, row_values]
56
- end
57
-
58
- # Conceptually simpler and does the same thing, but slows down the
59
- # total merge algorithm by 2x. Would be nice to improve the performance
60
- # of df.map(:row)
61
- #
62
- # df.map(:row) do |row|
63
- # key_values = on.map { |key| row[key] }
64
- # [key_values, row.to_a]
65
- # end
66
- end
26
+ key_sanitizer = ->(h) { sanitize_merge_keys(h.values_at(*on)) }
27
+
28
+ @left = df_to_a(left_df)
29
+ @left.sort_by!(&key_sanitizer)
30
+ @left_key_values = @left.map(&key_sanitizer)
31
+
32
+ @right = df_to_a(right_df)
33
+ @right.sort_by!(&key_sanitizer)
34
+ @right_key_values = @right.map(&key_sanitizer)
35
+
36
+ @left_keys, @right_keys = merge_keys(left_df, right_df, on)
37
+ end
67
38
 
68
- def verify_dataframes df_hash1, df_hash2, on
69
- raise ArgumentError,
70
- 'All fields in :on must be present in self' unless on.all? { |e| df_hash1[e] }
71
- raise ArgumentError,
72
- 'All fields in :on must be present in other DF' unless on.all? { |e| df_hash2[e] }
39
+ def join
40
+ res = []
41
+
42
+ until left.empty? && right.empty?
43
+ lkey = first_left_key
44
+ rkey = first_right_key
45
+
46
+ row(lkey, rkey).tap { |r| res << r if r }
73
47
  end
48
+
49
+ Daru::DataFrame.new(res, order: left_keys.values + on + right_keys.values)
74
50
  end
75
- end
76
51
 
77
- class MergeFrame
78
- def initialize(df1, df2, on: nil)
79
- @df1 = df1
80
- @df2 = df2
81
- @on = on
52
+ private
53
+
54
+ attr_reader :on,
55
+ :left, :left_key_values, :keep_left, :left_keys,
56
+ :right, :right_key_values, :keep_right, :right_keys
57
+
58
+ attr_accessor :merge_key
59
+
60
+ LEFT_RIGHT_COMBINATIONS = {
61
+ # left right
62
+ inner: [false, false],
63
+ left: [true, false],
64
+ right: [false, true],
65
+ outer: [true, true]
66
+ }.freeze
67
+
68
+ def extract_left_right(how)
69
+ LEFT_RIGHT_COMBINATIONS[how] or
70
+ raise ArgumentError, "Unrecognized join option: #{how}"
71
+ end
72
+
73
+ def sanitize_merge_keys(merge_keys)
74
+ merge_keys.map { |v| v || NilSorter.new }
82
75
  end
83
76
 
84
- def inner _opts
85
- merge_join(left: false, right: false)
77
+ def df_to_a df
78
+ # FIXME: much faster than "native" DataFrame#to_a. Should not be
79
+ h = df.to_h
80
+ keys = h.keys
81
+ h.values.map(&:to_a).transpose.map { |r| keys.zip(r).to_h }
86
82
  end
87
83
 
88
- def left _opts
89
- merge_join(left: true, right: false)
84
+ def merge_keys(df1, df2, on)
85
+ duplicates =
86
+ (df1.vectors.to_a + df2.vectors.to_a - on)
87
+ .group_by(&:itself)
88
+ .select { |_, g| g.count == 2 }.map(&:first)
89
+
90
+ [
91
+ guard_keys(df1.vectors.to_a - on, duplicates, 1),
92
+ guard_keys(df2.vectors.to_a - on, duplicates, 2)
93
+ ]
90
94
  end
91
95
 
92
- def right _opts
93
- merge_join(left: false, right: true)
96
+ def guard_keys keys, duplicates, num
97
+ keys.map { |v| [v, guard_duplicate(v, duplicates, num)] }.to_h
94
98
  end
95
99
 
96
- def outer _opts
97
- merge_join(left: true, right: true)
100
+ def guard_duplicate val, duplicates, num
101
+ duplicates.include?(val) ? :"#{val}_#{num}" : val
98
102
  end
99
103
 
100
- def merge_join(left: true, right: true)
101
- MergeHelper.verify_dataframes df1_hash, df2_hash, @on
102
- MergeHelper.resolve_duplicates df1_hash, df2_hash, @on
104
+ def row(lkey, rkey)
105
+ case
106
+ when !lkey && !rkey
107
+ # :nocov:
108
+ # It's just an impossibility handler, can't be covered :)
109
+ raise 'Unexpected condition met during merge'
110
+ # :nocov:
111
+ when lkey == rkey
112
+ self.merge_key = lkey
113
+ merge_matching_rows
114
+ when !rkey || lt(lkey, rkey)
115
+ left_row_missing_right
116
+ else # !lkey || lt(rkey, lkey)
117
+ right_row_missing_left
118
+ end
119
+ end
103
120
 
104
- # TODO: Use native dataframe sorting.
105
- # It would be ideal to reuse sorting functionality that is native
106
- # to dataframes. Unfortunately, native dataframe sort introduces
107
- # an overhead that reduces join performance by a factor of 4! Until
108
- # that aspect is improved, we resort to a simpler array sort.
109
- df1_array.sort_by! { |row| [row[0].nil? ? 0 : 1, row[0]] }
110
- df2_array.sort_by! { |row| [row[0].nil? ? 0 : 1, row[0]] }
121
+ def merge_matching_rows
122
+ if one_to_one_merge?
123
+ merge_rows(one_to_one_left_row, one_to_one_right_row)
124
+ elsif one_to_many_merge?
125
+ merge_rows(one_to_many_left_row, one_to_many_right_row)
126
+ else
127
+ result = cartesian_product.shift
128
+ end_cartesian_product if cartesian_product.empty?
129
+ result
130
+ end
131
+ end
111
132
 
112
- idx1 = 0
113
- idx2 = 0
133
+ def one_to_one_merge?
134
+ merge_key != next_left_key && merge_key != next_right_key
135
+ end
114
136
 
115
- while idx1 < @df1.size || idx2 < @df2.size
137
+ def one_to_many_merge?
138
+ !(merge_key == next_left_key && merge_key == next_right_key)
139
+ end
116
140
 
117
- key1 = df1_array[idx1][0] if idx1 < @df1.size
118
- key2 = df2_array[idx2][0] if idx2 < @df2.size
141
+ def one_to_one_left_row
142
+ left_key_values.shift
143
+ left.shift
144
+ end
119
145
 
120
- if key1 == key2 && idx1 < @df1.size && idx2 < @df2.size
121
- idx2_start = idx2
146
+ def one_to_many_left_row
147
+ if next_right_key && first_right_key == next_right_key
148
+ left.first
149
+ else
150
+ left_key_values.shift
151
+ left.shift
152
+ end
153
+ end
122
154
 
123
- while (idx2 < @df2.size) && (df1_array[idx1][0] == df2_array[idx2][0])
124
- add_merge_row_to_hash([df1_array[idx1], df2_array[idx2]], joined_hash)
125
- idx2 += 1
126
- end
155
+ def one_to_one_right_row
156
+ right_key_values.shift
157
+ right.shift
158
+ end
127
159
 
128
- idx2 = idx2_start if idx1+1 < @df1.size && df1_array[idx1][0] == df1_array[idx1+1][0]
129
- idx1 += 1
130
- elsif ((key2.nil? || [key1,key2].sort == [key1,key2]) && idx1 < @df1.size) || idx2 == @df2.size
131
- add_merge_row_to_hash([df1_array[idx1], nil], joined_hash) if left
132
- idx1 += 1
133
- elsif idx2 < @df2.size || idx1 == @df1.size
134
- add_merge_row_to_hash([nil, df2_array[idx2]], joined_hash) if right
135
- idx2 += 1
136
- else
137
- raise 'Unexpected condition met during merge'
138
- end
160
+ def one_to_many_right_row
161
+ if next_left_key && first_left_key == next_left_key
162
+ right.first
163
+ else
164
+ right_key_values.shift
165
+ right.shift
139
166
  end
167
+ end
140
168
 
141
- Daru::DataFrame.new(joined_hash, order: joined_hash.keys)
169
+ def left_row_missing_right
170
+ val = one_to_one_left_row
171
+ expand_row(val, left_keys) if keep_left
142
172
  end
143
173
 
144
- private
174
+ def right_row_missing_left
175
+ val = one_to_one_right_row
176
+ expand_row(val, right_keys) if keep_right
177
+ end
145
178
 
146
- def joined_hash
147
- return @joined_hash if @joined_hash
148
- @joined_hash ||= {}
179
+ def lt(k1, k2)
180
+ (k1 <=> k2) == -1
181
+ end
149
182
 
150
- ((df1_keys - @on) | @on | (df2_keys - @on)).each do |k|
151
- @joined_hash[k] = []
152
- end
183
+ def merge_rows lrow, rrow
184
+ left_keys
185
+ .map { |from, to| [to, lrow[from]] }.to_h
186
+ .merge(on.map { |col| [col, lrow[col]] }.to_h)
187
+ .merge(right_keys.map { |from, to| [to, rrow[from]] }.to_h)
188
+ end
153
189
 
154
- @joined_hash
190
+ def expand_row row, renamings
191
+ renamings
192
+ .map { |from, to| [to, row[from]] }.to_h
193
+ .merge(on.map { |col| [col, row[col]] }.to_h)
155
194
  end
156
195
 
157
- def df1_hash
158
- @df1_hash ||= MergeHelper.hashify @df1
196
+ def first_right_key
197
+ right_key_values.empty? ? nil : right_key_values.first
159
198
  end
160
199
 
161
- def df2_hash
162
- @df2_hash ||= MergeHelper.hashify @df2
200
+ def next_right_key
201
+ right_key_values.size <= 1 ? nil : right_key_values[1]
163
202
  end
164
203
 
165
- def df1_array
166
- @df1_array ||= MergeHelper.arrayify_with_sort_keys @df1.size, df1_hash, @on
204
+ def first_left_key
205
+ left_key_values.empty? ? nil : left_key_values.first
167
206
  end
168
207
 
169
- def df2_array
170
- @df2_array ||= MergeHelper.arrayify_with_sort_keys @df2.size, df2_hash, @on
208
+ def next_left_key
209
+ left_key_values.size <= 1 ? nil : left_key_values[1]
171
210
  end
172
211
 
173
- def df1_keys
174
- df1_hash.keys
212
+ def left_rows_at_merge_key
213
+ left.take_while { |arr| sanitize_merge_keys(arr.values_at(*on)) == merge_key }
175
214
  end
176
215
 
177
- def df2_keys
178
- df2_hash.keys
216
+ def right_rows_at_merge_key
217
+ right.take_while { |arr| sanitize_merge_keys(arr.values_at(*on)) == merge_key }
179
218
  end
180
219
 
181
- # Private: The merge row contains two elements, the first is the row from the
182
- # first dataframe, the second is the row from the second dataframe.
183
- def add_merge_row_to_hash row, hash
184
- @df1_key_to_index ||= df1_keys.each_with_index.map { |k,idx| [k, idx] }.to_h
185
- @df2_key_to_index ||= df2_keys.each_with_index.map { |k,idx| [k, idx] }.to_h
220
+ def cartesian_product
221
+ @cartesian_product ||= left_rows_at_merge_key.product(right_rows_at_merge_key).map do |left_row, right_row|
222
+ merge_rows(left_row, right_row)
223
+ end
224
+ end
186
225
 
187
- hash.each do |k,v|
188
- v ||= []
226
+ def end_cartesian_product
227
+ left_size = left_rows_at_merge_key.size
228
+ left_key_values.shift(left_size)
229
+ left.shift(left_size)
189
230
 
190
- left = df1_keys.include?(k) ? row[0] && row[0][1][@df1_key_to_index[k]] : nil
191
- right = df2_keys.include?(k) ? row[1] && row[1][1][@df2_key_to_index[k]] : nil
231
+ right_size = right_rows_at_merge_key.size
232
+ right_key_values.shift(right_size)
233
+ right.shift(right_size)
234
+ @cartesian_product = nil
235
+ end
192
236
 
193
- v << (left || right)
237
+ def validate_on!(left_df, right_df)
238
+ @on.each do |on|
239
+ left_df.has_vector?(on) && right_df.has_vector?(on) or
240
+ raise ArgumentError, "Both dataframes expected to have #{on.inspect} field"
194
241
  end
195
242
  end
196
243
  end
197
244
 
198
- # Private module containing methods for join, merge, concat operations on
199
- # dataframes and vectors.
200
- # @private
201
245
  module Merge
202
246
  class << self
203
247
  def join df1, df2, opts={}
204
- on = opts[:on]
205
-
206
- mf = MergeFrame.new df1, df2, on: on
207
- mf.send opts[:how], {}
248
+ MergeFrame.new(df1, df2, opts).join
208
249
  end
209
250
  end
210
251
  end