schema-inference 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +10 -0
- data/.rspec +2 -0
- data/.travis.yml +5 -0
- data/CODE_OF_CONDUCT.md +49 -0
- data/Gemfile +4 -0
- data/LICENSE.txt +21 -0
- data/README.md +97 -0
- data/Rakefile +6 -0
- data/bin/console +11 -0
- data/bin/setup +8 -0
- data/lib/extensions/boolean.rb +3 -0
- data/lib/schema/inference.rb +21 -0
- data/lib/schema/inference/schema_inferrer.rb +251 -0
- data/lib/schema/inference/version.rb +5 -0
- data/schema-inference.gemspec +29 -0
- metadata +157 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA1:
  metadata.gz: e9966b0286d35c69194293d9895fe6e7198c893d
  data.tar.gz: d15a656c7a2c654a618b29f976f27926de913180
SHA512:
  metadata.gz: 7d33da3b607dba3848fc05d8a19d5c24554e39809302146b7581749923afa34fe980caa4cf18cf32d2ca2d40f885fedc0c4f273f7a25661f1baa878bde4d2bf4
  data.tar.gz: 2fa3693d384696779211b7f5706a80453e473d2d986579293d0ee7ceb8a4de3b3a1774aff80b1e041437539317cdf749fe56956e809d98c2cee00043c3cc487d
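checksums.yaml records SHA1 and SHA512 digests of the gem's inner metadata.gz and data.tar.gz archives. A minimal sketch of recomputing one of these digests with Ruby's standard library, assuming the archive has been extracted locally (the path is illustrative):

```ruby
require 'digest'

# Compare a freshly computed SHA512 against the value recorded above.
expected = '2fa3693d384696779211b7f5706a80453e473d2d986579293d0ee7ceb8a4de3b3a1774aff80b1e041437539317cdf749fe56956e809d98c2cee00043c3cc487d'
puts Digest::SHA512.file('data.tar.gz').hexdigest == expected
```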
data/.gitignore
ADDED
data/.rspec
ADDED
data/.travis.yml
ADDED
data/CODE_OF_CONDUCT.md
ADDED
@@ -0,0 +1,49 @@
# Contributor Code of Conduct

As contributors and maintainers of this project, and in the interest of
fostering an open and welcoming community, we pledge to respect all people who
contribute through reporting issues, posting feature requests, updating
documentation, submitting pull requests or patches, and other activities.

We are committed to making participation in this project a harassment-free
experience for everyone, regardless of level of experience, gender, gender
identity and expression, sexual orientation, disability, personal appearance,
body size, race, ethnicity, age, religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing other's private information, such as physical or electronic
  addresses, without explicit permission
* Other unethical or unprofessional conduct

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

By adopting this Code of Conduct, project maintainers commit themselves to
fairly and consistently applying these principles to every aspect of managing
this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.

This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting a project maintainer at eurico@phybbit.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. Maintainers are
obligated to maintain confidentiality with regard to the reporter of an
incident.

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 1.3.0, available at
[http://contributor-covenant.org/version/1/3/0/][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/3/0/
data/Gemfile
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2016 株式会社Phybbit

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,97 @@
# Schema::Inference

Supports inferring tabular schemas from deeply nested data structures.
There are 2 main uses for this gem:
- it gives schema information on a nested data structure (useful when converting to a tabular format)
- it recovers types from data that has been serialized to strings (e.g. JSON or CSV)

## Installation

Add this line to your application's Gemfile:

```ruby
gem 'schema-inference'
```

And then execute:

    $ bundle

Or install it yourself as:

    $ gem install schema-inference

## Usage

1. Report information on a nested data structure

```ruby
schema = Schema::Inference.schema(dataset: [
  {
    'person' => {
      'name' => 'Bob',
      'age' => 30,
      'weight' => 60
    },
    'updated_at' => '2016-01-01T00:00:00Z'
  },
  {
    'person' => {
      'name' => 'Alice',
      # Alice does not want to show her age
      'weight' => 50.5
    },
    'updated_at' => '2016-01-01T00:00:00Z'
  },
])

schema['person.name'][:type]  # String
schema['person.name'][:usage] # 1.0 (100% of the entries have a name)

schema['person.age'][:type]  # Integer
schema['person.age'][:usage] # 0.5 (50% of the entries have an age)

schema['person.weight'][:type] # Numeric (inferred to be numeric, even though an integer was present)
schema['updated_at'][:type]    # Time
```

2. Recover types from string serialization

```ruby
schema = Schema::Inference.schema(dataset: {
  'serialized_time' => '2016-01-01T00:00:00Z',
  'serialized_integer' => '100',
  'serialized_numeric' => '0.5',
  'serialized_boolean' => 'true',
})
schema['serialized_time'][:type]    # Time
schema['serialized_integer'][:type] # Integer
schema['serialized_numeric'][:type] # Numeric
schema['serialized_boolean'][:type] # Boolean
```

3. If you need to load a lot of data, consider using the following pattern:
```ruby
schema = Schema::Inference.schema(batch_count: 10) do |idx|
  # Pull and return some large amount of data.
  # Fetching/accessing the data here avoids the IPC cost of
  # sending the data to the child processes used for parallel processing.
  # e.g.:
  MongoClient.find.limit(1000).offset(1000 * idx)
end
```

## Development

After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake spec` to run the tests. In certain cases, you may want to use the debug flag, e.g. `DEBUG=true rake spec`, to disable parallel schema processing. You can also run `bin/console` for an interactive prompt that will allow you to experiment.

To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org).

## Contributing

Bug reports and pull requests are welcome on GitHub at https://github.com/phybbit/schema-inference. This project is intended to be a safe, welcoming space for collaboration, and contributors are expected to adhere to the [Contributor Covenant](http://contributor-covenant.org) code of conduct.

## License

The gem is available as open source under the terms of the [MIT License](http://opensource.org/licenses/MIT).
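One behaviour the README does not show: by default the inferrer summarizes terminal arrays (see `schema_inferrer.rb` further down), collapsing indexed keys such as `tags.0`, `tags.1` into a single entry of type `Array` with `min_size`/`max_size`, while `extended: true` keeps each index as its own field. A minimal sketch of that, with an illustrative `tags` field:

```ruby
require 'schema/inference'

schema = Schema::Inference.schema(dataset: [
  { 'tags' => ['a', 'b', 'c'] },
  { 'tags' => ['d'] }
])

schema['tags'][:type]     # Array
schema['tags'][:min_size] # 1 (only index 0 appears in every record)
schema['tags'][:max_size] # 3

# extended: true keeps the indexed columns instead:
Schema::Inference.schema(dataset: [{ 'tags' => ['a'] }], extended: true)['tags.0'][:type] # String
```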
data/Rakefile
ADDED
data/bin/console
ADDED
@@ -0,0 +1,11 @@
#!/usr/bin/env ruby

require 'bundler/setup'
require 'schema/inference'

# You can add fixtures and/or initialization code here to make experimenting
# with your gem easier. You can also use a different console, if you like.

# (If you use this, don't forget to add pry to your Gemfile!)
require 'pry'
Pry.start
data/bin/setup
ADDED
data/lib/schema/inference.rb
ADDED
@@ -0,0 +1,21 @@
require 'active_support'
require 'active_support/core_ext/object/blank'
require 'active_support/core_ext/hash/reverse_merge'

require 'parallel'
require 'timeliness'

require 'extensions/boolean'
require 'schema/inference/version'
require 'schema/inference/schema_inferrer'


module Schema
  module Inference

    def Inference.schema(*args, &block) # block forwarded for the README's batch_count pattern
      SchemaInferrer.new.infer_schema(*args, &block)
    end

  end
end
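`Inference.schema` is a thin wrapper around `SchemaInferrer#infer_schema`, so the module-level call and an explicitly built inferrer behave the same. A minimal sketch under that assumption (the sample dataset is illustrative):

```ruby
require 'schema/inference'

dataset = [{ 'name' => 'Bob' }, { 'name' => 'Alice' }]

# The one-line entry point defined above...
schema_a = Schema::Inference.schema(dataset: dataset)
# ...and the equivalent explicit call through the inferrer class.
schema_b = Schema::Inference::SchemaInferrer.new(separator: '.').infer_schema(dataset: dataset)

schema_a['name'][:type]  # String
schema_a['name'][:usage] # 1.0
schema_a == schema_b     # true
```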
data/lib/schema/inference/schema_inferrer.rb
ADDED
@@ -0,0 +1,251 @@
module Schema
  module Inference
    class SchemaInferrer
      attr_accessor :separator

      def initialize(separator: '.')
        @separator = separator
      end

      # Generate a schema based on this collection's records.
      # We evaluate the schema of each record and then merge all
      # the information together.
      # @param dataset [Array] of samples on which we will
      #   perform the schema analysis.
      # @param extended [Boolean] Set to true to keep each field as a basic type.
      #   Set to false to reduce terminal arrays to a single key (reported as type Array).
      # @return [Hash] with one entry per 'column'/'field'. The values
      #   contain information about the type and usage.
      def infer_schema(dataset: [], batch_count: 0, extended: false)
        # support detecting schemas of single objects
        dataset = [dataset] if dataset.is_a?(Hash)
        validate_dataset(dataset)

        has_dataset = dataset.count > 0 || (block_given? && batch_count > 0)
        raise ArgumentError, 'a dataset or a block with a batch count must be passed' unless has_dataset

        if dataset.is_a?(Array) && dataset.count > 0
          # divide into batches to process in parallel
          per_process = (dataset.count / Parallel.processor_count.to_f).ceil
          batch_count = (dataset.count / per_process.to_f).ceil
        end

        results = parallel_map(batch_count.times) { |i|
          batch = block_given? ? yield(i) : dataset[i*per_process...(i+1)*per_process]
          { partial_schema: data_schema(batch), count: batch.count }
        }

        partial_schemas = results.map { |r| r[:partial_schema] }
        total_count = results.map { |r| r[:count] }.reduce(:+)

        table_schema = process_schema_results(partial_schemas, total_count, extended)
        table_schema.sort_by { |k, v| -v[:usage] }.to_h
      end

      private

      def validate_dataset(dataset)
        return if dataset.is_a?(Array)
        raise ArgumentError, 'dataset must be an array or a hash'
      end

      def data_schema(data)
        table_schema = {}
        data.each do |record|
          # fetch the record schema & update the general schema
          rec_schema = record_schema(record)
          rec_schema.each do |field_schema|
            table_schema[field_schema[:field]] ||= { type: field_schema[:type], usage_count: 0 }
            if table_schema[field_schema[:field]][:type] != field_schema[:type]
              if table_schema[field_schema[:field]][:type] == NilClass
                table_schema[field_schema[:field]][:type] = field_schema[:type]
              elsif field_schema[:type] != nil
                table_schema[field_schema[:field]][:type] = lowest_common_type(field_schema[:type], table_schema[field_schema[:field]][:type])
              end
            end
            table_schema[field_schema[:field]][:usage_count] += 1
            table_schema[field_schema[:field]][:types] ||= {}
            table_schema[field_schema[:field]][:types][field_schema[:type]] ||= 0
            table_schema[field_schema[:field]][:types][field_schema[:type]] += 1
          end
        end

        table_schema
      end

      def process_schema_results(results, total_count, extended)
        # aggregate the results
        table_schema = results[0]
        results[1..-1].each { |res|
          table_schema.each { |k, v|
            next if res[k].blank?

            # aggregate types count
            res[k][:types].each { |type, count|
              table_schema[k][:types][type] ||= 0
              table_schema[k][:types][type] += count
            }

            # aggregate other information
            table_schema[k][:usage_count] += res[k][:usage_count].to_i
            if (table_schema[k][:type] != res[k][:type])
              if table_schema[k][:type] == NilClass
                table_schema[k][:type] = res[k][:type]
              elsif res[k][:type] != NilClass
                table_schema[k][:type] = lowest_common_type(res[k][:type], table_schema[k][:type])
              end
            end
          }

          # make sure keys that were not in table_schema are now added.
          table_schema.reverse_merge!(res)
        }

        # detect and remove nulls that are part of other schemas
        # e.g. { 'some_data': null } and { 'some_data': { 'hash': 1 } }
        # shouldn't be reported as different keys
        table_schema.each { |k, v|
          next unless v[:type] == NilClass
          # check if there is any key that matches this one plus a hash/array extension
          full_key_exists = table_schema.find { |full_key, _| full_key =~ /^#{k}#{Regexp.quote(separator)}.*/ }.present?
          table_schema.delete(k) if full_key_exists
        }

        # detect and process array information
        unless extended
          terminal_array_keys = {}
          table_schema.keys.each { |key|
            is_terminal_array = /.*#{Regexp.quote(separator)}[0-9]+$/ =~ key
            next unless is_terminal_array
            key_prefix = key.split(separator)[0...-1].join(separator)
            terminal_array_keys[key_prefix] ||= []
            terminal_array_keys[key_prefix] << key
          }

          terminal_array_keys.each do |key_prefix, keys|
            keys_usage_count = keys.map { |x| table_schema[x][:usage_count] }
            usage_count = keys_usage_count.max
            # min size = how many keys have "always" been used
            # As the keys may not have been used at the same time,
            # this may not be valid depending on the array usage.
            min_size = keys_usage_count.count { |x| x == usage_count }
            max_size = keys.map { |x| x.split(separator)[-1].to_i }.max + 1

            # delete keys that are part of the array
            keys.each { |key| table_schema.delete(key) }

            table_schema[key_prefix] = {
              type: Array,
              usage_count: usage_count,
              min_size: min_size,
              max_size: max_size
            }
          end
        end

        # add a percentage in terms of usage
        table_schema.each { |k, v|
          table_schema[k][:usage] = table_schema[k][:usage_count] / total_count.to_f
        }

        table_schema
      end

      NumericTypes = [Numeric, Integer].freeze
      def lowest_common_type(type1, type2)
        return type1 if type1 == type2
        return Numeric if NumericTypes.include?(type1) && NumericTypes.include?(type2)
        Object
      end

      # Recursively explore a record and return its schema
      def record_schema(record, name = "")
        if record.is_a? Hash
          record.flat_map { |k, v|
            field_name = "#{name}#{separator}#{k}" if name.present?
            field_name ||= k
            record_schema(v, field_name)
          }
        elsif record.is_a? Array
          record.each_with_index.flat_map { |x, index|
            field_name = "#{name}#{separator}#{index}" if name.present?
            field_name ||= index.to_s
            record_schema(x, field_name)
          }
        else
          { field: name, type: detect_type_of(record) }
        end
      end

      def detect_type_of(value)
        return Boolean if value.is_a?(TrueClass) || value.is_a?(FalseClass)
        return Integer if value.is_a? Integer
        return Numeric if value.is_a? Numeric
        return Time if value.is_a? Time
        return NilClass if value.is_a? NilClass

        if value.is_a? String
          return Integer if value =~ /^[-+]?[0-9]+$/
          return Numeric if value =~ /^[-+]?[0-9]*\.?[0-9]+$/
          return Boolean if %w(false true).include?(value.downcase)
          return Time if Timeliness.parse(value) != nil
          return String
        end

        Object
      end

      def key_access_tokens(key:)
        key.split(separator).map { |token|
          # only parse integers for array indexing
          next token unless is_integer?(token)
          token.to_i
        }
      end

      def record_value(record:, key:)
        tokens = key_access_tokens(key: key)
        record.dig(*tokens)
      end

      def add_value_to_record(record:, key:, value:)
        tokens = key.split(separator)
        current_ref = record
        previous_token = tokens[0]

        tokens[1..-1].each { |token|
          if is_integer?(token)
            current_ref[previous_token] ||= []
            current_ref = current_ref[previous_token]
            previous_token = token.to_i
          else
            current_ref[previous_token] ||= {}
            current_ref = current_ref[previous_token]
            previous_token = token
          end
        }

        current_ref[previous_token] = value
      end

      def is_integer?(value)
        (/^[+-]?[0-9]+$/ =~ value).present?
      end

      def parallel_map(itr, &block)
        # set the DEBUG environment variable to run the iteration serially (easier to debug)
        is_debugging_impl = ENV['DEBUG']
        if is_debugging_impl
          itr.map do |arg|
            block.call(arg)
          end
        else
          Parallel.map(itr) do |arg|
            block.call(arg)
          end
        end
      end

    end
  end
end
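To make the merge rules above concrete: `lowest_common_type` reduces mixed Integer/Float fields to Numeric and otherwise falls back to Object, while the raw per-type counts stay available under `:types`. A minimal sketch, using an illustrative dataset:

```ruby
require 'schema/inference'

schema = Schema::Inference::SchemaInferrer.new.infer_schema(dataset: [
  { 'price' => 10,   'id' => 1 },
  { 'price' => 10.5, 'id' => 'abc' }
])

schema['price'][:type] # Numeric (Integer and Float share Numeric)
schema['id'][:type]    # Object  (Integer and String have no common numeric type)
schema['id'][:types]   # { Integer => 1, String => 1 }
schema['price'][:usage] # 1.0
```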
data/schema-inference.gemspec
ADDED
@@ -0,0 +1,29 @@
# coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'schema/inference/version'

Gem::Specification.new do |spec|
  spec.name          = 'schema-inference'
  spec.version       = Schema::Inference::VERSION
  spec.authors       = ['Eurico Doirado']
  spec.email         = ['eurico@phybbit.com']

  spec.summary       = %q{Supports inferring tabular schemas from deep nested structures.}
  spec.homepage      = 'https://github.com/Phybbit/schema-inference'
  spec.license       = 'MIT'

  spec.files         = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
  spec.bindir        = "exe"
  spec.executables   = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
  spec.require_paths = ['lib']

  spec.add_development_dependency 'bundler', '~> 1.12'
  spec.add_development_dependency 'rake', '~> 10.0'
  spec.add_development_dependency 'rspec', '~> 3.0'
  spec.add_development_dependency 'pry-byebug'

  spec.add_dependency 'activesupport', '>= 4.0.0'
  spec.add_dependency 'parallel', '~>1.8'
  spec.add_dependency 'timeliness', '~>0.3'
end
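Because activesupport, parallel, and timeliness are declared as runtime dependencies, Bundler resolves them automatically for consumers; a minimal Gemfile sketch for an application using this release (the version pin is an assumption matching 1.0.0 above):

```ruby
# Gemfile of a consuming application (the '~> 1.0' pin is illustrative)
source 'https://rubygems.org'

gem 'schema-inference', '~> 1.0'
```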
metadata
ADDED
@@ -0,0 +1,157 @@
--- !ruby/object:Gem::Specification
name: schema-inference
version: !ruby/object:Gem::Version
  version: 1.0.0
platform: ruby
authors:
- Eurico Doirado
autorequire:
bindir: exe
cert_chain: []
date: 2016-09-03 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: bundler
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.12'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.12'
- !ruby/object:Gem::Dependency
  name: rake
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '10.0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '10.0'
- !ruby/object:Gem::Dependency
  name: rspec
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '3.0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '3.0'
- !ruby/object:Gem::Dependency
  name: pry-byebug
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: '0'
- !ruby/object:Gem::Dependency
  name: activesupport
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 4.0.0
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 4.0.0
- !ruby/object:Gem::Dependency
  name: parallel
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.8'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.8'
- !ruby/object:Gem::Dependency
  name: timeliness
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '0.3'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '0.3'
description:
email:
- eurico@phybbit.com
executables: []
extensions: []
extra_rdoc_files: []
files:
- ".gitignore"
- ".rspec"
- ".travis.yml"
- CODE_OF_CONDUCT.md
- Gemfile
- LICENSE.txt
- README.md
- Rakefile
- bin/console
- bin/setup
- lib/extensions/boolean.rb
- lib/schema/inference.rb
- lib/schema/inference/schema_inferrer.rb
- lib/schema/inference/version.rb
- schema-inference.gemspec
homepage: https://github.com/Phybbit/schema-inference
licenses:
- MIT
metadata: {}
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubyforge_project:
rubygems_version: 2.5.1
signing_key:
specification_version: 4
summary: Supports inferring tabular schemas from deep nested structures.
test_files: []