master_loader 1.0.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/LICENSE +23 -0
- data/README.md +243 -0
- data/lib/master_loader.rb +180 -0
- data/lib/master_loader/version.rb +4 -0
- metadata +48 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
1
|
+
---
|
2
|
+
SHA256:
|
3
|
+
metadata.gz: 4ddc0e0038e8c95ad39e592149b30409f08dc788ce0cf62372bcc7e0de9520e7
|
4
|
+
data.tar.gz: 9d43b4ca0cc9aa2c033a8ae2e16ab2615625affd845662cb5521a3f51f9d3192
|
5
|
+
SHA512:
|
6
|
+
metadata.gz: a45362d3d20a2d0ec6a256e23aca67f91e27294a78d5e965e5746a15f932c23f5d6ec9d482acb990124a446955de8d0ccb6e004d09451e4eeb7a989134ed3f55
|
7
|
+
data.tar.gz: ef2cf221281fc1c8507efa11757d550526bf006097080e938823a0be5efe133d4892935742cb1a8786982dcd4bc78a5c47d0e631b2a7b594a2c2c900530f0563
|
data/LICENSE
ADDED
@@ -0,0 +1,23 @@
|
|
1
|
+
Copyright (c) 2017-2019, Adam Stankiewicz
|
2
|
+
Copyright (c) 2019-present, Caleb Land
|
3
|
+
|
4
|
+
MIT License
|
5
|
+
|
6
|
+
Permission is hereby granted, free of charge, to any person obtaining
|
7
|
+
a copy of this software and associated documentation files (the
|
8
|
+
"Software"), to deal in the Software without restriction, including
|
9
|
+
without limitation the rights to use, copy, modify, merge, publish,
|
10
|
+
distribute, sublicense, and/or sell copies of the Software, and to
|
11
|
+
permit persons to whom the Software is furnished to do so, subject to
|
12
|
+
the following conditions:
|
13
|
+
|
14
|
+
The above copyright notice and this permission notice shall be
|
15
|
+
included in all copies or substantial portions of the Software.
|
16
|
+
|
17
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
18
|
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
19
|
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
20
|
+
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
21
|
+
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
22
|
+
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
23
|
+
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
data/README.md
ADDED
@@ -0,0 +1,243 @@
|
|
1
|
+
# ![](http://i.imgur.com/ZdJKtj1.png) MasterLoader
|
2
|
+
|
3
|
+
[![Build Status](https://travis-ci.org/sheerun/dataloader.svg?branch=master)](https://travis-ci.org/sheerun/dataloader) [![codecov](https://codecov.io/gh/sheerun/dataloader/branch/master/graph/badge.svg)](https://codecov.io/gh/sheerun/dataloader)
|
4
|
+
|
5
|
+
MasterLoader is a generic utility based on [Dataloader](https://github.com/sheerun/dataloader), to be used as part of your application's data fetching layer to provide a simplified and consistent API to perform batching and caching within a request. It is heavily inspired by [Facebook's dataloader](https://github.com/facebook/dataloader).
|
6
|
+
|
7
|
+
## Getting started
|
8
|
+
|
9
|
+
First, install MasterLoader using bundler:
|
10
|
+
|
11
|
+
```ruby
|
12
|
+
gem "master_loader"
|
13
|
+
```
|
14
|
+
|
15
|
+
To get started, instantiate `MasterLoader`. Each `MasterLoader` instance represents a unique cache. Typically instances are created per request when used within a web-server. To see how to use with GraphQL server, see section below.
|
16
|
+
|
17
|
+
MasterLoader is dependent on [concurrent-ruby](https://github.com/ruby-concurrency/concurrent-ruby) which you can use freely for batch-ready code (e.g. loader can return a `Future` that returns a `Future` that returns a `Future`). MasterLoader will try to batch most of them.
|
18
|
+
|
19
|
+
## Basic usage
|
20
|
+
|
21
|
+
```ruby
|
22
|
+
# It will be called only once with ids = [0, 1, 2]
|
23
|
+
loader = MasterLoader.new do |ids|
|
24
|
+
User.find(*ids)
|
25
|
+
end
|
26
|
+
|
27
|
+
# Schedule data to load
|
28
|
+
promise_one = loader.load(0)
|
29
|
+
promise_two = loader.load_many([1, 2])
|
30
|
+
|
31
|
+
# Get promises results
|
32
|
+
user0 = promise_one.value!
|
33
|
+
user1, user2 = promise_two.value!
|
34
|
+
```
|
35
|
+
|
36
|
+
## Using with GraphQL
|
37
|
+
|
38
|
+
You can pass loaders inside [`context`](https://rmosolgo.github.io/graphql-ruby/queries/executing_queries).
|
39
|
+
|
40
|
+
```ruby
|
41
|
+
UserType = GraphQL::ObjectType.define do
|
42
|
+
field :name, types.String
|
43
|
+
end
|
44
|
+
|
45
|
+
QueryType = GraphQL::ObjectType.define do
|
46
|
+
name "Query"
|
47
|
+
description "The query root of this schema"
|
48
|
+
|
49
|
+
field :user do
|
50
|
+
type UserType
|
51
|
+
argument :id, !types.ID
|
52
|
+
resolve ->(obj, args, ctx) {
|
53
|
+
ctx[:user_loader].load(args["id"])
|
54
|
+
}
|
55
|
+
end
|
56
|
+
end
|
57
|
+
|
58
|
+
Schema = GraphQL::Schema.define do
|
59
|
+
lazy_resolve(Concurrent::Promises::Future, :'value!')
|
60
|
+
|
61
|
+
query QueryType
|
62
|
+
end
|
63
|
+
|
64
|
+
context = {
|
65
|
+
user_loader: MasterLoader.new do |ids|
|
66
|
+
User.find(*ids)
|
67
|
+
end
|
68
|
+
}
|
69
|
+
|
70
|
+
Schema.execute("{ user(id: 12) { name } }", context: context)
|
71
|
+
```
|
72
|
+
|
73
|
+
## Batching
|
74
|
+
|
75
|
+
You can create loaders by providing a batch loading function.
|
76
|
+
|
77
|
+
```ruby
|
78
|
+
user_loader = MasterLoader.new { |ids| User.find(*ids) }
|
79
|
+
```
|
80
|
+
|
81
|
+
A batch loading block accepts an Array of keys, and returns a Promise which resolves to an Array or Hash of values.
|
82
|
+
|
83
|
+
`MasterLoader` will coalesce all individual loads which occur until first `#value!` (or `#value`, `#wait`, `#touch` or other `Concurrent::Promises::Future` method that blocks waiting for a result) is called on any promise returned by `#load` or `#load_many`, and then call your batch function with all requested keys.
|
84
|
+
|
85
|
+
```ruby
|
86
|
+
user_loader.load(1)
|
87
|
+
.then { |user| user_loader.load(user.invited_by_id) }
|
88
|
+
.then { |invited_by| "User 1 was invited by #{invited_by[:name]}" }
|
89
|
+
|
90
|
+
# Elsewhere in your backend
|
91
|
+
user_loader.load(2)
|
92
|
+
.then { |user| user_loader.load(user.invited_by_id) }
|
93
|
+
.then { |invited_by| "User 2 was invited by #{invited_by[:name]}" }
|
94
|
+
```
|
95
|
+
|
96
|
+
A naive solution is to issue four SQL queries to get required information, but with `MasterLoader` this application will make at most two queries (one to load users, and second one to load invites).
|
97
|
+
|
98
|
+
`MasterLoader` allows you to decouple unrelated parts of your application without sacrificing the performance of batch data-loading. While the loader presents an API that loads individual values, all concurrent requests will be coalesced and presented to your batch loading function. This allows your application to safely distribute data fetching requirements throughout your application and maintain minimal outgoing data requests.
|
99
|
+
|
100
|
+
### Batch function
|
101
|
+
|
102
|
+
A batch loading function accepts an Array of keys, and returns Array of values or Hash that maps from keys to values (or a [Concurrent::Promises::Future](https://github.com/ruby-concurrency/concurrent-ruby) that returns such Array or Hash). There are a few constraints that must be upheld:
|
103
|
+
|
104
|
+
* The Array of values must be the same length as the Array of keys.
|
105
|
+
* Each index in the Array of values must correspond to the same index in the Array of keys.
|
106
|
+
* If Hash is returned, it must include all keys passed to batch loading function
|
107
|
+
|
108
|
+
For example, if your batch function was provided the Array of keys: `[ 2, 9, 6 ]`, you could return one of following:
|
109
|
+
|
110
|
+
```ruby
|
111
|
+
[
|
112
|
+
{ id: 2, name: "foo" },
|
113
|
+
{ id: 9, name: "bar" },
|
114
|
+
{ id: 6, name: "baz" }
|
115
|
+
]
|
116
|
+
```
|
117
|
+
|
118
|
+
```ruby
|
119
|
+
{
|
120
|
+
2 => { id: 2, name: "foo" },
|
121
|
+
9 => { id: 9, name: "bar" },
|
122
|
+
6 => { id: 6, name: "baz" }
|
123
|
+
}
|
124
|
+
```
|
125
|
+
|
126
|
+
## Caching
|
127
|
+
|
128
|
+
`MasterLoader` provides a memoization cache for all loads which occur within a single instance of it. After `#load` is called once with a given key, the resulting Promise is cached to eliminate redundant loads.
|
129
|
+
|
130
|
+
In addition to relieving pressure on your data storage, caching results per-request also creates fewer objects which may relieve memory pressure on your application:
|
131
|
+
|
132
|
+
```
|
133
|
+
promise1 = user_loader.load(1)
|
134
|
+
promise2 = user_loader.load(1)
|
135
|
+
promise1 == promise2 # => true
|
136
|
+
```
|
137
|
+
|
138
|
+
### Caching per-request
|
139
|
+
|
140
|
+
`MasterLoader` caching does not replace Redis, Memcache, or any other shared application-level cache. `MasterLoader` is first and foremost a data loading mechanism, and its cache only serves the purpose of not repeatedly loading the same data in the context of a single request to your Application. To do this, it maintains a simple in-memory memoization cache (more accurately: `#load` is a memoized function).
|
141
|
+
|
142
|
+
Avoid multiple requests from different users using the same `MasterLoader` instance, which could result in cached data incorrectly appearing in each request. Typically, `MasterLoader` instances are created when a request begins, and are not used once the request ends.
|
143
|
+
|
144
|
+
See [Using with GraphQL](https://github.com/sheerun/dataloader#using-with-graphql) section to see how you can pass dataloader instances using context.
|
145
|
+
|
146
|
+
### Caching errors
|
147
|
+
|
148
|
+
If a batch load fails (that is, a batch function throws or returns a rejected Promise), then the requested values will not be cached. However if a batch function returns an Error instance for an individual value, that Error will be cached to avoid frequently loading the same Error.
|
149
|
+
|
150
|
+
In some circumstances you may wish to clear the cache for these individual Errors:
|
151
|
+
|
152
|
+
```ruby
|
153
|
+
user_loader.load(1).rescue do |error|
|
154
|
+
user_loader.cache.delete(1)
|
155
|
+
raise error
|
156
|
+
end
|
157
|
+
```
|
158
|
+
|
159
|
+
### Disabling cache
|
160
|
+
|
161
|
+
In certain uncommon cases, a `MasterLoader` which does not cache may be desirable. Calling `MasterLoader.new({ cache: nil }) { ... }` will ensure that every call to `#load` will produce a new Promise, and requested keys will not be saved in memory.
|
162
|
+
|
163
|
+
However, when the memoization cache is disabled, your batch function will receive an array of keys which may contain duplicates! Each key will be associated with each call to `#load`. Your batch loader should provide a value for each instance of the requested key.
|
164
|
+
|
165
|
+
```ruby
|
166
|
+
loader = MasterLoader.new(cache: nil) do |keys|
|
167
|
+
puts keys
|
168
|
+
some_loading_function(keys)
|
169
|
+
end
|
170
|
+
|
171
|
+
loader.load('A')
|
172
|
+
loader.load('B')
|
173
|
+
loader.load('A')
|
174
|
+
|
175
|
+
# => [ 'A', 'B', 'A' ]
|
176
|
+
```
|
177
|
+
|
178
|
+
## API
|
179
|
+
|
180
|
+
### `MasterLoader`
|
181
|
+
|
182
|
+
`MasterLoader` is a class for fetching data given unique keys such as the id column (or any other key).
|
183
|
+
|
184
|
+
Each `MasterLoader` instance contains a unique memoized cache. Because of it, it is recommended to use one `MasterLoader` instance **per web request**. You can use more long-lived instances, but then you need to take care of manually cleaning the cache.
|
185
|
+
|
186
|
+
You shouldn't share the same dataloader instance across different threads. This behavior is currently undefined.
|
187
|
+
|
188
|
+
### `MasterLoader.new(**options = {}, &batch_load)`
|
189
|
+
|
190
|
+
Create a new `MasterLoader` given a batch loading function and options.
|
191
|
+
|
192
|
+
* `batch_load`: A block which accepts an Array of keys, and returns Array of values or Hash that maps from keys to values (or a [Promise](https://github.com/lgierth/promise.rb) that returns such value).
|
193
|
+
* `options`: An optional hash of options:
|
194
|
+
* `:key` **(not implemented yet)** A function to produce a cache key for a given load key. Defaults to function { |key| key }. Useful to provide when objects are keys and two similarly shaped objects should be considered equivalent.
|
195
|
+
* `:cache` An instance of a cache used for caching of promises. Defaults to `Concurrent::Map.new`.
|
196
|
+
- The only required API is `#compute_if_absent(key)`).
|
197
|
+
- You can pass `nil` if you want to disable the cache.
|
198
|
+
- You can pass pre-populated cache as well. The values can be Promises.
|
199
|
+
* `:max_batch_size` Limits the number of items that get passed in to the batchLoadFn. Defaults to `INFINITY`. You can pass `1` to disable batching.
|
200
|
+
|
201
|
+
### `#load(key)`
|
202
|
+
|
203
|
+
**key** [Object] a key to load using `batch_load`
|
204
|
+
|
205
|
+
Returns a [Future](https://github.com/ruby-concurrency/concurrent-ruby) of computed value.
|
206
|
+
|
207
|
+
You can resolve this promise when you actually need the value with `promise.value!`.
|
208
|
+
|
209
|
+
All calls to `#load` are batched until the first `#value!` is encountered. Then it starts batching again, and so on.
|
210
|
+
|
211
|
+
### `#load_many(keys)`
|
212
|
+
|
213
|
+
**keys** [Array<Object>] list of keys to load using `batch_load`
|
214
|
+
|
215
|
+
Returns a [Future](https://github.com/ruby-concurrency/concurrent-ruby) of array of computed values.
|
216
|
+
|
217
|
+
To give an example, to load multiple keys:
|
218
|
+
|
219
|
+
```ruby
|
220
|
+
promise = loader.load_many(['a', 'b'])
|
221
|
+
object_a, object_b = promise.value!
|
222
|
+
```
|
223
|
+
|
224
|
+
This is equivalent to the more verbose:
|
225
|
+
|
226
|
+
```ruby
|
227
|
+
promise = Concurrent::Promises.zip_futures(loader.load('a'), loader.load('b')).then {|*results| results}
|
228
|
+
object_a, object_b = promise.value!
|
229
|
+
```
|
230
|
+
|
231
|
+
### `#cache`
|
232
|
+
|
233
|
+
Returns the internal cache that can be overridden with `:cache` option (see constructor)
|
234
|
+
|
235
|
+
This field is writable, so you can reset the cache with something like:
|
236
|
+
|
237
|
+
```ruby
|
238
|
+
loader.cache = Concurrent::Map.new
|
239
|
+
```
|
240
|
+
|
241
|
+
## License
|
242
|
+
|
243
|
+
MIT
|
@@ -0,0 +1,180 @@
|
|
1
|
+
require 'concurrent'
|
2
|
+
|
3
|
+
# A minimal lazy promise: wraps a resolver block and evaluates it at most
# once, on the first call to #value!. Nested DelayedResults returned by the
# resolver are flattened (resolved recursively).
class DelayedResult
  def initialize(&resolver)
    @resolver = resolver
  end

  # Chain a transformation. The block is not run until the returned
  # DelayedResult is itself resolved with #value!.
  def then(&block)
    DelayedResult.new do
      block.(value!)
    end
  end

  # Combine several DelayedResults: lazily resolves all of them, then
  # yields their values to the block and returns the block's result.
  def self.zip(*results, &block)
    DelayedResult.new do
      block.(*results.map(&:value!))
    end
  end

  # Resolve (at most once) and return the value.
  #
  # Fixed: the previous `@value ||= ...` memoization re-ran the resolver on
  # every call whenever it produced a falsy value (nil/false); guarding with
  # `defined?` caches any result, including falsy ones.
  def value!
    return @value if defined?(@value)

    val = @resolver.()
    # Flatten a nested DelayedResult. (The original `val&.is_a?` safe
    # navigation was redundant — `nil.is_a?` is already safe.)
    @value = val.is_a?(DelayedResult) ? val.value! : val
  end

  # Alias-style accessor kept for API compatibility with callers that use
  # the non-bang name.
  def value
    value!
  end
end
|
35
|
+
|
36
|
+
# MasterLoader batches and caches data loads within a single request.
#
# Construct with a block that receives an Array of keys and returns either an
# Array of values (same order and length as the keys) or a Hash that maps
# every requested key to its value (or a DelayedResult yielding one of
# those). Individual loads scheduled through #load / #load_many are coalesced
# into a single call to that block the first time any returned DelayedResult
# is resolved.
class MasterLoader
  # Cache stand-in used when caching is explicitly disabled via
  # MasterLoader.new(cache: nil) { ... }. Always recomputes.
  class NoCache
    def compute_if_absent(_key)
      yield
    end
  end

  # One round of coalesced loads. A Batch accumulates keys until it is
  # fulfilled (the loader block has run), after which the owning
  # MasterLoader starts a fresh Batch for subsequent loads.
  class Batch
    attr_accessor :name
    attr_accessor :fulfilled

    # loader_block    - callable taking an Array of keys.
    # name:           - optional label (diagnostics only).
    # max_batch_size: - accepted for API compatibility.
    #   NOTE(review): @max_batch_size is stored but never enforced anywhere
    #   in this class — confirm whether batches should be split at this
    #   size, as the README advertises.
    def initialize(loader_block, name: nil, max_batch_size: Float::INFINITY)
      @name = name
      @queue = Concurrent::Array.new
      @lock = Concurrent::ReadWriteLock.new
      @loader_block = loader_block
      @max_batch_size = max_batch_size
      @fulfilled = false
      @results = nil
    end

    # Enqueue `key` and return a DelayedResult that, when resolved,
    # fulfills the whole batch (running the loader block exactly once for
    # all queued keys) and yields the value for `key`.
    #
    # Raises StandardError if the loader block did not resolve `key`.
    def queue(key)
      @queue << key

      DelayedResult.new do
        # Double-checked locking: cheap read path once fulfilled; the first
        # resolver to arrive takes the write lock, re-checks, and invokes
        # the loader block for every key queued so far.
        results = if @fulfilled
          @lock.with_read_lock do
            @results
          end
        else
          @lock.with_write_lock do
            if @fulfilled
              @results
            else
              @fulfilled = true
              r = @loader_block.(@queue)
              @results = if r.is_a?(DelayedResult)
                normalize_results(r.value!)
              else
                normalize_results(r)
              end
            end
          end
        end

        unless results.key?(key)
          raise StandardError, "Batch loader didn't resolve a key: #{key}. Resolved keys: #{results.keys}"
        end

        results[key]
      end
    end

    def fulfilled?
      @fulfilled
    end

    private

    # Validate the loader block's return value and normalize it to a Hash
    # of key => value. Arrays are zipped positionally against the queued
    # keys; Hashes are passed through.
    def normalize_results(results)
      unless results.is_a?(Array) || results.is_a?(Hash)
        raise TypeError, "Batch loader must return an Array or Hash, but returned: #{results.class.name}"
      end

      if @queue.size != results.size
        raise StandardError, "Batch loader must be instantiated with function that returns Array or Hash " \
          "of the same size as provided to it Array of keys" \
          "\n\nProvided keys:\n#{@queue}" \
          "\n\nReturned values:\n#{results}"
      end

      if results.is_a?(Array)
        Hash[@queue.zip(results)]
      elsif results.is_a?(Hash)
        results
      end
    end
  end

  # The memoization cache. Writable so callers can swap/reset it, e.g.
  # loader.cache = Concurrent::Map.new
  attr_accessor :cache

  # Options:
  #   name:           - label forwarded to each Batch (diagnostics).
  #   cache:          - object responding to #compute_if_absent(key);
  #                     pass nil to disable caching. Defaults to
  #                     Concurrent::Map.new. May be pre-populated.
  #   max_batch_size: - forwarded to Batch (see review note there).
  #   interceptor:    - lambda that wraps the loader block; defaults to an
  #                     identity wrapper.
  def initialize(**options, &block)
    unless block_given?
      # Fixed: this message previously said "Dataloader", the name of the
      # gem this project was forked from.
      raise TypeError, "MasterLoader must be constructed with a block which accepts " \
        "Array and returns either Array or Hash of the same size (or Promise)"
    end

    @name = options.delete(:name)
    @cache = if options.has_key?(:cache)
      options.delete(:cache) || NoCache.new
    else
      Concurrent::Map.new
    end
    @max_batch_size = options.fetch(:max_batch_size, Float::INFINITY)

    @interceptor = options.delete(:interceptor) || -> (n) {
      -> (ids) {
        n.call(ids)
      }
    }

    @loader_block = @interceptor.call(block)
  end

  # Schedule `key` to be loaded. Returns a DelayedResult, memoized per key
  # in @cache. Raises TypeError for a nil key.
  def load(key)
    if key.nil?
      raise TypeError, "#load must be called with a key, but got: nil"
    end

    result = retrieve_from_cache(key) do
      batch.queue(key)
    end

    # A pre-populated cache may hold plain values; wrap them so callers
    # always receive a DelayedResult.
    if result.is_a?(DelayedResult)
      result
    else
      DelayedResult.new { result }
    end
  end

  # Schedule many keys at once. Returns a DelayedResult of an Array of
  # values in the same order as `keys`.
  def load_many(keys)
    unless keys.is_a?(Array)
      raise TypeError, "#load_many must be called with an Array, but got: #{keys.class.name}"
    end

    delayed_results = keys.map(&method(:load))
    DelayedResult.new do
      delayed_results.map(&:value!)
    end
  end

  # The current open Batch; a fresh one is started lazily once the previous
  # batch has been fulfilled.
  def batch
    if @batch.nil? || @batch.fulfilled?
      @batch = Batch.new(@loader_block, name: @name, max_batch_size: @max_batch_size)
    else
      @batch
    end
  end

  # Look up `key` in the cache, computing (and caching) the block's result
  # on a miss.
  def retrieve_from_cache(key)
    @cache.compute_if_absent(key) do
      yield
    end
  end
end
|
metadata
ADDED
@@ -0,0 +1,48 @@
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
2
|
+
name: master_loader
|
3
|
+
version: !ruby/object:Gem::Version
|
4
|
+
version: 1.0.0
|
5
|
+
platform: ruby
|
6
|
+
authors:
|
7
|
+
- Caleb Land
|
8
|
+
autorequire:
|
9
|
+
bindir: bin
|
10
|
+
cert_chain: []
|
11
|
+
date: 2020-09-05 00:00:00.000000000 Z
|
12
|
+
dependencies: []
|
13
|
+
description: A data loading utility similar to GraphQL's data_loader. Can be used
|
14
|
+
with Ruby GraphQL
|
15
|
+
email:
|
16
|
+
- caleb@land.fm
|
17
|
+
executables: []
|
18
|
+
extensions: []
|
19
|
+
extra_rdoc_files: []
|
20
|
+
files:
|
21
|
+
- LICENSE
|
22
|
+
- README.md
|
23
|
+
- lib/master_loader.rb
|
24
|
+
- lib/master_loader/version.rb
|
25
|
+
homepage: https://github.com/caleb/master_loader
|
26
|
+
licenses:
|
27
|
+
- MIT
|
28
|
+
metadata: {}
|
29
|
+
post_install_message:
|
30
|
+
rdoc_options: []
|
31
|
+
require_paths:
|
32
|
+
- lib
|
33
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
34
|
+
requirements:
|
35
|
+
- - ">="
|
36
|
+
- !ruby/object:Gem::Version
|
37
|
+
version: '0'
|
38
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
39
|
+
requirements:
|
40
|
+
- - ">="
|
41
|
+
- !ruby/object:Gem::Version
|
42
|
+
version: '0'
|
43
|
+
requirements: []
|
44
|
+
rubygems_version: 3.0.6
|
45
|
+
signing_key:
|
46
|
+
specification_version: 4
|
47
|
+
summary: Batch data loading, works great with graphql
|
48
|
+
test_files: []
|