graphql 2.1.10 → 2.2.0
- checksums.yaml +4 -4
- data/lib/graphql/dataloader/async_dataloader.rb +88 -0
- data/lib/graphql/dataloader/source.rb +4 -3
- data/lib/graphql/dataloader.rb +35 -21
- data/lib/graphql/language/lexer.rb +271 -177
- data/lib/graphql/language/nodes.rb +72 -54
- data/lib/graphql/language/parser.rb +686 -1986
- data/lib/graphql/language/printer.rb +16 -12
- data/lib/graphql/schema.rb +6 -5
- data/lib/graphql/testing/helpers.rb +125 -0
- data/lib/graphql/testing.rb +2 -0
- data/lib/graphql/tracing/trace.rb +1 -0
- data/lib/graphql/version.rb +1 -1
- data/lib/graphql.rb +1 -0
- metadata +5 -3
- data/lib/graphql/language/parser.y +0 -560
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz:
-   data.tar.gz:
+   metadata.gz: 1294ac9e09f2c6924a771765e3f79d6c29449235e65f736afd177270b2a90b99
+   data.tar.gz: eece859aa0e3137e5d7a67c39ec79917d3ee884e3011cb8e9f203801c78c0408
  SHA512:
-   metadata.gz:
-   data.tar.gz:
+   metadata.gz: 4a8343dd7ddcdfbaa05cdfd462612810bbbaa600c62c2d392d367926e7c4742a14705cc05a7e14cfae6d0d5aecb0509e96b645edcbfcdb63d250349bdeef725e
+   data.tar.gz: de145d99232eecc3feb68fa0cb2f3cdfe73fa154924f3ad27ec25ab1e90783927a2b064667233379adc5f7bc1a38563a39466c193a5378b64e13676f802aeeeb
data/lib/graphql/dataloader/async_dataloader.rb
ADDED
@@ -0,0 +1,88 @@
+ # frozen_string_literal: true
+ module GraphQL
+   class Dataloader
+     class AsyncDataloader < Dataloader
+       def yield
+         Thread.current[:graphql_dataloader_next_tick].wait
+         nil
+       end
+
+       def run
+         job_tasks = []
+         next_job_tasks = []
+         source_tasks = []
+         next_source_tasks = []
+         first_pass = true
+         jobs_condition = Async::Condition.new
+         sources_condition = Async::Condition.new
+         Sync do |root_task|
+           while first_pass || job_tasks.any?
+             first_pass = false
+
+             root_task.async do |jobs_task|
+               while (task = job_tasks.shift || spawn_job_task(jobs_task, jobs_condition))
+                 if task.alive?
+                   next_job_tasks << task
+                 end
+               end
+             end.wait
+             job_tasks.concat(next_job_tasks)
+             next_job_tasks.clear
+
+             while source_tasks.any? || @source_cache.each_value.any? { |group_sources| group_sources.each_value.any?(&:pending?) }
+               root_task.async do |sources_loop_task|
+                 while (task = source_tasks.shift || spawn_source_task(sources_loop_task, sources_condition))
+                   if task.alive?
+                     next_source_tasks << task
+                   end
+                 end
+               end.wait
+               sources_condition.signal
+               source_tasks.concat(next_source_tasks)
+               next_source_tasks.clear
+             end
+             jobs_condition.signal
+           end
+         end
+       rescue UncaughtThrowError => e
+         throw e.tag, e.value
+       end
+
+       private
+
+       def spawn_job_task(parent_task, condition)
+         if @pending_jobs.any?
+           fiber_vars = get_fiber_variables
+           parent_task.async do |t|
+             set_fiber_variables(fiber_vars)
+             Thread.current[:graphql_dataloader_next_tick] = condition
+             while job = @pending_jobs.shift
+               job.call
+             end
+           end
+         end
+       end
+
+       def spawn_source_task(parent_task, condition)
+         pending_sources = nil
+         @source_cache.each_value do |source_by_batch_params|
+           source_by_batch_params.each_value do |source|
+             if source.pending?
+               pending_sources ||= []
+               pending_sources << source
+             end
+           end
+         end
+
+         if pending_sources
+           fiber_vars = get_fiber_variables
+           parent_task.async do
+             set_fiber_variables(fiber_vars)
+             Thread.current[:graphql_dataloader_next_tick] = condition
+             pending_sources.each(&:run_pending_keys)
+           end
+         end
+       end
+     end
+   end
+ end
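For orientation, here is a minimal sketch of how a schema can opt into this new class, following the deprecation notice further below that points at GraphQL::Dataloader::AsyncDataloader. The schema and type names are hypothetical, and the `async` gem must be available, since `Sync` and `Async::Condition` above come from it.

# Hypothetical schema opting into the async-gem-backed dataloader (sketch, not part of this diff):
require "graphql"
require "async"   # provides Sync / Async::Condition used by AsyncDataloader

class MySchema < GraphQL::Schema
  query Types::Query                         # hypothetical root query type
  use GraphQL::Dataloader::AsyncDataloader   # instead of the default GraphQL::Dataloader
end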
data/lib/graphql/dataloader/source.rb
CHANGED
@@ -88,6 +88,7 @@ module GraphQL
          raise "Implement `#{self.class}#fetch(#{keys.inspect}) to return a record for each of the keys"
        end

+       MAX_ITERATIONS = 1000
        # Wait for a batch, if there's anything to batch.
        # Then run the batch and update the cache.
        # @return [void]
@@ -96,8 +97,8 @@ module GraphQL
          iterations = 0
          while pending_result_keys.any? { |key| !@results.key?(key) }
            iterations += 1
-           if iterations > 1000
-             raise "#{self.class}#sync tried 1000 times to load pending keys (#{pending_result_keys}), but they still weren't loaded. There is likely a circular dependency."
+           if iterations > MAX_ITERATIONS
+             raise "#{self.class}#sync tried #{MAX_ITERATIONS} times to load pending keys (#{pending_result_keys}), but they still weren't loaded. There is likely a circular dependency."
            end
            @dataloader.yield
          end
@@ -168,7 +169,7 @@ module GraphQL
          nil
        end

-       attr_reader :pending
+       attr_reader :pending

        private
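Both hunks above touch Source: the `fetch` contract and the new MAX_ITERATIONS guard in `sync`. A minimal sketch of a custom source shows what `sync` is waiting on: every pending key must get a result from `fetch`, otherwise `sync` keeps yielding and, after 1000 iterations, raises the circular-dependency error shown above. Class, model, and argument names here are hypothetical.

# Hypothetical source that batches record lookups by id:
class RecordSource < GraphQL::Dataloader::Source
  def initialize(model_class)
    @model_class = model_class
  end

  # Must return one result per key, in the same order as `keys`.
  def fetch(keys)
    records_by_id = @model_class.where(id: keys).index_by(&:id)   # assumes an ActiveRecord-style model
    keys.map { |id| records_by_id[id] }
  end
end

# Typical use inside a resolver:
#   dataloader.with(RecordSource, Post).load(post_id)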
data/lib/graphql/dataloader.rb
CHANGED
@@ -27,11 +27,12 @@ module GraphQL
        attr_accessor :default_nonblocking
      end

-     AsyncDataloader = Class.new(self) { self.default_nonblocking = true }
+     NonblockingDataloader = Class.new(self) { self.default_nonblocking = true }

      def self.use(schema, nonblocking: nil)
        schema.dataloader_class = if nonblocking
-         AsyncDataloader
+         warn("`nonblocking: true` is deprecated from `GraphQL::Dataloader`, please use `GraphQL::Dataloader::AsyncDataloader` instead. Docs: https://graphql-ruby.org/dataloader/async_dataloader.")
+         NonblockingDataloader
        else
          self
        end
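In schema terms, the change above means the old keyword still works but warns and now selects NonblockingDataloader; the warning's suggested replacement is the new class added earlier in this release (sketch, schema context as in the earlier example):

use GraphQL::Dataloader, nonblocking: true     # deprecated: warns and installs NonblockingDataloader
use GraphQL::Dataloader::AsyncDataloader       # recommended replacement per the warning's docs link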
@@ -118,7 +119,12 @@ module GraphQL
      #
      # @return [void]
      def yield
-
+       if use_fiber_resume?
+         Fiber.yield
+       else
+         parent_fiber = Thread.current[:parent_fiber]
+         parent_fiber.transfer
+       end
        nil
      end

@@ -163,11 +169,7 @@ module GraphQL
      ensure
        @pending_jobs = prev_queue
        prev_pending_keys.each do |source_instance, pending|
-         pending.each do |key, value|
-           if !source_instance.results.key?(key)
-             source_instance.pending[key] = value
-           end
-         end
+         source_instance.pending.merge!(pending)
        end
      end

@@ -181,7 +183,7 @@ module GraphQL
          while first_pass || job_fibers.any?
            first_pass = false

-           while (f =
+           while (f = job_fibers.shift || spawn_job_fiber)
              if f.alive?
                finished = run_fiber(f)
                if !finished
@@ -202,37 +204,38 @@ module GraphQL
              end
              join_queues(source_fibers, next_source_fibers)
            end
+
          end
        end

        run_fiber(manager)

-       if manager.alive?
-         raise "Invariant: Manager fiber didn't terminate properly."
-       end
-
-       if job_fibers.any?
-         raise "Invariant: job fibers should have exited but #{job_fibers.size} remained"
-       end
-       if source_fibers.any?
-         raise "Invariant: source fibers should have exited but #{source_fibers.size} remained"
-       end
      rescue UncaughtThrowError => e
        throw e.tag, e.value
      end

      def run_fiber(f)
-
+       if use_fiber_resume?
+         f.resume
+       else
+         f.transfer
+       end
      end

      def spawn_fiber
        fiber_vars = get_fiber_variables
+       parent_fiber = use_fiber_resume? ? nil : Fiber.current
        Fiber.new(blocking: !@nonblocking) {
          set_fiber_variables(fiber_vars)
+         Thread.current[:parent_fiber] = parent_fiber
          yield
          # With `.transfer`, you have to explicitly pass back to the parent --
          # if the fiber is allowed to terminate normally, control is passed to the main fiber instead.
-
+         if parent_fiber
+           parent_fiber.transfer(true)
+         else
+           true
+         end
        }
      end

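The resume/transfer split above is easier to follow with a plain-Ruby illustration (core Fiber API, Ruby 3.x; not code from the gem): resume/Fiber.yield automatically bounces control back to the resumer, while transfer hands control to an explicit fiber, which is why spawn_fiber has to capture parent_fiber and pass control back to it by hand.

# resume / yield: the child never needs to know who resumed it.
child = Fiber.new do
  puts "child: step 1"
  Fiber.yield               # control returns to whoever called #resume
  puts "child: step 2"
end
child.resume
child.resume

# transfer: control goes only where it is explicitly sent, so the child keeps
# a reference to its parent (as Dataloader does via Thread.current[:parent_fiber]).
parent = Fiber.current
worker = Fiber.new do
  puts "worker: step 1"
  parent.transfer           # hand control back to the parent explicitly
  puts "worker: step 2"
  parent.transfer
end
worker.transfer
worker.transfer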
@@ -244,6 +247,15 @@ module GraphQL
        new_queue.clear
      end

+     def use_fiber_resume?
+       Fiber.respond_to?(:scheduler) &&
+         (
+           (defined?(::DummyScheduler) && Fiber.scheduler.is_a?(::DummyScheduler)) ||
+           (defined?(::Evt) && ::Evt::Scheduler.singleton_class::BACKENDS.any? { |be| Fiber.scheduler.is_a?(be) }) ||
+           (defined?(::Libev) && Fiber.scheduler.is_a?(::Libev::Scheduler))
+         )
+     end
+
      def spawn_job_fiber
        if @pending_jobs.any?
          spawn_fiber do
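use_fiber_resume? above relies only on core-Ruby reflection plus defined? checks for a few known scheduler gems; the relevant core calls, with a hypothetical installation step, are:

Fiber.respond_to?(:scheduler)   # => true on Ruby 3.0+
Fiber.scheduler                 # => nil unless Fiber.set_scheduler was called on this thread
# A test suite or server might install one of the recognized schedulers, e.g.
#   Fiber.set_scheduler(DummyScheduler.new)   # hypothetical constructor; then use_fiber_resume? is true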
@@ -273,3 +285,5 @@ module GraphQL
      end
    end
  end
+
+ require "graphql/dataloader/async_dataloader"