graphql 2.1.7 → 2.2.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: f0e3e74b44a52e45b992da7fe79d28ff53ea9871ac621517888f887c1a0bd8fb
4
- data.tar.gz: fc0a67822082dfc133b1b7f44135df17a5181de2e8404d4b756c4ac0b69a9761
3
+ metadata.gz: 1294ac9e09f2c6924a771765e3f79d6c29449235e65f736afd177270b2a90b99
4
+ data.tar.gz: eece859aa0e3137e5d7a67c39ec79917d3ee884e3011cb8e9f203801c78c0408
5
5
  SHA512:
6
- metadata.gz: 00bcdc566706f6fc323b0872c5c36230e7964a518e37825dc14a49cc274cbcac099ab3dca8a24608fafb598dcdfb5c239abb2be81359fd234dbce6954530c044
7
- data.tar.gz: efb58609553a215455f026bd686b01c699ea858a732733554198150c1c6a0b8f4b60b514c3aa0532bdc611267039c413cb82b5a63bff7a2a7f6b948a3c154814
6
+ metadata.gz: 4a8343dd7ddcdfbaa05cdfd462612810bbbaa600c62c2d392d367926e7c4742a14705cc05a7e14cfae6d0d5aecb0509e96b645edcbfcdb63d250349bdeef725e
7
+ data.tar.gz: de145d99232eecc3feb68fa0cb2f3cdfe73fa154924f3ad27ec25ab1e90783927a2b064667233379adc5f7bc1a38563a39466c193a5378b64e13676f802aeeeb
@@ -105,6 +105,9 @@ module Graphql
105
105
  template("#{base_type}.erb", "#{options[:directory]}/types/#{base_type}.rb")
106
106
  end
107
107
 
108
+ # All resolvers are defined as living in their own module, including this class.
109
+ template("base_resolver.erb", "#{options[:directory]}/resolvers/base.rb")
110
+
108
111
  # Note: You can't have a schema without the query type, otherwise introspection breaks
109
112
  template("query_type.erb", "#{options[:directory]}/types/query_type.rb")
110
113
  insert_root_type('query', 'QueryType')
<%# Template for the generated app's base resolver class.
    Installed by the install generator to `app/graphql/resolvers/base.rb`,
    so the class must live in the `Resolvers` namespace (not `Types`) to match
    the file path and the generator's "resolvers live in their own module" layout. %>
<% module_namespacing_when_supported do -%>
module Resolvers
  class BaseResolver < GraphQL::Schema::Resolver
  end
end
<% end -%>
# frozen_string_literal: true
module GraphQL
  class Dataloader
    # A Dataloader that runs jobs and sources inside `Async` tasks
    # (relies on the `async` gem's `Sync`, `Task#async`, and `Async::Condition` —
    # NOTE(review): no `require "async"` appears in this file; presumably the
    # application loads it — confirm load order before use).
    class AsyncDataloader < Dataloader
      # Pause the current task until the scheduler signals the condition stored
      # in `Thread.current[:graphql_dataloader_next_tick]` (set by
      # `spawn_job_task` / `spawn_source_task` below).
      # @return [void]
      def yield
        Thread.current[:graphql_dataloader_next_tick].wait
        nil
      end

      # Run all pending jobs, then run pending sources, signaling waiting tasks
      # between rounds, until no jobs remain alive.
      # @return [void]
      def run
        job_tasks = []          # tasks still alive after a job round
        next_job_tasks = []
        source_tasks = []       # tasks still alive after a source round
        next_source_tasks = []
        first_pass = true
        jobs_condition = Async::Condition.new
        sources_condition = Async::Condition.new
        Sync do |root_task|
          while first_pass || job_tasks.any?
            first_pass = false

            # Drain queued job tasks, spawning new ones while jobs are pending;
            # tasks that are still alive (paused in `yield`) are kept for the next round.
            root_task.async do |jobs_task|
              while (task = job_tasks.shift || spawn_job_task(jobs_task, jobs_condition))
                if task.alive?
                  next_job_tasks << task
                end
              end
            end.wait
            job_tasks.concat(next_job_tasks)
            next_job_tasks.clear

            # Run sources until none are pending anywhere in the cache.
            while source_tasks.any? || @source_cache.each_value.any? { |group_sources| group_sources.each_value.any?(&:pending?) }
              root_task.async do |sources_loop_task|
                while (task = source_tasks.shift || spawn_source_task(sources_loop_task, sources_condition))
                  if task.alive?
                    next_source_tasks << task
                  end
                end
              end.wait
              # Wake source tasks that paused waiting on `sources_condition`.
              sources_condition.signal
              source_tasks.concat(next_source_tasks)
              next_source_tasks.clear
            end
            # Sources are loaded; wake job tasks that were waiting for data.
            jobs_condition.signal
          end
        end
      rescue UncaughtThrowError => e
        # Re-raise `throw`s from inside tasks (e.g. GraphQL's own flow control)
        # so they propagate as if thrown in the caller.
        throw e.tag, e.value
      end

      private

      # If any jobs are pending, spawn a task that drains `@pending_jobs`.
      # Returns `nil` when there is nothing to do (terminates the caller's loop).
      def spawn_job_task(parent_task, condition)
        if @pending_jobs.any?
          # Copy fiber-locals into the new task (RequestStore etc. — see Dataloader).
          fiber_vars = get_fiber_variables
          parent_task.async do |t|
            set_fiber_variables(fiber_vars)
            # `#yield` in this task will wait on this condition.
            Thread.current[:graphql_dataloader_next_tick] = condition
            while job = @pending_jobs.shift
              job.call
            end
          end
        end
      end

      # Collect all pending sources from `@source_cache`; if there are any,
      # spawn a task that runs their pending keys. Returns `nil` otherwise.
      def spawn_source_task(parent_task, condition)
        pending_sources = nil
        @source_cache.each_value do |source_by_batch_params|
          source_by_batch_params.each_value do |source|
            if source.pending?
              pending_sources ||= []
              pending_sources << source
            end
          end
        end

        if pending_sources
          fiber_vars = get_fiber_variables
          parent_task.async do
            set_fiber_variables(fiber_vars)
            Thread.current[:graphql_dataloader_next_tick] = condition
            pending_sources.each(&:run_pending_keys)
          end
        end
      end
    end
  end
end
@@ -88,6 +88,7 @@ module GraphQL
88
88
  raise "Implement `#{self.class}#fetch(#{keys.inspect}) to return a record for each of the keys"
89
89
  end
90
90
 
91
+ MAX_ITERATIONS = 1000
91
92
  # Wait for a batch, if there's anything to batch.
92
93
  # Then run the batch and update the cache.
93
94
  # @return [void]
@@ -96,8 +97,8 @@ module GraphQL
96
97
  iterations = 0
97
98
  while pending_result_keys.any? { |key| !@results.key?(key) }
98
99
  iterations += 1
99
- if iterations > 1000
100
- raise "#{self.class}#sync tried 1000 times to load pending keys (#{pending_result_keys}), but they still weren't loaded. There is likely a circular dependency."
100
+ if iterations > MAX_ITERATIONS
101
+ raise "#{self.class}#sync tried #{MAX_ITERATIONS} times to load pending keys (#{pending_result_keys}), but they still weren't loaded. There is likely a circular dependency."
101
102
  end
102
103
  @dataloader.yield
103
104
  end
@@ -27,11 +27,12 @@ module GraphQL
27
27
  attr_accessor :default_nonblocking
28
28
  end
29
29
 
30
- AsyncDataloader = Class.new(self) { self.default_nonblocking = true }
30
+ NonblockingDataloader = Class.new(self) { self.default_nonblocking = true }
31
31
 
32
32
  def self.use(schema, nonblocking: nil)
33
33
  schema.dataloader_class = if nonblocking
34
- AsyncDataloader
34
+ warn("`nonblocking: true` is deprecated from `GraphQL::Dataloader`, please use `GraphQL::Dataloader::AsyncDataloader` instead. Docs: https://graphql-ruby.org/dataloader/async_dataloader.")
35
+ NonblockingDataloader
35
36
  else
36
37
  self
37
38
  end
@@ -118,7 +119,12 @@ module GraphQL
118
119
  #
119
120
  # @return [void]
120
121
  def yield
121
- Fiber.yield
122
+ if use_fiber_resume?
123
+ Fiber.yield
124
+ else
125
+ parent_fiber = Thread.current[:parent_fiber]
126
+ parent_fiber.transfer
127
+ end
122
128
  nil
123
129
  end
124
130
 
@@ -167,120 +173,100 @@ module GraphQL
167
173
  end
168
174
  end
169
175
 
170
- # @api private Move along, move along
171
176
  def run
172
- if @nonblocking && !Fiber.scheduler
173
- raise "`nonblocking: true` requires `Fiber.scheduler`, assign one with `Fiber.set_scheduler(...)` before executing GraphQL."
174
- end
175
- # At a high level, the algorithm is:
176
- #
177
- # A) Inside Fibers, run jobs from the queue one-by-one
178
- # - When one of the jobs yields to the dataloader (`Fiber.yield`), then that fiber will pause
179
- # - In that case, if there are still pending jobs, a new Fiber will be created to run jobs
180
- # - Continue until all jobs have been _started_ by a Fiber. (Any number of those Fibers may be waiting to be resumed, after their data is loaded)
181
- # B) Once all known jobs have been run until they are complete or paused for data, run all pending data sources.
182
- # - Similarly, create a Fiber to consume pending sources and tell them to load their data.
183
- # - If one of those Fibers pauses, then create a new Fiber to continue working through remaining pending sources.
184
- # - When a source causes another source to become pending, run the newly-pending source _first_, since it's a dependency of the previous one.
185
- # C) After all pending sources have been completely loaded (there are no more pending sources), resume any Fibers that were waiting for data.
186
- # - Those Fibers assume that source caches will have been populated with the data they were waiting for.
187
- # - Those Fibers may request data from a source again, in which case they will yield and be added to a new pending fiber list.
188
- # D) Once all pending fibers have been resumed once, return to `A` above.
189
- #
190
- # For whatever reason, the best implementation I could find was to order the steps `[D, A, B, C]`, with a special case for skipping `D`
191
- # on the first pass. I just couldn't find a better way to write the loops in a way that was DRY and easy to read.
192
- #
193
- pending_fibers = []
194
- next_fibers = []
195
- pending_source_fibers = []
177
+ job_fibers = []
178
+ next_job_fibers = []
179
+ source_fibers = []
196
180
  next_source_fibers = []
197
181
  first_pass = true
198
-
199
- while first_pass || (f = pending_fibers.shift)
200
- if first_pass
182
+ manager = spawn_fiber do
183
+ while first_pass || job_fibers.any?
201
184
  first_pass = false
202
- else
203
- # These fibers were previously waiting for sources to load data,
204
- # resume them. (They might wait again, in which case, re-enqueue them.)
205
- resume(f)
206
- if f.alive?
207
- next_fibers << f
208
- end
209
- end
210
185
 
211
- while @pending_jobs.any?
212
- # Create a Fiber to consume jobs until one of the jobs yields
213
- # or jobs run out
214
- f = spawn_fiber {
215
- while (job = @pending_jobs.shift)
216
- job.call
186
+ while (f = job_fibers.shift || spawn_job_fiber)
187
+ if f.alive?
188
+ finished = run_fiber(f)
189
+ if !finished
190
+ next_job_fibers << f
191
+ end
217
192
  end
218
- }
219
- resume(f)
220
- # In this case, the job yielded. Queue it up to run again after
221
- # we load whatever it's waiting for.
222
- if f.alive?
223
- next_fibers << f
224
- end
225
- end
226
-
227
- if pending_fibers.empty?
228
- # Now, run all Sources which have become pending _before_ resuming GraphQL execution.
229
- # Sources might queue up other Sources, which is fine -- those will also run before resuming execution.
230
- #
231
- # This is where an evented approach would be even better -- can we tell which
232
- # fibers are ready to continue, and continue execution there?
233
- #
234
- if (first_source_fiber = create_source_fiber)
235
- pending_source_fibers << first_source_fiber
236
193
  end
194
+ join_queues(job_fibers, next_job_fibers)
237
195
 
238
- while pending_source_fibers.any?
239
- while (outer_source_fiber = pending_source_fibers.pop)
240
- resume(outer_source_fiber)
241
- if outer_source_fiber.alive?
242
- next_source_fibers << outer_source_fiber
243
- end
244
- if (next_source_fiber = create_source_fiber)
245
- pending_source_fibers << next_source_fiber
196
+ while source_fibers.any? || @source_cache.each_value.any? { |group_sources| group_sources.each_value.any?(&:pending?) }
197
+ while (f = source_fibers.shift || spawn_source_fiber)
198
+ if f.alive?
199
+ finished = run_fiber(f)
200
+ if !finished
201
+ next_source_fibers << f
202
+ end
246
203
  end
247
204
  end
248
- join_queues(pending_source_fibers, next_source_fibers)
249
- next_source_fibers.clear
205
+ join_queues(source_fibers, next_source_fibers)
250
206
  end
251
- # Move newly-enqueued Fibers on to the list to be resumed.
252
- # Clear out the list of next-round Fibers, so that
253
- # any Fibers that pause can be put on it.
254
- join_queues(pending_fibers, next_fibers)
255
- next_fibers.clear
207
+
256
208
  end
257
209
  end
258
210
 
259
- if @pending_jobs.any?
260
- raise "Invariant: #{@pending_jobs.size} pending jobs"
261
- elsif pending_fibers.any?
262
- raise "Invariant: #{pending_fibers.size} pending fibers"
263
- elsif next_fibers.any?
264
- raise "Invariant: #{next_fibers.size} next fibers"
265
- end
266
- nil
211
+ run_fiber(manager)
212
+
213
+ rescue UncaughtThrowError => e
214
+ throw e.tag, e.value
267
215
  end
268
216
 
269
- def join_queues(previous_queue, next_queue)
270
- if @nonblocking
271
- Fiber.scheduler.run
272
- next_queue.select!(&:alive?)
217
+ def run_fiber(f)
218
+ if use_fiber_resume?
219
+ f.resume
220
+ else
221
+ f.transfer
273
222
  end
274
- previous_queue.concat(next_queue)
223
+ end
224
+
225
+ def spawn_fiber
226
+ fiber_vars = get_fiber_variables
227
+ parent_fiber = use_fiber_resume? ? nil : Fiber.current
228
+ Fiber.new(blocking: !@nonblocking) {
229
+ set_fiber_variables(fiber_vars)
230
+ Thread.current[:parent_fiber] = parent_fiber
231
+ yield
232
+ # With `.transfer`, you have to explicitly pass back to the parent --
233
+ # if the fiber is allowed to terminate normally, control is passed to the main fiber instead.
234
+ if parent_fiber
235
+ parent_fiber.transfer(true)
236
+ else
237
+ true
238
+ end
239
+ }
275
240
  end
276
241
 
277
242
  private
278
243
 
279
- # If there are pending sources, return a fiber for running them.
280
- # Otherwise, return `nil`.
281
- #
282
- # @return [Fiber, nil]
283
- def create_source_fiber
244
+ def join_queues(prev_queue, new_queue)
245
+ @nonblocking && Fiber.scheduler.run
246
+ prev_queue.concat(new_queue)
247
+ new_queue.clear
248
+ end
249
+
250
+ def use_fiber_resume?
251
+ Fiber.respond_to?(:scheduler) &&
252
+ (
253
+ (defined?(::DummyScheduler) && Fiber.scheduler.is_a?(::DummyScheduler)) ||
254
+ (defined?(::Evt) && ::Evt::Scheduler.singleton_class::BACKENDS.any? { |be| Fiber.scheduler.is_a?(be) }) ||
255
+ (defined?(::Libev) && Fiber.scheduler.is_a?(::Libev::Scheduler))
256
+ )
257
+ end
258
+
259
+ def spawn_job_fiber
260
+ if @pending_jobs.any?
261
+ spawn_fiber do
262
+ while job = @pending_jobs.shift
263
+ job.call
264
+ end
265
+ end
266
+ end
267
+ end
268
+
269
+ def spawn_source_fiber
284
270
  pending_sources = nil
285
271
  @source_cache.each_value do |source_by_batch_params|
286
272
  source_by_batch_params.each_value do |source|
@@ -292,48 +278,12 @@ module GraphQL
292
278
  end
293
279
 
294
280
  if pending_sources
295
- # By passing the whole array into this Fiber, it's possible that we set ourselves up for a bunch of no-ops.
296
- # For example, if you have sources `[a, b, c]`, and `a` is loaded, then `b` yields to wait for `d`, then
297
- # the next fiber would be dispatched with `[c, d]`. It would fulfill `c`, then `d`, then eventually
298
- # the previous fiber would start up again. `c` would no longer be pending, but it would still receive `.run_pending_keys`.
299
- # That method is short-circuited since it isn't pending any more, but it's still a waste.
300
- #
301
- # This design could probably be improved by maintaining a `@pending_sources` queue which is shared by the fibers,
302
- # similar to `@pending_jobs`. That way, when a fiber is resumed, it would never pick up work that was finished by a different fiber.
303
- source_fiber = spawn_fiber do
281
+ spawn_fiber do
304
282
  pending_sources.each(&:run_pending_keys)
305
283
  end
306
284
  end
307
-
308
- source_fiber
309
- end
310
-
311
- def resume(fiber)
312
- fiber.resume
313
- rescue UncaughtThrowError => e
314
- throw e.tag, e.value
315
- end
316
-
317
- # Copies the thread local vars into the fiber thread local vars. Many
318
- # gems (such as RequestStore, MiniRacer, etc.) rely on thread local vars
319
- # to keep track of execution context, and without this they do not
320
- # behave as expected.
321
- #
322
- # @see https://github.com/rmosolgo/graphql-ruby/issues/3449
323
- def spawn_fiber
324
- fiber_vars = get_fiber_variables
325
-
326
- if @nonblocking
327
- Fiber.new(blocking: false) do
328
- set_fiber_variables(fiber_vars)
329
- yield
330
- end
331
- else
332
- Fiber.new do
333
- set_fiber_variables(fiber_vars)
334
- yield
335
- end
336
- end
337
285
  end
338
286
  end
339
287
  end
288
+
289
+ require "graphql/dataloader/async_dataloader"