graphql 2.1.7 → 2.1.8

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: f0e3e74b44a52e45b992da7fe79d28ff53ea9871ac621517888f887c1a0bd8fb
- data.tar.gz: fc0a67822082dfc133b1b7f44135df17a5181de2e8404d4b756c4ac0b69a9761
+ metadata.gz: 5171a9a622fedcee350cbb3edff60fa524d000aa88e3bab802de59089d46d00f
+ data.tar.gz: 4d111262bacb8a687a1c2113de7f065af18b3e9153f19c87341d10cd14aeda34
  SHA512:
- metadata.gz: 00bcdc566706f6fc323b0872c5c36230e7964a518e37825dc14a49cc274cbcac099ab3dca8a24608fafb598dcdfb5c239abb2be81359fd234dbce6954530c044
- data.tar.gz: efb58609553a215455f026bd686b01c699ea858a732733554198150c1c6a0b8f4b60b514c3aa0532bdc611267039c413cb82b5a63bff7a2a7f6b948a3c154814
+ metadata.gz: 23d10d5146a5ba2ef8092e75197d932e23a30f2763354bd0114c37b1c9fba25c757a3a78bd3ae789a47c9317c02b5e3654a47064bcb42dd750464228f29b97c5
+ data.tar.gz: 13863ed6840cf38f52cb81b6c90539bacc0a76f3aea96cc86f69aeed397306bee1aaa48c760f4c17e39074f9d3c588338aff7887450bd6c792b0d6911b3af59c
@@ -105,6 +105,9 @@ module Graphql
  template("#{base_type}.erb", "#{options[:directory]}/types/#{base_type}.rb")
  end
 
+ # All resolvers are defined as living in their own module, including this class.
+ template("base_resolver.erb", "#{options[:directory]}/resolvers/base.rb")
+
  # Note: You can't have a schema without the query type, otherwise introspection breaks
  template("query_type.erb", "#{options[:directory]}/types/query_type.rb")
  insert_root_type('query', 'QueryType')
@@ -0,0 +1,6 @@
+ <% module_namespacing_when_supported do -%>
+ module Types
+ class BaseResolver < GraphQL::Schema::Resolver
+ end
+ end
+ <% end -%>
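
Note: rendered with the default generator options and no application-level namespace wrapper, the template above produces roughly the following file at the path added by the generator hunk earlier (options[:directory]/resolvers/base.rb, typically app/graphql/resolvers/base.rb); the exact output depends on module_namespacing_when_supported.

    module Types
      class BaseResolver < GraphQL::Schema::Resolver
      end
    end
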
@@ -118,7 +118,12 @@ module GraphQL
  #
  # @return [void]
  def yield
- Fiber.yield
+ if use_fiber_resume?
+ Fiber.yield
+ else
+ parent_fiber = Thread.current[:parent_fiber]
+ parent_fiber.transfer
+ end
  nil
  end
 
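
Note: for readers following this dataloader change -- Fiber.yield suspends the current fiber and returns control to whichever fiber resumed it, while Fiber#transfer hands control to an explicitly chosen fiber (here, the parent stored in Thread.current[:parent_fiber]). A minimal standalone sketch of the two styles, assuming Ruby 3.x; this is plain Ruby, not graphql-ruby API:

    # resume / Fiber.yield: control always bounces back to the caller of #resume.
    worker = Fiber.new do
      puts "step 1"
      Fiber.yield          # suspend; control returns to whoever resumed us
      puts "step 2"
    end
    worker.resume          # prints "step 1"
    worker.resume          # prints "step 2"

    # transfer: control jumps to an explicitly chosen fiber; the child must
    # transfer back to its parent itself -- there is no implicit return on pause.
    parent = Fiber.current
    child = Fiber.new do
      puts "child working"
      parent.transfer      # hand control back to the parent explicitly
    end
    child.transfer         # prints "child working", then control comes back here
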
@@ -167,120 +172,100 @@ module GraphQL
  end
  end
 
- # @api private Move along, move along
  def run
- if @nonblocking && !Fiber.scheduler
- raise "`nonblocking: true` requires `Fiber.scheduler`, assign one with `Fiber.set_scheduler(...)` before executing GraphQL."
- end
- # At a high level, the algorithm is:
- #
- # A) Inside Fibers, run jobs from the queue one-by-one
- # - When one of the jobs yields to the dataloader (`Fiber.yield`), then that fiber will pause
- # - In that case, if there are still pending jobs, a new Fiber will be created to run jobs
- # - Continue until all jobs have been _started_ by a Fiber. (Any number of those Fibers may be waiting to be resumed, after their data is loaded)
- # B) Once all known jobs have been run until they are complete or paused for data, run all pending data sources.
- # - Similarly, create a Fiber to consume pending sources and tell them to load their data.
- # - If one of those Fibers pauses, then create a new Fiber to continue working through remaining pending sources.
- # - When a source causes another source to become pending, run the newly-pending source _first_, since it's a dependency of the previous one.
- # C) After all pending sources have been completely loaded (there are no more pending sources), resume any Fibers that were waiting for data.
- # - Those Fibers assume that source caches will have been populated with the data they were waiting for.
- # - Those Fibers may request data from a source again, in which case they will yeilded and be added to a new pending fiber list.
- # D) Once all pending fibers have been resumed once, return to `A` above.
- #
- # For whatever reason, the best implementation I could find was to order the steps `[D, A, B, C]`, with a special case for skipping `D`
- # on the first pass. I just couldn't find a better way to write the loops in a way that was DRY and easy to read.
- #
- pending_fibers = []
- next_fibers = []
- pending_source_fibers = []
+ job_fibers = []
+ next_job_fibers = []
+ source_fibers = []
  next_source_fibers = []
  first_pass = true
-
- while first_pass || (f = pending_fibers.shift)
- if first_pass
+ manager = spawn_fiber do
+ while first_pass || job_fibers.any?
  first_pass = false
- else
- # These fibers were previously waiting for sources to load data,
- # resume them. (They might wait again, in which case, re-enqueue them.)
- resume(f)
- if f.alive?
- next_fibers << f
- end
- end
 
- while @pending_jobs.any?
- # Create a Fiber to consume jobs until one of the jobs yields
- # or jobs run out
- f = spawn_fiber {
- while (job = @pending_jobs.shift)
- job.call
+ while (f = job_fibers.shift || spawn_job_fiber)
+ if f.alive?
+ finished = run_fiber(f)
+ if !finished
+ next_job_fibers << f
+ end
  end
- }
- resume(f)
- # In this case, the job yielded. Queue it up to run again after
- # we load whatever it's waiting for.
- if f.alive?
- next_fibers << f
- end
- end
-
- if pending_fibers.empty?
- # Now, run all Sources which have become pending _before_ resuming GraphQL execution.
- # Sources might queue up other Sources, which is fine -- those will also run before resuming execution.
- #
- # This is where an evented approach would be even better -- can we tell which
- # fibers are ready to continue, and continue execution there?
- #
- if (first_source_fiber = create_source_fiber)
- pending_source_fibers << first_source_fiber
  end
+ join_queues(job_fibers, next_job_fibers)
 
- while pending_source_fibers.any?
- while (outer_source_fiber = pending_source_fibers.pop)
- resume(outer_source_fiber)
- if outer_source_fiber.alive?
- next_source_fibers << outer_source_fiber
- end
- if (next_source_fiber = create_source_fiber)
- pending_source_fibers << next_source_fiber
+ while source_fibers.any? || @source_cache.each_value.any? { |group_sources| group_sources.each_value.any?(&:pending?) }
+ while (f = source_fibers.shift || spawn_source_fiber)
+ if f.alive?
+ finished = run_fiber(f)
+ if !finished
+ next_source_fibers << f
+ end
+ end
  end
  end
- join_queues(pending_source_fibers, next_source_fibers)
- next_source_fibers.clear
+ join_queues(source_fibers, next_source_fibers)
  end
- # Move newly-enqueued Fibers on to the list to be resumed.
- # Clear out the list of next-round Fibers, so that
- # any Fibers that pause can be put on it.
- join_queues(pending_fibers, next_fibers)
- next_fibers.clear
+
  end
  end
 
- if @pending_jobs.any?
- raise "Invariant: #{@pending_jobs.size} pending jobs"
- elsif pending_fibers.any?
- raise "Invariant: #{pending_fibers.size} pending fibers"
- elsif next_fibers.any?
- raise "Invariant: #{next_fibers.size} next fibers"
- end
- nil
+ run_fiber(manager)
+
+ rescue UncaughtThrowError => e
+ throw e.tag, e.value
  end
 
- def join_queues(previous_queue, next_queue)
- if @nonblocking
- Fiber.scheduler.run
- next_queue.select!(&:alive?)
+ def run_fiber(f)
+ if use_fiber_resume?
+ f.resume
+ else
+ f.transfer
  end
- previous_queue.concat(next_queue)
+ end
+
+ def spawn_fiber
+ fiber_vars = get_fiber_variables
+ parent_fiber = use_fiber_resume? ? nil : Fiber.current
+ Fiber.new(blocking: !@nonblocking) {
+ set_fiber_variables(fiber_vars)
+ Thread.current[:parent_fiber] = parent_fiber
+ yield
+ # With `.transfer`, you have to explicitly pass back to the parent --
+ # if the fiber is allowed to terminate normally, control is passed to the main fiber instead.
+ if parent_fiber
+ parent_fiber.transfer(true)
+ else
+ true
+ end
+ }
  end
 
  private
 
- # If there are pending sources, return a fiber for running them.
- # Otherwise, return `nil`.
- #
- # @return [Fiber, nil]
- def create_source_fiber
+ def join_queues(prev_queue, new_queue)
+ @nonblocking && Fiber.scheduler.run
+ prev_queue.concat(new_queue)
+ new_queue.clear
+ end
+
+ def use_fiber_resume?
+ Fiber.respond_to?(:scheduler) &&
+ (
+ (defined?(::DummyScheduler) && Fiber.scheduler.is_a?(::DummyScheduler)) ||
+ (defined?(::Evt) && ::Evt::Scheduler.singleton_class::BACKENDS.any? { |be| Fiber.scheduler.is_a?(be) }) ||
+ (defined?(::Libev) && Fiber.scheduler.is_a?(::Libev::Scheduler))
+ )
+ end
+
+ def spawn_job_fiber
+ if @pending_jobs.any?
+ spawn_fiber do
+ while job = @pending_jobs.shift
+ job.call
+ end
+ end
+ end
+ end
+
+ def spawn_source_fiber
  pending_sources = nil
  @source_cache.each_value do |source_by_batch_params|
  source_by_batch_params.each_value do |source|
@@ -292,48 +277,10 @@ module GraphQL
  end
 
  if pending_sources
- # By passing the whole array into this Fiber, it's possible that we set ourselves up for a bunch of no-ops.
- # For example, if you have sources `[a, b, c]`, and `a` is loaded, then `b` yields to wait for `d`, then
- # the next fiber would be dispatched with `[c, d]`. It would fulfill `c`, then `d`, then eventually
- # the previous fiber would start up again. `c` would no longer be pending, but it would still receive `.run_pending_keys`.
- # That method is short-circuited since it isn't pending any more, but it's still a waste.
- #
- # This design could probably be improved by maintaining a `@pending_sources` queue which is shared by the fibers,
- # similar to `@pending_jobs`. That way, when a fiber is resumed, it would never pick up work that was finished by a different fiber.
- source_fiber = spawn_fiber do
+ spawn_fiber do
  pending_sources.each(&:run_pending_keys)
  end
  end
-
- source_fiber
- end
-
- def resume(fiber)
- fiber.resume
- rescue UncaughtThrowError => e
- throw e.tag, e.value
- end
-
- # Copies the thread local vars into the fiber thread local vars. Many
- # gems (such as RequestStore, MiniRacer, etc.) rely on thread local vars
- # to keep track of execution context, and without this they do not
- # behave as expected.
- #
- # @see https://github.com/rmosolgo/graphql-ruby/issues/3449
- def spawn_fiber
- fiber_vars = get_fiber_variables
-
- if @nonblocking
- Fiber.new(blocking: false) do
- set_fiber_variables(fiber_vars)
- yield
- end
- else
- Fiber.new do
- set_fiber_variables(fiber_vars)
- yield
- end
- end
  end
  end
  end
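
Note: the run_fiber/spawn_fiber pair above encodes a small handshake -- a child fiber transfers plain control back to its parent when it pauses for data, and transfers true back once its block has finished, so the parent knows whether to re-enqueue it (the `finished = run_fiber(f)` checks). A standalone sketch of that handshake, assuming Ruby 3.x; illustrative only, not the gem's API:

    parent = Fiber.current

    child = Fiber.new do
      # ... start some work ...
      parent.transfer        # pause: hand control back without finishing (parent sees nil)
      # ... re-entered later via transfer; finish up ...
      parent.transfer(true)  # done: signal completion to the parent
    end

    finished = child.transfer          # => nil, so the child is still pending
    pending = finished ? [] : [child]

    finished = pending.shift.transfer  # => true, the child ran to completion
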
@@ -221,8 +221,13 @@ module GraphQL
  #
  # This will have to be called later, when the runtime object _is_ available.
  value
- else
+ elsif obj.respond_to?(@prepare)
  obj.public_send(@prepare, value)
+ elsif owner.respond_to?(@prepare)
+ owner.public_send(@prepare, value, context || obj.context)
+ else
+ raise "Invalid prepare for #{@owner.name}.name: #{@prepare.inspect}. "\
+ "Could not find prepare method #{@prepare} on #{obj.class} or #{owner}."
  end
  elsif @prepare.respond_to?(:call)
  @prepare.call(value, context || obj.context)
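
Note: in practical terms, the prepare: change above means a Symbol passed to prepare: is now looked up on the runtime object first and then on the argument's owner (which is called with the value and the query context), and an unresolvable symbol raises a descriptive error instead of NoMethodError. A hypothetical sketch of the owner-level case on a resolver; the Resolvers::Posts, Types::PostType, and Post names are illustrative, not from the gem:

    module Resolvers
      class Posts < GraphQL::Schema::Resolver
        type [Types::PostType], null: false

        # A Symbol prepare: may now also name a method on the argument's owner
        # (this resolver class); it is called with (value, context).
        argument :category, String, required: false, prepare: :normalize_category

        def self.normalize_category(value, _context)
          value&.strip&.downcase
        end

        def resolve(category: nil)
          scope = Post.all
          category ? scope.where(category: category) : scope
        end
      end
    end
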
@@ -1,4 +1,4 @@
  # frozen_string_literal: true
  module GraphQL
- VERSION = "2.1.7"
+ VERSION = "2.1.8"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: graphql
  version: !ruby/object:Gem::Version
- version: 2.1.7
+ version: 2.1.8
  platform: ruby
  authors:
  - Robert Mosolgo
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2023-12-04 00:00:00.000000000 Z
+ date: 2023-12-18 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: racc
@@ -270,6 +270,7 @@ files:
  - lib/generators/graphql/templates/base_input_object.erb
  - lib/generators/graphql/templates/base_interface.erb
  - lib/generators/graphql/templates/base_object.erb
+ - lib/generators/graphql/templates/base_resolver.erb
  - lib/generators/graphql/templates/base_scalar.erb
  - lib/generators/graphql/templates/base_union.erb
  - lib/generators/graphql/templates/enum.erb