graphql 2.1.6 → 2.1.8

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 9016cf336ff2bee9f58e074f1402347676f93f9fe3310a7ae13db84fb94b4c40
- data.tar.gz: 0e9dd85e27ffe62fbb8c8acbd749c00e4c45aa9e90440975e00a8682f43014bf
+ metadata.gz: 5171a9a622fedcee350cbb3edff60fa524d000aa88e3bab802de59089d46d00f
+ data.tar.gz: 4d111262bacb8a687a1c2113de7f065af18b3e9153f19c87341d10cd14aeda34
  SHA512:
- metadata.gz: fa610bc9bad0daae1e7e951c35290cafdeccb17ae726d905a0eb2c44b0b7ae7887a42b3937fe556e16cc9469448164928d6690e64bd6e6e862768ca715682cca
- data.tar.gz: 9bb2b85296f05df54cf47a904d8a37d2723b6293db97ee08eab50fe70a02d99ebd7f97a5e6ab8638a59e0ff902055f9a87d3aa1ebd36d78fc6b4f34910df720d
+ metadata.gz: 23d10d5146a5ba2ef8092e75197d932e23a30f2763354bd0114c37b1c9fba25c757a3a78bd3ae789a47c9317c02b5e3654a47064bcb42dd750464228f29b97c5
+ data.tar.gz: 13863ed6840cf38f52cb81b6c90539bacc0a76f3aea96cc86f69aeed397306bee1aaa48c760f4c17e39074f9d3c588338aff7887450bd6c792b0d6911b3af59c
@@ -105,6 +105,9 @@ module Graphql
  template("#{base_type}.erb", "#{options[:directory]}/types/#{base_type}.rb")
  end
 
+ # All resolvers are defined as living in their own module, including this class.
+ template("base_resolver.erb", "#{options[:directory]}/resolvers/base.rb")
+
  # Note: You can't have a schema without the query type, otherwise introspection breaks
  template("query_type.erb", "#{options[:directory]}/types/query_type.rb")
  insert_root_type('query', 'QueryType')
@@ -0,0 +1,6 @@
+ <% module_namespacing_when_supported do -%>
+ module Types
+ class BaseResolver < GraphQL::Schema::Resolver
+ end
+ end
+ <% end -%>
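
The generator now writes a base resolver class alongside the other base types. A hedged sketch of how an application might build on it once generated (the resolver name, return type, and field are illustrative, not part of the gem; note the generated template namespaces the class under `Types` even though the file lands in resolvers/base.rb):

# app/graphql/resolvers/recent_posts.rb -- hypothetical application code
module Resolvers
  class RecentPosts < Types::BaseResolver # the generated base class shown above
    type [String], null: false

    def resolve
      ["first post", "second post"] # illustrative data
    end
  end
end

# Mounted on a field in the usual way:
#   field :recent_posts, resolver: Resolvers::RecentPosts
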
@@ -2,6 +2,12 @@
  module GraphQL
  class Backtrace
  module Trace
+ def initialize(*args, **kwargs, &block)
+ @__backtrace_contexts = {}
+ @__backtrace_last_context = nil
+ super
+ end
+
  def validate(query:, validate:)
  if query.multiplex
  push_query_backtrace_context(query)
@@ -42,36 +48,27 @@ module GraphQL
  rescue StandardError => err
  # This is an unhandled error from execution,
  # Re-raise it with a GraphQL trace.
- multiplex_context = multiplex.context
- potential_context = multiplex_context[:last_graphql_backtrace_context]
-
+ potential_context = @__backtrace_last_context
  if potential_context.is_a?(GraphQL::Query::Context) ||
  potential_context.is_a?(Backtrace::Frame)
  raise TracedError.new(err, potential_context)
  else
  raise
  end
- ensure
- multiplex_context = multiplex.context
- multiplex_context.delete(:graphql_backtrace_contexts)
- multiplex_context.delete(:last_graphql_backtrace_context)
  end
 
  private
 
  def push_query_backtrace_context(query)
  push_data = query
- multiplex = query.multiplex
  push_key = []
- push_storage = multiplex.context[:graphql_backtrace_contexts] ||= {}
- push_storage[push_key] = push_data
- multiplex.context[:last_graphql_backtrace_context] = push_data
+ @__backtrace_contexts[push_key] = push_data
+ @__backtrace_last_context = push_data
  end
 
  def push_field_backtrace_context(field, query, ast_node, arguments, object)
- multiplex = query.multiplex
  push_key = query.context[:current_path]
- push_storage = multiplex.context[:graphql_backtrace_contexts]
+ push_storage = @__backtrace_contexts
  parent_frame = push_storage[push_key[0..-2]]
 
  if parent_frame.is_a?(GraphQL::Query)
@@ -87,10 +84,10 @@ module GraphQL
  arguments: arguments,
  parent_frame: parent_frame,
  )
-
  push_storage[push_key] = push_data
- multiplex.context[:last_graphql_backtrace_context] = push_data
+ @__backtrace_last_context = push_data
  end
+
  end
  end
  end
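
These two hunks move the backtrace bookkeeping out of `multiplex.context[:graphql_backtrace_contexts]` / `[:last_graphql_backtrace_context]` and into instance variables on the trace object, so the `ensure` cleanup is no longer needed. The feature itself is still opted into the same way; roughly (schema and query string are placeholders):

class MySchema < GraphQL::Schema
  use GraphQL::Backtrace # annotate runtime errors with a GraphQL-aware backtrace table
end

# ...or per query:
MySchema.execute(query_string, context: { backtrace: true })
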
@@ -118,7 +118,12 @@ module GraphQL
  #
  # @return [void]
  def yield
- Fiber.yield
+ if use_fiber_resume?
+ Fiber.yield
+ else
+ parent_fiber = Thread.current[:parent_fiber]
+ parent_fiber.transfer
+ end
  nil
  end
 
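
The switch from `Fiber.yield` to `Fiber#transfer` changes who receives control when a job pauses. A minimal, self-contained illustration of the two styles in plain Ruby (not graphql-ruby API):

# resume/yield: control always bounces back to whoever called #resume.
worker = Fiber.new { Fiber.yield(:paused); :done }
worker.resume # => :paused (worker suspended at Fiber.yield)
worker.resume # => :done

# transfer: control goes wherever it is explicitly sent, so the child
# must transfer back to a known parent fiber itself.
# (On very old Rubies, Fiber#transfer needed `require "fiber"`.)
parent = Fiber.current
child = Fiber.new { parent.transfer(:paused) }
child.transfer # => :paused (control returned via the explicit transfer)
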
@@ -167,120 +172,100 @@ module GraphQL
  end
  end
 
- # @api private Move along, move along
  def run
- if @nonblocking && !Fiber.scheduler
- raise "`nonblocking: true` requires `Fiber.scheduler`, assign one with `Fiber.set_scheduler(...)` before executing GraphQL."
- end
- # At a high level, the algorithm is:
- #
- # A) Inside Fibers, run jobs from the queue one-by-one
- # - When one of the jobs yields to the dataloader (`Fiber.yield`), then that fiber will pause
- # - In that case, if there are still pending jobs, a new Fiber will be created to run jobs
- # - Continue until all jobs have been _started_ by a Fiber. (Any number of those Fibers may be waiting to be resumed, after their data is loaded)
- # B) Once all known jobs have been run until they are complete or paused for data, run all pending data sources.
- # - Similarly, create a Fiber to consume pending sources and tell them to load their data.
- # - If one of those Fibers pauses, then create a new Fiber to continue working through remaining pending sources.
- # - When a source causes another source to become pending, run the newly-pending source _first_, since it's a dependency of the previous one.
- # C) After all pending sources have been completely loaded (there are no more pending sources), resume any Fibers that were waiting for data.
- # - Those Fibers assume that source caches will have been populated with the data they were waiting for.
- # - Those Fibers may request data from a source again, in which case they will yeilded and be added to a new pending fiber list.
- # D) Once all pending fibers have been resumed once, return to `A` above.
- #
- # For whatever reason, the best implementation I could find was to order the steps `[D, A, B, C]`, with a special case for skipping `D`
- # on the first pass. I just couldn't find a better way to write the loops in a way that was DRY and easy to read.
- #
- pending_fibers = []
- next_fibers = []
- pending_source_fibers = []
+ job_fibers = []
+ next_job_fibers = []
+ source_fibers = []
  next_source_fibers = []
  first_pass = true
-
- while first_pass || (f = pending_fibers.shift)
- if first_pass
+ manager = spawn_fiber do
+ while first_pass || job_fibers.any?
  first_pass = false
- else
- # These fibers were previously waiting for sources to load data,
- # resume them. (They might wait again, in which case, re-enqueue them.)
- resume(f)
- if f.alive?
- next_fibers << f
- end
- end
 
- while @pending_jobs.any?
- # Create a Fiber to consume jobs until one of the jobs yields
- # or jobs run out
- f = spawn_fiber {
- while (job = @pending_jobs.shift)
- job.call
+ while (f = job_fibers.shift || spawn_job_fiber)
+ if f.alive?
+ finished = run_fiber(f)
+ if !finished
+ next_job_fibers << f
+ end
  end
- }
- resume(f)
- # In this case, the job yielded. Queue it up to run again after
- # we load whatever it's waiting for.
- if f.alive?
- next_fibers << f
- end
- end
-
- if pending_fibers.empty?
- # Now, run all Sources which have become pending _before_ resuming GraphQL execution.
- # Sources might queue up other Sources, which is fine -- those will also run before resuming execution.
- #
- # This is where an evented approach would be even better -- can we tell which
- # fibers are ready to continue, and continue execution there?
- #
- if (first_source_fiber = create_source_fiber)
- pending_source_fibers << first_source_fiber
  end
+ join_queues(job_fibers, next_job_fibers)
 
- while pending_source_fibers.any?
- while (outer_source_fiber = pending_source_fibers.pop)
- resume(outer_source_fiber)
- if outer_source_fiber.alive?
- next_source_fibers << outer_source_fiber
- end
- if (next_source_fiber = create_source_fiber)
- pending_source_fibers << next_source_fiber
+ while source_fibers.any? || @source_cache.each_value.any? { |group_sources| group_sources.each_value.any?(&:pending?) }
+ while (f = source_fibers.shift || spawn_source_fiber)
+ if f.alive?
+ finished = run_fiber(f)
+ if !finished
+ next_source_fibers << f
+ end
  end
  end
- join_queues(pending_source_fibers, next_source_fibers)
- next_source_fibers.clear
+ join_queues(source_fibers, next_source_fibers)
  end
- # Move newly-enqueued Fibers on to the list to be resumed.
- # Clear out the list of next-round Fibers, so that
- # any Fibers that pause can be put on it.
- join_queues(pending_fibers, next_fibers)
- next_fibers.clear
+
  end
  end
 
- if @pending_jobs.any?
- raise "Invariant: #{@pending_jobs.size} pending jobs"
- elsif pending_fibers.any?
- raise "Invariant: #{pending_fibers.size} pending fibers"
- elsif next_fibers.any?
- raise "Invariant: #{next_fibers.size} next fibers"
- end
- nil
+ run_fiber(manager)
+
+ rescue UncaughtThrowError => e
+ throw e.tag, e.value
  end
 
- def join_queues(previous_queue, next_queue)
- if @nonblocking
- Fiber.scheduler.run
- next_queue.select!(&:alive?)
+ def run_fiber(f)
+ if use_fiber_resume?
+ f.resume
+ else
+ f.transfer
  end
- previous_queue.concat(next_queue)
+ end
+
+ def spawn_fiber
+ fiber_vars = get_fiber_variables
+ parent_fiber = use_fiber_resume? ? nil : Fiber.current
+ Fiber.new(blocking: !@nonblocking) {
+ set_fiber_variables(fiber_vars)
+ Thread.current[:parent_fiber] = parent_fiber
+ yield
+ # With `.transfer`, you have to explicitly pass back to the parent --
+ # if the fiber is allowed to terminate normally, control is passed to the main fiber instead.
+ if parent_fiber
+ parent_fiber.transfer(true)
+ else
+ true
+ end
+ }
  end
 
  private
 
- # If there are pending sources, return a fiber for running them.
- # Otherwise, return `nil`.
- #
- # @return [Fiber, nil]
- def create_source_fiber
+ def join_queues(prev_queue, new_queue)
+ @nonblocking && Fiber.scheduler.run
+ prev_queue.concat(new_queue)
+ new_queue.clear
+ end
+
+ def use_fiber_resume?
+ Fiber.respond_to?(:scheduler) &&
+ (
+ (defined?(::DummyScheduler) && Fiber.scheduler.is_a?(::DummyScheduler)) ||
+ (defined?(::Evt) && ::Evt::Scheduler.singleton_class::BACKENDS.any? { |be| Fiber.scheduler.is_a?(be) }) ||
+ (defined?(::Libev) && Fiber.scheduler.is_a?(::Libev::Scheduler))
+ )
+ end
+
+ def spawn_job_fiber
+ if @pending_jobs.any?
+ spawn_fiber do
+ while job = @pending_jobs.shift
+ job.call
+ end
+ end
+ end
+ end
+
+ def spawn_source_fiber
  pending_sources = nil
  @source_cache.each_value do |source_by_batch_params|
  source_by_batch_params.each_value do |source|
@@ -292,48 +277,10 @@ module GraphQL
  end
 
  if pending_sources
- # By passing the whole array into this Fiber, it's possible that we set ourselves up for a bunch of no-ops.
- # For example, if you have sources `[a, b, c]`, and `a` is loaded, then `b` yields to wait for `d`, then
- # the next fiber would be dispatched with `[c, d]`. It would fulfill `c`, then `d`, then eventually
- # the previous fiber would start up again. `c` would no longer be pending, but it would still receive `.run_pending_keys`.
- # That method is short-circuited since it isn't pending any more, but it's still a waste.
- #
- # This design could probably be improved by maintaining a `@pending_sources` queue which is shared by the fibers,
- # similar to `@pending_jobs`. That way, when a fiber is resumed, it would never pick up work that was finished by a different fiber.
- source_fiber = spawn_fiber do
+ spawn_fiber do
  pending_sources.each(&:run_pending_keys)
  end
  end
-
- source_fiber
- end
-
- def resume(fiber)
- fiber.resume
- rescue UncaughtThrowError => e
- throw e.tag, e.value
- end
-
- # Copies the thread local vars into the fiber thread local vars. Many
- # gems (such as RequestStore, MiniRacer, etc.) rely on thread local vars
- # to keep track of execution context, and without this they do not
- # behave as expected.
- #
- # @see https://github.com/rmosolgo/graphql-ruby/issues/3449
- def spawn_fiber
- fiber_vars = get_fiber_variables
-
- if @nonblocking
- Fiber.new(blocking: false) do
- set_fiber_variables(fiber_vars)
- yield
- end
- else
- Fiber.new do
- set_fiber_variables(fiber_vars)
- yield
- end
- end
  end
  end
  end
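
The rewritten run loop (a manager fiber plus the spawn_job_fiber / spawn_source_fiber helpers) does not change how Dataloader is used from application code. For reference, a batch-loading source still looks roughly like this sketch (the source class name and the ActiveRecord-style lookup are illustrative):

class UserSource < GraphQL::Dataloader::Source
  def fetch(ids)
    # One query for the whole batch -- illustrative persistence call.
    users_by_id = User.where(id: ids).index_by(&:id)
    ids.map { |id| users_by_id[id] }
  end
end

# Inside a field or resolver, loads from many fields are batched together:
#   dataloader.with(UserSource).load(object.author_id)
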
@@ -25,7 +25,7 @@ module GraphQL
  queries = query_options.map do |opts|
  case opts
  when Hash
- GraphQL::Query.new(schema, nil, **opts)
+ schema.query_class.new(schema, nil, **opts)
  when GraphQL::Query
  opts
  else
@@ -47,10 +47,10 @@ module GraphQL
  end
 
  # Remove leading & trailing blank lines
- while lines.size > 0 && lines[0].empty?
+ while lines.size > 0 && contains_only_whitespace?(lines.first)
  lines.shift
  end
- while lines.size > 0 && lines[-1].empty?
+ while lines.size > 0 && contains_only_whitespace?(lines.last)
  lines.pop
  end
 
@@ -106,6 +106,10 @@ module GraphQL
 
  nil
  end
+
+ def self.contains_only_whitespace?(line)
+ line.match?(/^\s*$/)
+ end
  end
  end
  end
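
The practical effect of the new helper: leading and trailing lines that contain only whitespace now count as blank and get trimmed, whereas `String#empty?` only matched truly empty lines. In plain Ruby terms:

"".empty?              # => true  -- trimmed before and after this change
"   ".empty?           # => false -- previously survived trimming
"   ".match?(/^\s*$/)  # => true  -- now treated as a blank line
"\t".match?(/^\s*$/)   # => true
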
@@ -1,8 +1,9 @@
  # frozen_string_literal: true
+ require "graphql/query/context"
  module GraphQL
  class Query
  # This object can be `ctx` in places where there is no query
- class NullContext
+ class NullContext < Context
  include Singleton
 
  class NullQuery
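
Because NullContext now subclasses GraphQL::Query::Context, class checks that accept a real query context (such as the `is_a?(GraphQL::Query::Context)` test in the Backtrace hunk above) also accept the null context:

null_ctx = GraphQL::Query::NullContext.instance
null_ctx.is_a?(GraphQL::Query::Context) # => true after this change
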
data/lib/graphql/query.rb CHANGED
@@ -115,8 +115,6 @@ module GraphQL
  if schema.trace_class <= GraphQL::Tracing::CallLegacyTracers
  context_tracers += [GraphQL::Backtrace::Tracer]
  @tracers << GraphQL::Backtrace::Tracer
- elsif !(current_trace.class <= GraphQL::Backtrace::Trace)
- raise "Invariant: `backtrace: true` should have provided a trace class with Backtrace mixed in, but it didnt. (Found: #{current_trace.class.ancestors}). This is a bug in GraphQL-Ruby, please report it on GitHub."
  end
  end
 
@@ -221,8 +221,13 @@ module GraphQL
  #
  # This will have to be called later, when the runtime object _is_ available.
  value
- else
+ elsif obj.respond_to?(@prepare)
  obj.public_send(@prepare, value)
+ elsif owner.respond_to?(@prepare)
+ owner.public_send(@prepare, value, context || obj.context)
+ else
+ raise "Invalid prepare for #{@owner.name}.name: #{@prepare.inspect}. "\
+ "Could not find prepare method #{@prepare} on #{obj.class} or #{owner}."
  end
  elsif @prepare.respond_to?(:call)
  @prepare.call(value, context || obj.context)
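
With this change, a `prepare:` given as a symbol is first looked up on the runtime object (called with just the value) and then on the argument's owner (called with the value and context), instead of always being sent to the runtime object. A hedged sketch of both lookups (class and argument names are illustrative):

class PostAttributes < GraphQL::Schema::InputObject
  # Found on the input object instance -- first branch, called with (value):
  argument :title, String, prepare: :strip_title

  def strip_title(value)
    value.strip
  end
end

class ScheduleEvent < GraphQL::Schema::Mutation
  # Falls back to the owner -- second branch, called with (value, context):
  argument :first, Integer, required: false, prepare: :clamp_first

  def self.clamp_first(value, _context)
    value.clamp(1, 100)
  end
end
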
@@ -145,7 +145,7 @@ module GraphQL
  end
 
  # @api private
- INVALID_OBJECT_MESSAGE = "Expected %{object} to be a key-value object responding to `to_h` or `to_unsafe_h`."
+ INVALID_OBJECT_MESSAGE = "Expected %{object} to be a key-value object."
 
  def validate_non_null_input(input, ctx, max_errors: nil)
  warden = ctx.warden
@@ -21,7 +21,7 @@ module GraphQL
  if @reauthorize_scoped_objects != nil
  @reauthorize_scoped_objects
  else
- find_inherited_value(:reauthorize_scoped_objects, nil)
+ find_inherited_value(:reauthorize_scoped_objects, true)
  end
  else
  @reauthorize_scoped_objects = new_value
@@ -680,7 +680,7 @@ module GraphQL
  else
  string_or_document
  end
- query = GraphQL::Query.new(self, document: doc, context: context)
+ query = query_class.new(self, document: doc, context: context)
  validator_opts = { schema: self }
  rules && (validator_opts[:rules] = rules)
  validator = GraphQL::StaticValidation::Validator.new(**validator_opts)
@@ -688,6 +688,14 @@ module GraphQL
  res[:errors]
  end
 
+ def query_class(new_query_class = NOT_CONFIGURED)
+ if NOT_CONFIGURED.equal?(new_query_class)
+ @query_class || (superclass.respond_to?(:query_class) ? superclass.query_class : GraphQL::Query)
+ else
+ @query_class = new_query_class
+ end
+ end
+
  attr_writer :validate_max_errors
 
  def validate_max_errors(new_validate_max_errors = nil)
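
`query_class` is a new inheritable schema setting; the Multiplex, Schema.validate, and Subscriptions hunks in this release all build queries through it instead of hard-coding GraphQL::Query. A hedged sketch of how an application might use it (class names are illustrative):

class InstrumentedQuery < GraphQL::Query
  def initialize(*args, **kwargs)
    super
    @started_at = Time.now # illustrative per-query bookkeeping
  end
end

class MySchema < GraphQL::Schema
  query_class InstrumentedQuery
end

MySchema.query_class # => InstrumentedQuery (inherited by schema subclasses unless overridden)
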
@@ -126,7 +126,13 @@ module GraphQL
  when GraphQL::Schema::InputObject
  stringify_args(arg_owner, args.to_h, context)
  else
- args
+ if arg_owner.is_a?(Class) && arg_owner < GraphQL::Schema::Enum
+ # `prepare:` may have made the value something other than
+ # a defined value of this enum -- use _that_ in this case.
+ arg_owner.coerce_isolated_input(args) || args
+ else
+ args
+ end
  end
  end
 
@@ -62,7 +62,7 @@ module GraphQL
  # @return [void]
  def trigger(event_name, args, object, scope: nil, context: {})
  # Make something as context-like as possible, even though there isn't a current query:
- dummy_query = GraphQL::Query.new(@schema, "{ __typename }", validate: false, context: context)
+ dummy_query = @schema.query_class.new(@schema, "{ __typename }", validate: false, context: context)
  context = dummy_query.context
  event_name = event_name.to_s
 
@@ -234,7 +234,7 @@ module GraphQL
 
  # @return [Boolean] if true, then a query like this one would be broadcasted
  def broadcastable?(query_str, **query_options)
- query = GraphQL::Query.new(@schema, query_str, **query_options)
+ query = @schema.query_class.new(@schema, query_str, **query_options)
  if !query.valid?
  raise "Invalid query: #{query.validation_errors.map(&:to_h).inspect}"
  end
@@ -87,6 +87,19 @@ module GraphQL
  node_type.scope_items(items, context)
  end
 
+ # The connection will skip auth on its nodes if the node_type is configured for that
+ def reauthorize_scoped_objects(new_value = nil)
+ if new_value.nil?
+ if @reauthorize_scoped_objects != nil
+ @reauthorize_scoped_objects
+ else
+ node_type.reauthorize_scoped_objects
+ end
+ else
+ @reauthorize_scoped_objects = new_value
+ end
+ end
+
  # Add the shortcut `nodes` field to this connection and its subclasses
  def nodes_field(node_nullable: self.node_nullable, field_options: nil)
  define_nodes_field(node_nullable, field_options: field_options)
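
Paired with the earlier default change in this release (the inherited fallback is now `true`), this lets a node type opt out of re-running `authorized?` on objects that already went through `scope_items`, and its connection type inherits that choice via `node_type`. A hedged sketch (the type name and scoping call are illustrative):

class PostType < GraphQL::Schema::Object
  # Treat items returned by scope_items as already authorized:
  reauthorize_scoped_objects(false)

  def self.scope_items(items, context)
    items.visible_to(context[:current_user]) # illustrative scoping
  end
end

# Connection types over PostType pick up the same setting through node_type,
# unless they override reauthorize_scoped_objects themselves.
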
@@ -1,4 +1,4 @@
  # frozen_string_literal: true
  module GraphQL
- VERSION = "2.1.6"
+ VERSION = "2.1.8"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: graphql
  version: !ruby/object:Gem::Version
- version: 2.1.6
+ version: 2.1.8
  platform: ruby
  authors:
  - Robert Mosolgo
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2023-11-02 00:00:00.000000000 Z
+ date: 2023-12-18 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: racc
@@ -270,6 +270,7 @@ files:
  - lib/generators/graphql/templates/base_input_object.erb
  - lib/generators/graphql/templates/base_interface.erb
  - lib/generators/graphql/templates/base_object.erb
+ - lib/generators/graphql/templates/base_resolver.erb
  - lib/generators/graphql/templates/base_scalar.erb
  - lib/generators/graphql/templates/base_union.erb
  - lib/generators/graphql/templates/enum.erb