graphql 2.0.28 → 2.2.11

Files changed (133)
  1. checksums.yaml +4 -4
  2. data/lib/generators/graphql/install/templates/base_mutation.erb +2 -0
  3. data/lib/generators/graphql/install/templates/mutation_type.erb +2 -0
  4. data/lib/generators/graphql/install_generator.rb +3 -0
  5. data/lib/generators/graphql/templates/base_argument.erb +2 -0
  6. data/lib/generators/graphql/templates/base_connection.erb +2 -0
  7. data/lib/generators/graphql/templates/base_edge.erb +2 -0
  8. data/lib/generators/graphql/templates/base_enum.erb +2 -0
  9. data/lib/generators/graphql/templates/base_field.erb +2 -0
  10. data/lib/generators/graphql/templates/base_input_object.erb +2 -0
  11. data/lib/generators/graphql/templates/base_interface.erb +2 -0
  12. data/lib/generators/graphql/templates/base_object.erb +2 -0
  13. data/lib/generators/graphql/templates/base_resolver.erb +6 -0
  14. data/lib/generators/graphql/templates/base_scalar.erb +2 -0
  15. data/lib/generators/graphql/templates/base_union.erb +2 -0
  16. data/lib/generators/graphql/templates/graphql_controller.erb +2 -0
  17. data/lib/generators/graphql/templates/loader.erb +2 -0
  18. data/lib/generators/graphql/templates/mutation.erb +2 -0
  19. data/lib/generators/graphql/templates/node_type.erb +2 -0
  20. data/lib/generators/graphql/templates/query_type.erb +2 -0
  21. data/lib/generators/graphql/templates/schema.erb +2 -0
  22. data/lib/graphql/analysis/ast/analyzer.rb +7 -0
  23. data/lib/graphql/analysis/ast/field_usage.rb +32 -7
  24. data/lib/graphql/analysis/ast/query_complexity.rb +80 -128
  25. data/lib/graphql/analysis/ast/query_depth.rb +7 -2
  26. data/lib/graphql/analysis/ast/visitor.rb +2 -2
  27. data/lib/graphql/analysis/ast.rb +21 -11
  28. data/lib/graphql/backtrace/trace.rb +12 -15
  29. data/lib/graphql/coercion_error.rb +1 -9
  30. data/lib/graphql/dataloader/async_dataloader.rb +85 -0
  31. data/lib/graphql/dataloader/request.rb +5 -0
  32. data/lib/graphql/dataloader/source.rb +11 -3
  33. data/lib/graphql/dataloader.rb +109 -142
  34. data/lib/graphql/duration_encoding_error.rb +16 -0
  35. data/lib/graphql/execution/interpreter/runtime/graphql_result.rb +170 -0
  36. data/lib/graphql/execution/interpreter/runtime.rb +79 -248
  37. data/lib/graphql/execution/interpreter.rb +91 -157
  38. data/lib/graphql/execution/lookahead.rb +88 -21
  39. data/lib/graphql/introspection/dynamic_fields.rb +1 -1
  40. data/lib/graphql/introspection/entry_points.rb +11 -5
  41. data/lib/graphql/introspection/schema_type.rb +3 -1
  42. data/lib/graphql/language/block_string.rb +34 -18
  43. data/lib/graphql/language/definition_slice.rb +1 -1
  44. data/lib/graphql/language/document_from_schema_definition.rb +37 -37
  45. data/lib/graphql/language/lexer.rb +271 -177
  46. data/lib/graphql/language/nodes.rb +75 -57
  47. data/lib/graphql/language/parser.rb +707 -1986
  48. data/lib/graphql/language/printer.rb +303 -146
  49. data/lib/graphql/language/sanitized_printer.rb +20 -22
  50. data/lib/graphql/language/static_visitor.rb +167 -0
  51. data/lib/graphql/language/visitor.rb +20 -81
  52. data/lib/graphql/language.rb +1 -0
  53. data/lib/graphql/load_application_object_failed_error.rb +5 -1
  54. data/lib/graphql/pagination/array_connection.rb +3 -3
  55. data/lib/graphql/pagination/connection.rb +28 -1
  56. data/lib/graphql/pagination/mongoid_relation_connection.rb +1 -2
  57. data/lib/graphql/pagination/relation_connection.rb +3 -3
  58. data/lib/graphql/query/context/scoped_context.rb +101 -0
  59. data/lib/graphql/query/context.rb +36 -98
  60. data/lib/graphql/query/null_context.rb +4 -11
  61. data/lib/graphql/query/validation_pipeline.rb +2 -2
  62. data/lib/graphql/query/variables.rb +3 -3
  63. data/lib/graphql/query.rb +13 -22
  64. data/lib/graphql/railtie.rb +9 -6
  65. data/lib/graphql/rake_task.rb +3 -12
  66. data/lib/graphql/schema/argument.rb +6 -1
  67. data/lib/graphql/schema/base_64_encoder.rb +3 -5
  68. data/lib/graphql/schema/build_from_definition.rb +0 -11
  69. data/lib/graphql/schema/directive/one_of.rb +12 -0
  70. data/lib/graphql/schema/directive/specified_by.rb +14 -0
  71. data/lib/graphql/schema/directive.rb +1 -1
  72. data/lib/graphql/schema/enum.rb +3 -3
  73. data/lib/graphql/schema/field/connection_extension.rb +1 -15
  74. data/lib/graphql/schema/field/scope_extension.rb +8 -1
  75. data/lib/graphql/schema/field.rb +39 -35
  76. data/lib/graphql/schema/has_single_input_argument.rb +156 -0
  77. data/lib/graphql/schema/input_object.rb +2 -2
  78. data/lib/graphql/schema/interface.rb +15 -11
  79. data/lib/graphql/schema/introspection_system.rb +2 -0
  80. data/lib/graphql/schema/loader.rb +0 -2
  81. data/lib/graphql/schema/member/base_dsl_methods.rb +2 -1
  82. data/lib/graphql/schema/member/has_arguments.rb +61 -38
  83. data/lib/graphql/schema/member/has_fields.rb +8 -5
  84. data/lib/graphql/schema/member/has_interfaces.rb +23 -9
  85. data/lib/graphql/schema/member/scoped.rb +19 -0
  86. data/lib/graphql/schema/member/validates_input.rb +3 -3
  87. data/lib/graphql/schema/object.rb +8 -0
  88. data/lib/graphql/schema/printer.rb +8 -7
  89. data/lib/graphql/schema/relay_classic_mutation.rb +6 -128
  90. data/lib/graphql/schema/resolver.rb +16 -8
  91. data/lib/graphql/schema/scalar.rb +3 -3
  92. data/lib/graphql/schema/subscription.rb +11 -4
  93. data/lib/graphql/schema/union.rb +1 -1
  94. data/lib/graphql/schema/unique_within_type.rb +1 -1
  95. data/lib/graphql/schema/warden.rb +96 -94
  96. data/lib/graphql/schema.rb +252 -78
  97. data/lib/graphql/static_validation/all_rules.rb +1 -1
  98. data/lib/graphql/static_validation/base_visitor.rb +1 -1
  99. data/lib/graphql/static_validation/literal_validator.rb +2 -3
  100. data/lib/graphql/static_validation/rules/fields_will_merge.rb +1 -1
  101. data/lib/graphql/static_validation/rules/required_arguments_are_present.rb +1 -1
  102. data/lib/graphql/static_validation/rules/required_input_object_attributes_are_present.rb +2 -2
  103. data/lib/graphql/static_validation/validation_context.rb +5 -5
  104. data/lib/graphql/static_validation/validator.rb +3 -0
  105. data/lib/graphql/static_validation.rb +0 -1
  106. data/lib/graphql/subscriptions/action_cable_subscriptions.rb +3 -2
  107. data/lib/graphql/subscriptions/event.rb +8 -2
  108. data/lib/graphql/subscriptions/serialize.rb +2 -0
  109. data/lib/graphql/subscriptions.rb +14 -12
  110. data/lib/graphql/testing/helpers.rb +129 -0
  111. data/lib/graphql/testing.rb +2 -0
  112. data/lib/graphql/tracing/appoptics_trace.rb +2 -2
  113. data/lib/graphql/tracing/appoptics_tracing.rb +2 -2
  114. data/lib/graphql/tracing/legacy_hooks_trace.rb +74 -0
  115. data/lib/graphql/tracing/platform_tracing.rb +2 -0
  116. data/lib/graphql/tracing/{prometheus_tracing → prometheus_trace}/graphql_collector.rb +3 -1
  117. data/lib/graphql/tracing/sentry_trace.rb +112 -0
  118. data/lib/graphql/tracing/trace.rb +1 -0
  119. data/lib/graphql/tracing.rb +3 -1
  120. data/lib/graphql/types/iso_8601_duration.rb +77 -0
  121. data/lib/graphql/types/relay/connection_behaviors.rb +32 -2
  122. data/lib/graphql/types/relay/edge_behaviors.rb +7 -0
  123. data/lib/graphql/types.rb +1 -0
  124. data/lib/graphql/version.rb +1 -1
  125. data/lib/graphql.rb +6 -5
  126. data/readme.md +12 -2
  127. metadata +46 -38
  128. data/lib/graphql/deprecation.rb +0 -9
  129. data/lib/graphql/filter.rb +0 -59
  130. data/lib/graphql/language/parser.y +0 -560
  131. data/lib/graphql/schema/base_64_bp.rb +0 -26
  132. data/lib/graphql/static_validation/type_stack.rb +0 -216
  133. data/lib/graphql/subscriptions/instrumentation.rb +0 -28
data/lib/graphql/backtrace/trace.rb

@@ -2,6 +2,12 @@
 module GraphQL
   class Backtrace
     module Trace
+      def initialize(*args, **kwargs, &block)
+        @__backtrace_contexts = {}
+        @__backtrace_last_context = nil
+        super
+      end
+
       def validate(query:, validate:)
         if query.multiplex
           push_query_backtrace_context(query)
@@ -42,36 +48,27 @@ module GraphQL
       rescue StandardError => err
         # This is an unhandled error from execution,
         # Re-raise it with a GraphQL trace.
-        multiplex_context = multiplex.context
-        potential_context = multiplex_context[:last_graphql_backtrace_context]
-
+        potential_context = @__backtrace_last_context
         if potential_context.is_a?(GraphQL::Query::Context) ||
             potential_context.is_a?(Backtrace::Frame)
           raise TracedError.new(err, potential_context)
         else
           raise
         end
-      ensure
-        multiplex_context = multiplex.context
-        multiplex_context.delete(:graphql_backtrace_contexts)
-        multiplex_context.delete(:last_graphql_backtrace_context)
       end

       private

       def push_query_backtrace_context(query)
         push_data = query
-        multiplex = query.multiplex
         push_key = []
-        push_storage = multiplex.context[:graphql_backtrace_contexts] ||= {}
-        push_storage[push_key] = push_data
-        multiplex.context[:last_graphql_backtrace_context] = push_data
+        @__backtrace_contexts[push_key] = push_data
+        @__backtrace_last_context = push_data
       end

       def push_field_backtrace_context(field, query, ast_node, arguments, object)
-        multiplex = query.multiplex
         push_key = query.context[:current_path]
-        push_storage = multiplex.context[:graphql_backtrace_contexts]
+        push_storage = @__backtrace_contexts
         parent_frame = push_storage[push_key[0..-2]]

         if parent_frame.is_a?(GraphQL::Query)
@@ -87,10 +84,10 @@ module GraphQL
           arguments: arguments,
           parent_frame: parent_frame,
         )
-
         push_storage[push_key] = push_data
-        multiplex.context[:last_graphql_backtrace_context] = push_data
+        @__backtrace_last_context = push_data
       end
+
     end
   end
 end
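Net effect: backtrace state now lives in instance variables on the trace object instead of the shared multiplex context, so the `ensure` cleanup is no longer needed. Opting in is unchanged; a minimal sketch (the schema and query type names are hypothetical):

class MySchema < GraphQL::Schema
  query Types::QueryType
  # Wraps execution so unhandled errors are re-raised as TracedError,
  # annotated with the GraphQL fields that were being resolved
  use GraphQL::Backtrace
end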
data/lib/graphql/coercion_error.rb

@@ -1,13 +1,5 @@
 # frozen_string_literal: true
 module GraphQL
-  class CoercionError < GraphQL::Error
-    # @return [Hash] Optional custom data for error objects which will be added
-    # under the `extensions` key.
-    attr_accessor :extensions
-
-    def initialize(message, extensions: nil)
-      @extensions = extensions
-      super(message)
-    end
+  class CoercionError < GraphQL::ExecutionError
   end
 end
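Since `CoercionError` now subclasses `GraphQL::ExecutionError`, which already carries an `extensions` accessor, the hand-rolled constructor was redundant and coercion failures flow through normal error handling. A hedged sketch of raising it from a custom scalar (the scalar class is hypothetical):

class Types::EmailAddress < Types::BaseScalar
  def self.coerce_input(value, _context)
    unless value.is_a?(String) && value.include?("@")
      # `extensions:` is accepted by the new superclass, GraphQL::ExecutionError
      raise GraphQL::CoercionError.new("#{value.inspect} is not an email address", extensions: { "code" => "BAD_EMAIL" })
    end
    value
  end
end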
data/lib/graphql/dataloader/async_dataloader.rb (new file)

@@ -0,0 +1,85 @@
+# frozen_string_literal: true
+module GraphQL
+  class Dataloader
+    class AsyncDataloader < Dataloader
+      def yield
+        if (condition = Thread.current[:graphql_dataloader_next_tick])
+          condition.wait
+        else
+          Fiber.yield
+        end
+        nil
+      end
+
+      def run
+        job_fibers = []
+        next_job_fibers = []
+        source_tasks = []
+        next_source_tasks = []
+        first_pass = true
+        sources_condition = Async::Condition.new
+        manager = spawn_fiber do
+          while first_pass || job_fibers.any?
+            first_pass = false
+
+            while (f = (job_fibers.shift || spawn_job_fiber))
+              if f.alive?
+                finished = run_fiber(f)
+                if !finished
+                  next_job_fibers << f
+                end
+              end
+            end
+            job_fibers.concat(next_job_fibers)
+            next_job_fibers.clear
+
+            Sync do |root_task|
+              while source_tasks.any? || @source_cache.each_value.any? { |group_sources| group_sources.each_value.any?(&:pending?) }
+                while (task = source_tasks.shift || spawn_source_task(root_task, sources_condition))
+                  if task.alive?
+                    root_task.yield # give the source task a chance to run
+                    next_source_tasks << task
+                  end
+                end
+                sources_condition.signal
+                source_tasks.concat(next_source_tasks)
+                next_source_tasks.clear
+              end
+            end
+          end
+        end
+
+        manager.resume
+        if manager.alive?
+          raise "Invariant: Manager didn't terminate successfully: #{manager}"
+        end
+
+      rescue UncaughtThrowError => e
+        throw e.tag, e.value
+      end
+
+      private
+
+      def spawn_source_task(parent_task, condition)
+        pending_sources = nil
+        @source_cache.each_value do |source_by_batch_params|
+          source_by_batch_params.each_value do |source|
+            if source.pending?
+              pending_sources ||= []
+              pending_sources << source
+            end
+          end
+        end
+
+        if pending_sources
+          fiber_vars = get_fiber_variables
+          parent_task.async do
+            set_fiber_variables(fiber_vars)
+            Thread.current[:graphql_dataloader_next_tick] = condition
+            pending_sources.each(&:run_pending_keys)
+          end
+        end
+      end
+    end
+  end
+end
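`AsyncDataloader` runs pending sources inside tasks from the `async` gem (note `Sync` and `Async::Condition` above), so blocking I/O in different sources can overlap without configuring a fiber scheduler. A minimal opt-in sketch, assuming the `async` gem is installed (the schema and query type names are hypothetical):

require "async"

class MySchema < GraphQL::Schema
  query Types::QueryType
  use GraphQL::Dataloader::AsyncDataloader
end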
data/lib/graphql/dataloader/request.rb

@@ -14,6 +14,11 @@ module GraphQL
       def load
         @source.load(@key)
       end
+
+      def load_with_deprecation_warning
+        warn("Returning `.request(...)` from GraphQL::Dataloader is deprecated, use `.load(...)` instead. (See usage of #{@source} with #{@key.inspect}).")
+        load
+      end
     end
   end
 end
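`Request` objects come from `dataloader.with(...).request(...)`; calling `#load` on one blocks the current fiber until the batch has run. A hedged sketch of the intended pattern inside a resolver (the source class and key are hypothetical):

# Enqueue early, pay the wait only when the value is needed:
request = dataloader.with(Sources::UserById).request(user_id)
# ...other work can enqueue more keys into the same batch...
user = request.load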
data/lib/graphql/dataloader/source.rb

@@ -88,6 +88,7 @@ module GraphQL
       raise "Implement `#{self.class}#fetch(#{keys.inspect}) to return a record for each of the keys"
     end

+    MAX_ITERATIONS = 1000
     # Wait for a batch, if there's anything to batch.
     # Then run the batch and update the cache.
     # @return [void]
@@ -96,8 +97,8 @@ module GraphQL
       iterations = 0
       while pending_result_keys.any? { |key| !@results.key?(key) }
         iterations += 1
-        if iterations > 1000
-          raise "#{self.class}#sync tried 1000 times to load pending keys (#{pending_result_keys}), but they still weren't loaded. There is likely a circular dependency."
+        if iterations > MAX_ITERATIONS
+          raise "#{self.class}#sync tried #{MAX_ITERATIONS} times to load pending keys (#{pending_result_keys}), but they still weren't loaded. There is likely a circular dependency."
         end
         @dataloader.yield
       end
@@ -161,7 +162,14 @@ module GraphQL
       [*batch_args, **batch_kwargs]
     end

-    attr_reader :pending
+    # Clear any already-loaded objects for this source
+    # @return [void]
+    def clear_cache
+      @results.clear
+      nil
+    end
+
+    attr_reader :pending, :results

     private

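`Source#clear_cache` empties the memoized `@results`, so later loads hit `fetch` again. A hedged sketch (the source class is hypothetical):

users = dataloader.with(Sources::UserById)
user = users.load(1)       # runs fetch([1]) and caches the result
users.clear_cache          # drop cached results for this source
fresh_user = users.load(1) # fetches again instead of reusing @results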
data/lib/graphql/dataloader.rb

@@ -27,11 +27,12 @@ module GraphQL
       attr_accessor :default_nonblocking
     end

-    AsyncDataloader = Class.new(self) { self.default_nonblocking = true }
+    NonblockingDataloader = Class.new(self) { self.default_nonblocking = true }

     def self.use(schema, nonblocking: nil)
       schema.dataloader_class = if nonblocking
-        AsyncDataloader
+        warn("`nonblocking: true` is deprecated from `GraphQL::Dataloader`, please use `GraphQL::Dataloader::AsyncDataloader` instead. Docs: https://graphql-ruby.org/dataloader/async_dataloader.")
+        NonblockingDataloader
       else
         self
       end
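For schemas that were using the old flag, the migration suggested by the warning looks like this:

# Before -- now emits a deprecation warning and uses NonblockingDataloader:
use GraphQL::Dataloader, nonblocking: true

# After -- the async-gem-based implementation:
use GraphQL::Dataloader::AsyncDataloader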
@@ -61,6 +62,32 @@ module GraphQL
       @nonblocking
     end

+    # This is called before the fiber is spawned, from the parent context (i.e. from
+    # the thread or fiber that it is scheduled from).
+    #
+    # @return [Hash<Symbol, Object>] Current fiber-local variables
+    def get_fiber_variables
+      fiber_vars = {}
+      Thread.current.keys.each do |fiber_var_key|
+        # This variable should be fresh in each new fiber
+        if fiber_var_key != :__graphql_runtime_info
+          fiber_vars[fiber_var_key] = Thread.current[fiber_var_key]
+        end
+      end
+      fiber_vars
+    end
+
+    # Set up the fiber variables in a new fiber.
+    #
+    # This is called within the fiber, right after it is spawned.
+    #
+    # @param vars [Hash<Symbol, Object>] Fiber-local variables from {get_fiber_variables}
+    # @return [void]
+    def set_fiber_variables(vars)
+      vars.each { |k, v| Thread.current[k] = v }
+      nil
+    end
+
     # Get a Source instance from this dataloader, for calling `.load(...)` or `.request(...)` on.
     #
     # @param source_class [Class<GraphQL::Dataloader::Source]
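Making `get_fiber_variables`/`set_fiber_variables` public lets subclasses control what crosses the fiber boundary (they exist so gems that keep state in `Thread.current`, such as RequestStore, keep working inside worker fibers). A hedged sketch of an override, assuming a custom dataloader subclass (the class and `:trace_id` key are hypothetical):

require "securerandom"

class TracingDataloader < GraphQL::Dataloader
  def get_fiber_variables
    vars = super
    # Ensure every worker fiber carries a correlation id
    vars[:trace_id] ||= SecureRandom.hex(8)
    vars
  end
end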
@@ -104,6 +131,15 @@ module GraphQL
       nil
     end

+    # Clear any already-loaded objects from {Source} caches
+    # @return [void]
+    def clear_cache
+      @source_cache.each do |_source_class, batched_sources|
+        batched_sources.each_value(&:clear_cache)
+      end
+      nil
+    end
+
     # Use a self-contained queue for the work in the block.
     def run_isolated
       prev_queue = @pending_jobs
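The dataloader-level `clear_cache` fans out to every batched `Source`. One plausible use, sketched under assumptions (the mutation body and source class are hypothetical), is invalidating everything after a write so later fields refetch:

def resolve(user_id:, name:)
  User.find(user_id).update!(name: name)
  context.dataloader.clear_cache # any Source that cached this user will refetch
  { user: dataloader.with(Sources::UserById).load(user_id) }
end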
@@ -128,124 +164,98 @@ module GraphQL
     ensure
       @pending_jobs = prev_queue
       prev_pending_keys.each do |source_instance, pending|
-        source_instance.pending.merge!(pending)
+        pending.each do |key, value|
+          if !source_instance.results.key?(key)
+            source_instance.pending[key] = value
+          end
+        end
       end
     end

-    # @api private Move along, move along
     def run
-      if @nonblocking && !Fiber.scheduler
-        raise "`nonblocking: true` requires `Fiber.scheduler`, assign one with `Fiber.set_scheduler(...)` before executing GraphQL."
-      end
-      # At a high level, the algorithm is:
-      #
-      #  A) Inside Fibers, run jobs from the queue one-by-one
-      #    - When one of the jobs yields to the dataloader (`Fiber.yield`), then that fiber will pause
-      #    - In that case, if there are still pending jobs, a new Fiber will be created to run jobs
-      #    - Continue until all jobs have been _started_ by a Fiber. (Any number of those Fibers may be waiting to be resumed, after their data is loaded)
-      #  B) Once all known jobs have been run until they are complete or paused for data, run all pending data sources.
-      #    - Similarly, create a Fiber to consume pending sources and tell them to load their data.
-      #    - If one of those Fibers pauses, then create a new Fiber to continue working through remaining pending sources.
-      #    - When a source causes another source to become pending, run the newly-pending source _first_, since it's a dependency of the previous one.
-      #  C) After all pending sources have been completely loaded (there are no more pending sources), resume any Fibers that were waiting for data.
-      #    - Those Fibers assume that source caches will have been populated with the data they were waiting for.
-      #    - Those Fibers may request data from a source again, in which case they will yeilded and be added to a new pending fiber list.
-      #  D) Once all pending fibers have been resumed once, return to `A` above.
-      #
-      # For whatever reason, the best implementation I could find was to order the steps `[D, A, B, C]`, with a special case for skipping `D`
-      # on the first pass. I just couldn't find a better way to write the loops in a way that was DRY and easy to read.
-      #
-      pending_fibers = []
-      next_fibers = []
-      pending_source_fibers = []
+      job_fibers = []
+      next_job_fibers = []
+      source_fibers = []
       next_source_fibers = []
       first_pass = true
-
-      while first_pass || (f = pending_fibers.shift)
-        if first_pass
+      manager = spawn_fiber do
+        while first_pass || job_fibers.any?
           first_pass = false
-        else
-          # These fibers were previously waiting for sources to load data,
-          # resume them. (They might wait again, in which case, re-enqueue them.)
-          resume(f)
-          if f.alive?
-            next_fibers << f
-          end
-        end

-        while @pending_jobs.any?
-          # Create a Fiber to consume jobs until one of the jobs yields
-          # or jobs run out
-          f = spawn_fiber {
-            while (job = @pending_jobs.shift)
-              job.call
+          while (f = (job_fibers.shift || spawn_job_fiber))
+            if f.alive?
+              finished = run_fiber(f)
+              if !finished
+                next_job_fibers << f
+              end
             end
-          }
-          resume(f)
-          # In this case, the job yielded. Queue it up to run again after
-          # we load whatever it's waiting for.
-          if f.alive?
-            next_fibers << f
-          end
-        end
-
-        if pending_fibers.empty?
-          # Now, run all Sources which have become pending _before_ resuming GraphQL execution.
-          # Sources might queue up other Sources, which is fine -- those will also run before resuming execution.
-          #
-          # This is where an evented approach would be even better -- can we tell which
-          # fibers are ready to continue, and continue execution there?
-          #
-          if (first_source_fiber = create_source_fiber)
-            pending_source_fibers << first_source_fiber
           end
+          join_queues(job_fibers, next_job_fibers)

-          while pending_source_fibers.any?
-            while (outer_source_fiber = pending_source_fibers.pop)
-              resume(outer_source_fiber)
-              if outer_source_fiber.alive?
-                next_source_fibers << outer_source_fiber
-              end
-              if (next_source_fiber = create_source_fiber)
-                pending_source_fibers << next_source_fiber
+          while source_fibers.any? || @source_cache.each_value.any? { |group_sources| group_sources.each_value.any?(&:pending?) }
+            while (f = source_fibers.shift || spawn_source_fiber)
+              if f.alive?
+                finished = run_fiber(f)
+                if !finished
+                  next_source_fibers << f
+                end
               end
             end
-            join_queues(pending_source_fibers, next_source_fibers)
-            next_source_fibers.clear
+            join_queues(source_fibers, next_source_fibers)
           end
-          # Move newly-enqueued Fibers on to the list to be resumed.
-          # Clear out the list of next-round Fibers, so that
-          # any Fibers that pause can be put on it.
-          join_queues(pending_fibers, next_fibers)
-          next_fibers.clear
         end
       end

-      if @pending_jobs.any?
-        raise "Invariant: #{@pending_jobs.size} pending jobs"
-      elsif pending_fibers.any?
-        raise "Invariant: #{pending_fibers.size} pending fibers"
-      elsif next_fibers.any?
-        raise "Invariant: #{next_fibers.size} next fibers"
+      run_fiber(manager)
+
+      if manager.alive?
+        raise "Invariant: Manager fiber didn't terminate properly."
       end
-      nil
-    end

-    def join_queues(previous_queue, next_queue)
-      if @nonblocking
-        Fiber.scheduler.run
-        next_queue.select!(&:alive?)
+      if job_fibers.any?
+        raise "Invariant: job fibers should have exited but #{job_fibers.size} remained"
+      end
+      if source_fibers.any?
+        raise "Invariant: source fibers should have exited but #{source_fibers.size} remained"
       end
-      previous_queue.concat(next_queue)
+    rescue UncaughtThrowError => e
+      throw e.tag, e.value
+    end
+
+    def run_fiber(f)
+      f.resume
+    end
+
+    def spawn_fiber
+      fiber_vars = get_fiber_variables
+      Fiber.new(blocking: !@nonblocking) {
+        set_fiber_variables(fiber_vars)
+        yield
+        # With `.transfer`, you have to explicitly pass back to the parent --
+        # if the fiber is allowed to terminate normally, control is passed to the main fiber instead.
+        true
+      }
     end

     private

-    # If there are pending sources, return a fiber for running them.
-    # Otherwise, return `nil`.
-    #
-    # @return [Fiber, nil]
-    def create_source_fiber
+    def join_queues(prev_queue, new_queue)
+      @nonblocking && Fiber.scheduler.run
+      prev_queue.concat(new_queue)
+      new_queue.clear
+    end
+
+    def spawn_job_fiber
+      if @pending_jobs.any?
+        spawn_fiber do
+          while job = @pending_jobs.shift
+            job.call
+          end
+        end
+      end
+    end
+
+    def spawn_source_fiber
       pending_sources = nil
       @source_cache.each_value do |source_by_batch_params|
         source_by_batch_params.each_value do |source|
@@ -257,55 +267,12 @@ module GraphQL
       end

       if pending_sources
-        # By passing the whole array into this Fiber, it's possible that we set ourselves up for a bunch of no-ops.
-        # For example, if you have sources `[a, b, c]`, and `a` is loaded, then `b` yields to wait for `d`, then
-        # the next fiber would be dispatched with `[c, d]`. It would fulfill `c`, then `d`, then eventually
-        # the previous fiber would start up again. `c` would no longer be pending, but it would still receive `.run_pending_keys`.
-        # That method is short-circuited since it isn't pending any more, but it's still a waste.
-        #
-        # This design could probably be improved by maintaining a `@pending_sources` queue which is shared by the fibers,
-        # similar to `@pending_jobs`. That way, when a fiber is resumed, it would never pick up work that was finished by a different fiber.
-        source_fiber = spawn_fiber do
+        spawn_fiber do
           pending_sources.each(&:run_pending_keys)
         end
       end
-
-      source_fiber
-    end
-
-    def resume(fiber)
-      fiber.resume
-    rescue UncaughtThrowError => e
-      throw e.tag, e.value
-    end
-
-    # Copies the thread local vars into the fiber thread local vars. Many
-    # gems (such as RequestStore, MiniRacer, etc.) rely on thread local vars
-    # to keep track of execution context, and without this they do not
-    # behave as expected.
-    #
-    # @see https://github.com/rmosolgo/graphql-ruby/issues/3449
-    def spawn_fiber
-      fiber_locals = {}
-
-      Thread.current.keys.each do |fiber_var_key|
-        # This variable should be fresh in each new fiber
-        if fiber_var_key != :__graphql_runtime_info
-          fiber_locals[fiber_var_key] = Thread.current[fiber_var_key]
-        end
-      end
-
-      if @nonblocking
-        Fiber.new(blocking: false) do
-          fiber_locals.each { |k, v| Thread.current[k] = v }
-          yield
-        end
-      else
-        Fiber.new do
-          fiber_locals.each { |k, v| Thread.current[k] = v }
-          yield
-        end
-      end
     end
   end
 end
+
+require "graphql/dataloader/async_dataloader"
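The rewritten `run` replaces the old `[D, A, B, C]` loop with a single manager fiber that alternates between draining job fibers and source fibers; `run_fiber`, `spawn_job_fiber`, and `spawn_source_fiber` factor out what `resume` and `create_source_fiber` did before. Observable behavior should be the same, as in this hedged standalone sketch (the source class is hypothetical):

dataloader = GraphQL::Dataloader.new
users = []
[1, 2, 3].each do |id|
  dataloader.append_job { users << dataloader.with(Sources::UserById).load(id) }
end
dataloader.run # the manager fiber runs all jobs, batching the three ids into one fetch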
data/lib/graphql/duration_encoding_error.rb (new file)

@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+module GraphQL
+  # This error is raised when `Types::ISO8601Duration` is asked to return a value
+  # that cannot be parsed as an ISO8601-formatted duration by ActiveSupport::Duration.
+  #
+  # @see GraphQL::Types::ISO8601Duration which raises this error
+  class DurationEncodingError < GraphQL::RuntimeTypeError
+    # The value which couldn't be encoded
+    attr_reader :duration_value
+
+    def initialize(value)
+      @duration_value = value
+      super("Duration cannot be parsed: #{value}. \nDuration must be an ISO8601-formatted duration.")
+    end
+  end
+end
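This error backs the new `GraphQL::Types::ISO8601Duration` scalar (file 120 in the list above), which round-trips `ActiveSupport::Duration` values as strings like "P3DT12H". A hedged sketch of a field using it (the type and field names are hypothetical; requires ActiveSupport):

class Types::SubscriptionPlan < Types::BaseObject
  # Serialized via ActiveSupport::Duration; values that can't be parsed
  # as ISO8601 durations surface as GraphQL::DurationEncodingError
  field :billing_interval, GraphQL::Types::ISO8601Duration, null: false
end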