graphql 2.1.15 → 2.2.0

This diff reflects the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release: this version of graphql has been flagged as possibly problematic.

Files changed (41)
  1. checksums.yaml +4 -4
  2. data/lib/graphql/dataloader/async_dataloader.rb +88 -0
  3. data/lib/graphql/dataloader/source.rb +5 -8
  4. data/lib/graphql/dataloader.rb +35 -21
  5. data/lib/graphql/language/lexer.rb +271 -177
  6. data/lib/graphql/language/nodes.rb +72 -57
  7. data/lib/graphql/language/parser.rb +686 -1986
  8. data/lib/graphql/language/printer.rb +16 -12
  9. data/lib/graphql/language/static_visitor.rb +33 -37
  10. data/lib/graphql/language/visitor.rb +55 -59
  11. data/lib/graphql/schema/argument.rb +5 -3
  12. data/lib/graphql/schema/build_from_definition.rb +7 -8
  13. data/lib/graphql/schema/directive.rb +1 -1
  14. data/lib/graphql/schema/enum_value.rb +1 -1
  15. data/lib/graphql/schema/field.rb +1 -1
  16. data/lib/graphql/schema/input_object.rb +6 -8
  17. data/lib/graphql/schema/interface.rb +2 -6
  18. data/lib/graphql/schema/member/has_directives.rb +1 -1
  19. data/lib/graphql/schema/member/has_fields.rb +1 -1
  20. data/lib/graphql/schema/member/has_interfaces.rb +1 -1
  21. data/lib/graphql/schema/member/scoped.rb +1 -1
  22. data/lib/graphql/schema/member/type_system_helpers.rb +1 -1
  23. data/lib/graphql/schema.rb +6 -5
  24. data/lib/graphql/testing/helpers.rb +125 -0
  25. data/lib/graphql/testing.rb +2 -0
  26. data/lib/graphql/tracing/appoptics_trace.rb +0 -4
  27. data/lib/graphql/tracing/appsignal_trace.rb +0 -4
  28. data/lib/graphql/tracing/data_dog_trace.rb +34 -25
  29. data/lib/graphql/tracing/data_dog_tracing.rb +21 -7
  30. data/lib/graphql/tracing/notifications_trace.rb +0 -4
  31. data/lib/graphql/tracing/platform_trace.rb +0 -5
  32. data/lib/graphql/tracing/prometheus_trace.rb +0 -4
  33. data/lib/graphql/tracing/scout_trace.rb +0 -3
  34. data/lib/graphql/tracing/statsd_trace.rb +0 -4
  35. data/lib/graphql/tracing/trace.rb +1 -0
  36. data/lib/graphql/types/relay/connection_behaviors.rb +1 -1
  37. data/lib/graphql/types/relay/edge_behaviors.rb +1 -1
  38. data/lib/graphql/version.rb +1 -1
  39. data/lib/graphql.rb +1 -0
  40. metadata +21 -19
  41. data/lib/graphql/language/parser.y +0 -560
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 0da36aaaf7f059cae47d9120a490c5fd05ee96999bb7a401f1c4cf711d55bebe
-  data.tar.gz: 50409b87b5716a3ccdabe69906fe57564c593e4ac9de038f178407877783e762
+  metadata.gz: 1294ac9e09f2c6924a771765e3f79d6c29449235e65f736afd177270b2a90b99
+  data.tar.gz: eece859aa0e3137e5d7a67c39ec79917d3ee884e3011cb8e9f203801c78c0408
 SHA512:
-  metadata.gz: d16b0cae17f595f29d9dbdf02fbbf74f42654fc766583f896388fc73f7742c16edb9d32aa23706254388629a41cbc30c39e30a867cd5b552791e75f356f5c076
-  data.tar.gz: 19b7ec6d5aa1eba7dd9cb340035b5bfa472e631683554bedbc5593b2a3a68bb3a3847cd5672c6a0f6e829029f42c0f47a3e926ec4dba01969b3ce9cea047814b
+  metadata.gz: 4a8343dd7ddcdfbaa05cdfd462612810bbbaa600c62c2d392d367926e7c4742a14705cc05a7e14cfae6d0d5aecb0509e96b645edcbfcdb63d250349bdeef725e
+  data.tar.gz: de145d99232eecc3feb68fa0cb2f3cdfe73fa154924f3ad27ec25ab1e90783927a2b064667233379adc5f7bc1a38563a39466c193a5378b64e13676f802aeeeb
data/lib/graphql/dataloader/async_dataloader.rb ADDED
@@ -0,0 +1,88 @@
+# frozen_string_literal: true
+module GraphQL
+  class Dataloader
+    class AsyncDataloader < Dataloader
+      def yield
+        Thread.current[:graphql_dataloader_next_tick].wait
+        nil
+      end
+
+      def run
+        job_tasks = []
+        next_job_tasks = []
+        source_tasks = []
+        next_source_tasks = []
+        first_pass = true
+        jobs_condition = Async::Condition.new
+        sources_condition = Async::Condition.new
+        Sync do |root_task|
+          while first_pass || job_tasks.any?
+            first_pass = false
+
+            root_task.async do |jobs_task|
+              while (task = job_tasks.shift || spawn_job_task(jobs_task, jobs_condition))
+                if task.alive?
+                  next_job_tasks << task
+                end
+              end
+            end.wait
+            job_tasks.concat(next_job_tasks)
+            next_job_tasks.clear
+
+            while source_tasks.any? || @source_cache.each_value.any? { |group_sources| group_sources.each_value.any?(&:pending?) }
+              root_task.async do |sources_loop_task|
+                while (task = source_tasks.shift || spawn_source_task(sources_loop_task, sources_condition))
+                  if task.alive?
+                    next_source_tasks << task
+                  end
+                end
+              end.wait
+              sources_condition.signal
+              source_tasks.concat(next_source_tasks)
+              next_source_tasks.clear
+            end
+            jobs_condition.signal
+          end
+        end
+      rescue UncaughtThrowError => e
+        throw e.tag, e.value
+      end
+
+      private
+
+      def spawn_job_task(parent_task, condition)
+        if @pending_jobs.any?
+          fiber_vars = get_fiber_variables
+          parent_task.async do |t|
+            set_fiber_variables(fiber_vars)
+            Thread.current[:graphql_dataloader_next_tick] = condition
+            while job = @pending_jobs.shift
+              job.call
+            end
+          end
+        end
+      end
+
+      def spawn_source_task(parent_task, condition)
+        pending_sources = nil
+        @source_cache.each_value do |source_by_batch_params|
+          source_by_batch_params.each_value do |source|
+            if source.pending?
+              pending_sources ||= []
+              pending_sources << source
+            end
+          end
+        end
+
+        if pending_sources
+          fiber_vars = get_fiber_variables
+          parent_task.async do
+            set_fiber_variables(fiber_vars)
+            Thread.current[:graphql_dataloader_next_tick] = condition
+            pending_sources.each(&:run_pending_keys)
+          end
+        end
+      end
+    end
+  end
+end
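Taken together with the deprecation notice added to dataloader.rb below (which points to https://graphql-ruby.org/dataloader/async_dataloader), the intended way to opt in appears to be attaching this new class to a schema. A minimal sketch, assuming the async gem is installed (AsyncDataloader relies on its Sync and Async::Condition primitives); the schema and type names are illustrative, not part of this diff:

  require "graphql"
  require "async" # assumption: the async gem supplies the Sync / Async::Condition used above

  class QueryType < GraphQL::Schema::Object # illustrative root type
    field :ping, String, null: false
    def ping = "pong"
  end

  class MySchema < GraphQL::Schema # illustrative schema name
    query QueryType
    # Replaces `use GraphQL::Dataloader, nonblocking: true`, which this release deprecates (see dataloader.rb below)
    use GraphQL::Dataloader::AsyncDataloader
  end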
data/lib/graphql/dataloader/source.rb CHANGED
@@ -88,6 +88,7 @@ module GraphQL
         raise "Implement `#{self.class}#fetch(#{keys.inspect}) to return a record for each of the keys"
       end
 
+      MAX_ITERATIONS = 1000
       # Wait for a batch, if there's anything to batch.
       # Then run the batch and update the cache.
       # @return [void]
@@ -96,8 +97,8 @@ module GraphQL
         iterations = 0
         while pending_result_keys.any? { |key| !@results.key?(key) }
           iterations += 1
-          if iterations > 1000
-            raise "#{self.class}#sync tried 1000 times to load pending keys (#{pending_result_keys}), but they still weren't loaded. There is likely a circular dependency."
+          if iterations > MAX_ITERATIONS
+            raise "#{self.class}#sync tried #{MAX_ITERATIONS} times to load pending keys (#{pending_result_keys}), but they still weren't loaded. There is likely a circular dependency."
           end
           @dataloader.yield
         end
@@ -168,7 +169,7 @@ module GraphQL
         nil
       end
 
-      attr_reader :pending, :results
+      attr_reader :pending
 
       private
 
@@ -186,11 +187,7 @@ ERR
         end
         result = @results[key]
 
-        if result.is_a?(StandardError)
-          # Dup it because the rescuer may modify it.
-          # (This happens for GraphQL::ExecutionErrors, at least)
-          raise result.dup
-        end
+        raise result if result.class <= StandardError
 
         result
       end
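For context, the sync loop and the `fetch` error message above belong to the batching API that Source subclasses implement: `fetch(keys)` must return one result per key, in order, and `sync` parks the caller until those results arrive. A minimal sketch of such a subclass (the class name and User lookup are illustrative, not part of this diff):

  class UserSource < GraphQL::Dataloader::Source # illustrative source
    # Called once per batch with every pending key collected by the dataloader
    def fetch(ids)
      users = User.where(id: ids) # illustrative ActiveRecord-style lookup
      ids.map { |id| users.find { |user| user.id == id } }
    end
  end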
data/lib/graphql/dataloader.rb CHANGED
@@ -27,11 +27,12 @@ module GraphQL
       attr_accessor :default_nonblocking
     end
 
-    AsyncDataloader = Class.new(self) { self.default_nonblocking = true }
+    NonblockingDataloader = Class.new(self) { self.default_nonblocking = true }
 
     def self.use(schema, nonblocking: nil)
       schema.dataloader_class = if nonblocking
-        AsyncDataloader
+        warn("`nonblocking: true` is deprecated from `GraphQL::Dataloader`, please use `GraphQL::Dataloader::AsyncDataloader` instead. Docs: https://graphql-ruby.org/dataloader/async_dataloader.")
+        NonblockingDataloader
       else
         self
       end
@@ -118,7 +119,12 @@ module GraphQL
     #
     # @return [void]
     def yield
-      Fiber.yield
+      if use_fiber_resume?
+        Fiber.yield
+      else
+        parent_fiber = Thread.current[:parent_fiber]
+        parent_fiber.transfer
+      end
       nil
     end
 
@@ -163,11 +169,7 @@ module GraphQL
     ensure
       @pending_jobs = prev_queue
       prev_pending_keys.each do |source_instance, pending|
-        pending.each do |key, value|
-          if !source_instance.results.key?(key)
-            source_instance.pending[key] = value
-          end
-        end
+        source_instance.pending.merge!(pending)
      end
    end
 
@@ -181,7 +183,7 @@ module GraphQL
         while first_pass || job_fibers.any?
           first_pass = false
 
-          while (f = (job_fibers.shift || spawn_job_fiber))
+          while (f = job_fibers.shift || spawn_job_fiber)
             if f.alive?
               finished = run_fiber(f)
               if !finished
@@ -202,37 +204,38 @@ module GraphQL
             end
             join_queues(source_fibers, next_source_fibers)
           end
+
         end
       end
 
       run_fiber(manager)
 
-      if manager.alive?
-        raise "Invariant: Manager fiber didn't terminate properly."
-      end
-
-      if job_fibers.any?
-        raise "Invariant: job fibers should have exited but #{job_fibers.size} remained"
-      end
-      if source_fibers.any?
-        raise "Invariant: source fibers should have exited but #{source_fibers.size} remained"
-      end
     rescue UncaughtThrowError => e
       throw e.tag, e.value
     end
 
     def run_fiber(f)
-      f.resume
+      if use_fiber_resume?
+        f.resume
+      else
+        f.transfer
+      end
     end
 
     def spawn_fiber
       fiber_vars = get_fiber_variables
+      parent_fiber = use_fiber_resume? ? nil : Fiber.current
       Fiber.new(blocking: !@nonblocking) {
         set_fiber_variables(fiber_vars)
+        Thread.current[:parent_fiber] = parent_fiber
         yield
         # With `.transfer`, you have to explicitly pass back to the parent --
         # if the fiber is allowed to terminate normally, control is passed to the main fiber instead.
-        true
+        if parent_fiber
+          parent_fiber.transfer(true)
+        else
+          true
+        end
       }
     end
 
@@ -244,6 +247,15 @@ module GraphQL
       new_queue.clear
     end
 
+    def use_fiber_resume?
+      Fiber.respond_to?(:scheduler) &&
+        (
+          (defined?(::DummyScheduler) && Fiber.scheduler.is_a?(::DummyScheduler)) ||
+          (defined?(::Evt) && ::Evt::Scheduler.singleton_class::BACKENDS.any? { |be| Fiber.scheduler.is_a?(be) }) ||
+          (defined?(::Libev) && Fiber.scheduler.is_a?(::Libev::Scheduler))
+        )
+    end
+
     def spawn_job_fiber
       if @pending_jobs.any?
         spawn_fiber do
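The `.transfer` comment in spawn_fiber above is the heart of this change: a fiber entered with `resume` hands control back to its caller when it finishes, while a fiber entered with `transfer` must hand control back explicitly, which is why spawn_fiber ends with `parent_fiber.transfer(true)` and why `use_fiber_resume?` keeps the old `resume` path for schedulers that expect it. A plain-Ruby sketch of the difference (Ruby 3.1+; illustrative only, not part of graphql-ruby):

  parent = Fiber.current

  resumed = Fiber.new { :done }
  p resumed.resume       # => :done -- control comes back to the resumer automatically

  transferred = Fiber.new do
    # Hand control (and a value) back explicitly, mirroring parent_fiber.transfer(true) above
    parent.transfer(:done)
  end
  p transferred.transfer # => :done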
@@ -273,3 +285,5 @@ module GraphQL
     end
   end
 end
+
+require "graphql/dataloader/async_dataloader"
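Resolver code is unchanged by either dataloader: fields still request values through the dataloader, which batches loads across the pending fibers (or, with AsyncDataloader, tasks) shown above. A hedged sketch using the illustrative UserSource from earlier; the type, field, and author_id names are made up, while `context.dataloader.with(...).load(...)` is the existing public API:

  module Types
    class PostType < GraphQL::Schema::Object # illustrative type
      field :author_name, String, null: true

      def author_name
        # All author lookups in the current pass are batched into one UserSource#fetch call
        user = context.dataloader.with(UserSource).load(object.author_id)
        user&.name
      end
    end
  end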