graphql 1.12.3 → 1.12.4

Files changed (39)
  1. checksums.yaml +4 -4
  2. data/lib/generators/graphql/install_generator.rb +3 -1
  3. data/lib/generators/graphql/relay.rb +55 -0
  4. data/lib/generators/graphql/relay_generator.rb +3 -46
  5. data/lib/graphql.rb +1 -1
  6. data/lib/graphql/backtrace/inspect_result.rb +0 -1
  7. data/lib/graphql/backtrace/table.rb +0 -1
  8. data/lib/graphql/backtrace/traced_error.rb +0 -1
  9. data/lib/graphql/backtrace/tracer.rb +2 -6
  10. data/lib/graphql/dataloader.rb +102 -92
  11. data/lib/graphql/dataloader/null_dataloader.rb +5 -5
  12. data/lib/graphql/dataloader/request.rb +1 -6
  13. data/lib/graphql/dataloader/request_all.rb +1 -4
  14. data/lib/graphql/dataloader/source.rb +20 -6
  15. data/lib/graphql/execution/interpreter.rb +1 -1
  16. data/lib/graphql/execution/interpreter/arguments_cache.rb +37 -14
  17. data/lib/graphql/execution/interpreter/resolve.rb +33 -25
  18. data/lib/graphql/execution/interpreter/runtime.rb +36 -74
  19. data/lib/graphql/execution/multiplex.rb +21 -22
  20. data/lib/graphql/object_type.rb +0 -2
  21. data/lib/graphql/parse_error.rb +0 -1
  22. data/lib/graphql/query.rb +8 -2
  23. data/lib/graphql/query/arguments_cache.rb +0 -1
  24. data/lib/graphql/query/context.rb +1 -3
  25. data/lib/graphql/query/executor.rb +0 -1
  26. data/lib/graphql/query/null_context.rb +3 -2
  27. data/lib/graphql/query/variable_validation_error.rb +1 -1
  28. data/lib/graphql/schema/argument.rb +61 -0
  29. data/lib/graphql/schema/field.rb +10 -5
  30. data/lib/graphql/schema/find_inherited_value.rb +3 -1
  31. data/lib/graphql/schema/input_object.rb +6 -2
  32. data/lib/graphql/schema/member/has_arguments.rb +43 -56
  33. data/lib/graphql/schema/member/has_fields.rb +1 -4
  34. data/lib/graphql/schema/member/instrumentation.rb +0 -1
  35. data/lib/graphql/subscriptions/event.rb +0 -1
  36. data/lib/graphql/subscriptions/instrumentation.rb +0 -1
  37. data/lib/graphql/subscriptions/serialize.rb +0 -1
  38. data/lib/graphql/version.rb +1 -1
  39. metadata +7 -90
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 62556f55d1ce711780044cff00313dedc21f9c07ff69538127f3f96c52c1921c
-  data.tar.gz: 66f0329f3a61cdddd758159e344d1cf9781b5e21e6adf1c37380188c60c73025
+  metadata.gz: 986cbfad7b1ad59a7c26ee447068627be0c4f089fdb4894bd1b4a8580ba9b180
+  data.tar.gz: f5945fe64fea70330e078c7f11119f97bd9815b41f37a4e8748e682a9a4aa5ef
 SHA512:
-  metadata.gz: f5365accd8e35b1b016de61d9e5c2cf5605d0cb26c0d4baa4d31a4f479b6e91e7d49875fd9837bd426236a23ac52eb3dbb6a57f392ae0ebf25dc191c0e36786f
-  data.tar.gz: 228843044ed696075f3daa6e43a51a518607007417c47f1da3690d2d8e5c5dce0233c702c4101f9d7b94159499d0251fc27a164dc0f5105d6e9674dd4c1565f0
+  metadata.gz: f593fc1f7a47764de491e297703e5b6cb9fd64cb90de65ed0af51bb510a47f02e2e059d2707ad425d36f6caee71597584c5f27909df39635c60d04706431121a
+  data.tar.gz: 6c70786f0d123fc9a8dfa6b6cb3d1a24c7d07ca1fd73e375871372b45c0b67452cb5b96a0f633fded6a92e436414b5799a920e1739f8018bfc4953b4e2ad0343
data/lib/generators/graphql/install_generator.rb CHANGED
@@ -1,6 +1,7 @@
 # frozen_string_literal: true
 require 'rails/generators/base'
 require_relative 'core'
+require_relative 'relay'
 
 module Graphql
   module Generators
@@ -50,6 +51,7 @@ module Graphql
     # TODO: also add base classes
     class InstallGenerator < Rails::Generators::Base
       include Core
+      include Relay
 
       desc "Install GraphQL folder structure and boilerplate code"
       source_root File.expand_path('../templates', __FILE__)
@@ -164,7 +166,7 @@ RUBY
        end
 
        if options[:relay]
-          generate("graphql:relay")
+          install_relay
        end
 
        if gemfile_modified?
data/lib/generators/graphql/relay.rb ADDED
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+module Graphql
+  module Generators
+    module Relay
+      def install_relay
+        # Add Node, `node(id:)`, and `nodes(ids:)`
+        template("node_type.erb", "#{options[:directory]}/types/node_type.rb")
+        in_root do
+          fields = "    # Add `node(id: ID!)` and `nodes(ids: [ID!]!)`\n    include GraphQL::Types::Relay::HasNodeField\n    include GraphQL::Types::Relay::HasNodesField\n\n"
+          inject_into_file "#{options[:directory]}/types/query_type.rb", fields, after: /class .*QueryType\s*<\s*[^\s]+?\n/m, force: false
+        end
+
+        # Add connections and edges
+        template("base_connection.erb", "#{options[:directory]}/types/base_connection.rb")
+        template("base_edge.erb", "#{options[:directory]}/types/base_edge.rb")
+        connectionable_type_files = {
+          "#{options[:directory]}/types/base_object.rb" => /class .*BaseObject\s*<\s*[^\s]+?\n/m,
+          "#{options[:directory]}/types/base_union.rb" => /class .*BaseUnion\s*<\s*[^\s]+?\n/m,
+          "#{options[:directory]}/types/base_interface.rb" => /include GraphQL::Schema::Interface\n/m,
+        }
+        in_root do
+          connectionable_type_files.each do |type_class_file, sentinel|
+            inject_into_file type_class_file, "    connection_type_class(Types::BaseConnection)\n", after: sentinel, force: false
+            inject_into_file type_class_file, "    edge_type_class(Types::BaseEdge)\n", after: sentinel, force: false
+          end
+        end
+
+        # Add object ID hooks & connection plugin
+        schema_code = <<-RUBY
+
+  # Relay-style Object Identification:
+
+  # Return a string UUID for `object`
+  def self.id_from_object(object, type_definition, query_ctx)
+    # Here's a simple implementation which:
+    # - joins the type name & object.id
+    # - encodes it with base64:
+    # GraphQL::Schema::UniqueWithinType.encode(type_definition.name, object.id)
+  end
+
+  # Given a string UUID, find the object
+  def self.object_from_id(id, query_ctx)
+    # For example, to decode the UUIDs generated above:
+    # type_name, item_id = GraphQL::Schema::UniqueWithinType.decode(id)
+    #
+    # Then, based on `type_name` and `id`
+    # find an object in your application
+    # ...
+  end
+        RUBY
+        inject_into_file schema_file_path, schema_code, before: /^end\n/m, force: false
+      end
+    end
+  end
+end
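
For reference, a sketch of what the injected query type ends up looking like after `install_relay` runs, assuming the default `--directory` of `app/graphql` and the `Types::BaseObject` base class from the standard install generator (both assumptions, not part of this diff):

    module Types
      class QueryType < Types::BaseObject
        # Add `node(id: ID!)` and `nodes(ids: [ID!]!)`
        include GraphQL::Types::Relay::HasNodeField
        include GraphQL::Types::Relay::HasNodesField

        # ... existing fields ...
      end
    end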
data/lib/generators/graphql/relay_generator.rb CHANGED
@@ -1,62 +1,19 @@
 # frozen_string_literal: true
 require 'rails/generators/base'
 require_relative 'core'
+require_relative 'relay'
 
 module Graphql
   module Generators
     class RelayGenerator < Rails::Generators::Base
       include Core
+      include Relay
 
       desc "Add base types and fields for Relay-style nodes and connections"
       source_root File.expand_path('../templates', __FILE__)
 
       def install_relay
-        # Add Node, `node(id:)`, and `nodes(ids:)`
-        template("node_type.erb", "#{options[:directory]}/types/node_type.rb")
-        in_root do
-          fields = "    # Add `node(id: ID!)` and `nodes(ids: [ID!]!)`\n    include GraphQL::Types::Relay::HasNodeField\n    include GraphQL::Types::Relay::HasNodesField\n\n"
-          inject_into_file "#{options[:directory]}/types/query_type.rb", fields, after: /class .*QueryType\s*<\s*[^\s]+?\n/m, force: false
-        end
-
-        # Add connections and edges
-        template("base_connection.erb", "#{options[:directory]}/types/base_connection.rb")
-        template("base_edge.erb", "#{options[:directory]}/types/base_edge.rb")
-        connectionable_type_files = {
-          "#{options[:directory]}/types/base_object.rb" => /class .*BaseObject\s*<\s*[^\s]+?\n/m,
-          "#{options[:directory]}/types/base_union.rb" => /class .*BaseUnion\s*<\s*[^\s]+?\n/m,
-          "#{options[:directory]}/types/base_interface.rb" => /include GraphQL::Schema::Interface\n/m,
-        }
-        in_root do
-          connectionable_type_files.each do |type_class_file, sentinel|
-            inject_into_file type_class_file, "    connection_type_class(Types::BaseConnection)\n", after: sentinel, force: false
-            inject_into_file type_class_file, "    edge_type_class(Types::BaseEdge)\n", after: sentinel, force: false
-          end
-        end
-
-        # Add object ID hooks & connection plugin
-        schema_code = <<-RUBY
-
-  # Relay-style Object Identification:
-
-  # Return a string UUID for `object`
-  def self.id_from_object(object, type_definition, query_ctx)
-    # Here's a simple implementation which:
-    # - joins the type name & object.id
-    # - encodes it with base64:
-    # GraphQL::Schema::UniqueWithinType.encode(type_definition.name, object.id)
-  end
-
-  # Given a string UUID, find the object
-  def self.object_from_id(id, query_ctx)
-    # For example, to decode the UUIDs generated above:
-    # type_name, item_id = GraphQL::Schema::UniqueWithinType.decode(id)
-    #
-    # Then, based on `type_name` and `id`
-    # find an object in your application
-    # ...
-  end
-        RUBY
-        inject_into_file schema_file_path, schema_code, before: /^end\n/m, force: false
+        super
       end
     end
   end
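
The `schema_code` hooks removed above (and re-added in relay.rb) sketch their own implementation in comments. Filled in literally, they might read like this; the constant lookup in `object_from_id` is a hypothetical placeholder for application logic:

    # Return a string UUID for `object`
    def self.id_from_object(object, type_definition, query_ctx)
      # Join the type name & object.id, encoded with base64:
      GraphQL::Schema::UniqueWithinType.encode(type_definition.name, object.id)
    end

    # Given a string UUID, find the object
    def self.object_from_id(id, query_ctx)
      type_name, item_id = GraphQL::Schema::UniqueWithinType.decode(id)
      # Placeholder lookup -- replace with however your application finds records:
      Object.const_get(type_name).find(item_id)
    end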
data/lib/graphql.rb CHANGED
@@ -128,6 +128,7 @@ require "graphql/schema/printer"
 require "graphql/filter"
 require "graphql/internal_representation"
 require "graphql/static_validation"
+require "graphql/dataloader"
 require "graphql/introspection"
 
 require "graphql/analysis_error"
@@ -148,7 +149,6 @@ require "graphql/authorization"
 require "graphql/unauthorized_error"
 require "graphql/unauthorized_field_error"
 require "graphql/load_application_object_failed_error"
-require "graphql/dataloader"
 require "graphql/deprecation"
 
 module GraphQL
data/lib/graphql/backtrace/inspect_result.rb CHANGED
@@ -1,5 +1,4 @@
 # frozen_string_literal: true
-# test_via: ../backtrace.rb
 module GraphQL
   class Backtrace
     module InspectResult
data/lib/graphql/backtrace/table.rb CHANGED
@@ -1,5 +1,4 @@
 # frozen_string_literal: true
-# test_via: ../backtrace.rb
 module GraphQL
   class Backtrace
     # A class for turning a context into a human-readable table or array
data/lib/graphql/backtrace/traced_error.rb CHANGED
@@ -1,5 +1,4 @@
 # frozen_string_literal: true
-# test_via: ../backtrace.rb
 module GraphQL
   class Backtrace
     # When {Backtrace} is enabled, raised errors are wrapped with {TracedError}.
data/lib/graphql/backtrace/tracer.rb CHANGED
@@ -21,14 +21,10 @@ module GraphQL
          multiplex = query.multiplex
        when "execute_field", "execute_field_lazy"
          query = metadata[:query] || raise(ArgumentError, "Add `legacy: true` to use GraphQL::Backtrace without the interpreter runtime.")
-          context = query.context
          multiplex = query.multiplex
          push_key = metadata[:path].reject { |i| i.is_a?(Integer) }
          parent_frame = multiplex.context[:graphql_backtrace_contexts][push_key[0..-2]]
-          if parent_frame.nil?
-            p push_key
-            binding.pry
-          end
+
          if parent_frame.is_a?(GraphQL::Query)
            parent_frame = parent_frame.context
          end
@@ -47,7 +43,7 @@ module GraphQL
            nil
          end
 
-        if push_data
+        if push_data && multiplex
          multiplex.context[:graphql_backtrace_contexts][push_key] = push_data
          multiplex.context[:last_graphql_backtrace_context] = push_data
        end
data/lib/graphql/dataloader.rb CHANGED
@@ -27,36 +27,24 @@
       schema.dataloader_class = self
     end
 
-    def initialize(multiplex_context)
-      @context = multiplex_context
+    def initialize
       @source_cache = Hash.new { |h, source_class| h[source_class] = Hash.new { |h2, batch_parameters|
         source = source_class.new(*batch_parameters)
         source.setup(self)
         h2[batch_parameters] = source
       }
       }
-      @waiting_fibers = []
-      @yielded_fibers = {}
+      @pending_jobs = []
     end
 
-    # @return [Hash] the {Multiplex} context
-    attr_reader :context
-
-    # @api private
-    attr_reader :yielded_fibers
-
-    # Add some work to this dataloader to be scheduled later.
-    # @param block Some work to enqueue
-    # @return [void]
-    def enqueue(&block)
-      @waiting_fibers << Fiber.new {
-        begin
-          yield
-        rescue StandardError => exception
-          exception
-        end
-      }
-      nil
+    # Get a Source instance from this dataloader, for calling `.load(...)` or `.request(...)` on.
+    #
+    # @param source_class [Class<GraphQL::Dataloader::Source]
+    # @param batch_parameters [Array<Object>]
+    # @return [GraphQL::Dataloader::Source] An instance of {source_class}, initialized with `self, *batch_parameters`,
+    #   and cached for the lifetime of this {Multiplex}.
+    def with(source_class, *batch_parameters)
+      @source_cache[source_class][batch_parameters]
     end
 
     # Tell the dataloader that this fiber is waiting for data.
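
For context, here is roughly how the `with(...)` API above is used from a resolver. `UserSource` and the `User.where` call are illustrative assumptions, not part of this diff:

    # A Source receives a batch of keys and returns one result per key, in order:
    class UserSource < GraphQL::Dataloader::Source
      def fetch(ids)
        users = User.where(id: ids) # one query for the whole batch
        ids.map { |id| users.find { |u| u.id == id } }
      end
    end

    # In a field method: `with` returns the cached Source instance for these
    # batch parameters, and `load` pauses the current Fiber until the batch is fetched.
    def author
      context.dataloader.with(UserSource).load(object.author_id)
    end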
@@ -69,33 +57,70 @@
       nil
     end
 
-    # @param path [Array<String, Integer>] A graphql response path
-    # @return [Boolean] True if the current Fiber has yielded once via Dataloader at {path}
-    def yielded?(path)
-      @yielded_fibers[Fiber.current] == path
+    # @api private Nothing to see here
+    def append_job(&job)
+      # Given a block, queue it up to be worked through when `#run` is called.
+      # (If the dataloader is already running, then a Fiber will pick this up later.)
+      @pending_jobs.push(job)
+      nil
     end
 
-    # Run all Fibers until they're all done
-    #
-    # Each cycle works like this:
-    #
-    # - Run each pending execution fiber (`@waiting_fibers`),
-    # - Then run each pending Source, preparing more data for those fibers.
-    #   - Run each pending Source _again_ (if one Source requested more data from another Source)
-    #   - Continue until there are no pending sources
-    # - Repeat: run execution fibers again ...
-    #
-    # @return [void]
+    # @api private Move along, move along
     def run
-      # Start executing Fibers. This will run until all the Fibers are done.
-      already_run_fibers = []
-      while (current_fiber = @waiting_fibers.pop)
-        # Run each execution fiber, enqueuing it in `already_run_fibers`
-        # if it's still `.alive?`.
-        # Any spin-off continuations will be enqueued in `@waiting_fibers` (via {#enqueue})
-        resume_fiber_and_enqueue_continuation(current_fiber, already_run_fibers)
-
-        if @waiting_fibers.empty?
+      # At a high level, the algorithm is:
+      #
+      # A) Inside Fibers, run jobs from the queue one-by-one
+      #   - When one of the jobs yields to the dataloader (`Fiber.yield`), then that fiber will pause
+      #   - In that case, if there are still pending jobs, a new Fiber will be created to run jobs
+      #   - Continue until all jobs have been _started_ by a Fiber. (Any number of those Fibers may be waiting to be resumed, after their data is loaded)
+      # B) Once all known jobs have been run until they are complete or paused for data, run all pending data sources.
+      #   - Similarly, create a Fiber to consume pending sources and tell them to load their data.
+      #   - If one of those Fibers pauses, then create a new Fiber to continue working through remaining pending sources.
+      #   - When a source causes another source to become pending, run the newly-pending source _first_, since it's a dependency of the previous one.
+      # C) After all pending sources have been completely loaded (there are no more pending sources), resume any Fibers that were waiting for data.
+      #   - Those Fibers assume that source caches will have been populated with the data they were waiting for.
+      #   - Those Fibers may request data from a source again, in which case they will yield and be added to a new pending fiber list.
+      # D) Once all pending fibers have been resumed once, return to `A` above.
+      #
+      # For whatever reason, the best implementation I could find was to order the steps `[D, A, B, C]`, with a special case for skipping `D`
+      # on the first pass. I just couldn't find a better way to write the loops in a way that was DRY and easy to read.
+      #
+      pending_fibers = []
+      next_fibers = []
+      first_pass = true
+
+      while first_pass || (f = pending_fibers.shift)
+        if first_pass
+          first_pass = false
+        else
+          # These fibers were previously waiting for sources to load data,
+          # resume them. (They might wait again, in which case, re-enqueue them.)
+          f.resume
+          if f.alive?
+            next_fibers << f
+          end
+        end
+
+        while @pending_jobs.any?
+          # Create a Fiber to consume jobs until one of the jobs yields
+          # or jobs run out
+          f = Fiber.new {
+            while (job = @pending_jobs.shift)
+              job.call
+            end
+          }
+          result = f.resume
+          if result.is_a?(StandardError)
+            raise result
+          end
+          # In this case, the job yielded. Queue it up to run again after
+          # we load whatever it's waiting for.
+          if f.alive?
+            next_fibers << f
+          end
+        end
+
+        if pending_fibers.empty?
           # Now, run all Sources which have become pending _before_ resuming GraphQL execution.
           # Sources might queue up other Sources, which is fine -- those will also run before resuming execution.
           #
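
A self-contained toy (a sketch, not the gem's code) of the step-`A` job loop described in the comments above: each Fiber drains the shared job queue until a job yields, and paused Fibers are parked until their data would be ready:

    pending_jobs = [
      -> { puts "job 1 finished" },
      -> { puts "job 2 waiting for data"; Fiber.yield; puts "job 2 resumed" },
      -> { puts "job 3 finished" },
    ]

    next_fibers = []
    while pending_jobs.any?
      # One Fiber consumes jobs until a job yields (or jobs run out);
      # then the outer loop starts a fresh Fiber for the remainder.
      f = Fiber.new do
        while (job = pending_jobs.shift)
          job.call
        end
      end
      f.resume
      next_fibers << f if f.alive? # this Fiber paused mid-job
    end

    # (Step B would run pending Sources here; step C resumes the paused Fibers.)
    next_fibers.each(&:resume)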
@@ -109,9 +134,18 @@
           end
 
           if source_fiber_stack
+            # Use a stack with `.pop` here so that when a source causes another source to become pending,
+            # that newly-pending source will run _before_ the one that depends on it.
+            # (See below where the old fiber is pushed to the stack, then the new fiber is pushed on the stack.)
             while (outer_source_fiber = source_fiber_stack.pop)
-              resume_fiber_and_enqueue_continuation(outer_source_fiber, source_fiber_stack)
+              result = outer_source_fiber.resume
+              if result.is_a?(StandardError)
+                raise result
+              end
 
+              if outer_source_fiber.alive?
+                source_fiber_stack << outer_source_fiber
+              end
               # If this source caused more sources to become pending, run those before running this one again:
               next_source_fiber = create_source_fiber
               if next_source_fiber
@@ -119,58 +153,26 @@
              end
            end
          end
-
-          # We ran all the first round of execution fibers,
-          # and we ran all the pending sources.
-          # So pick up any paused execution fibers and repeat.
-          @waiting_fibers.concat(already_run_fibers)
-          already_run_fibers.clear
+          # Move newly-enqueued Fibers on to the list to be resumed.
+          # Clear out the list of next-round Fibers, so that
+          # any Fibers that pause can be put on it.
+          pending_fibers.concat(next_fibers)
+          next_fibers.clear
        end
      end
-      nil
-    end
 
-    # Get a Source instance from this dataloader, for calling `.load(...)` or `.request(...)` on.
-    #
-    # @param source_class [Class<GraphQL::Dataloader::Source]
-    # @param batch_parameters [Array<Object>]
-    # @return [GraphQL::Dataloader::Source] An instance of {source_class}, initialized with `self, *batch_parameters`,
-    #   and cached for the lifetime of this {Multiplex}.
-    def with(source_class, *batch_parameters)
-      @source_cache[source_class][batch_parameters]
+      if @pending_jobs.any?
+        raise "Invariant: #{@pending_jobs.size} pending jobs"
+      elsif pending_fibers.any?
+        raise "Invariant: #{pending_fibers.size} pending fibers"
+      elsif next_fibers.any?
+        raise "Invariant: #{next_fibers.size} next fibers"
+      end
+      nil
     end
 
-    # @api private
-    attr_accessor :current_runtime
-
     private
 
-    # Check if this fiber is still alive.
-    # If it is, and it should continue, then enqueue a continuation.
-    # If it is, re-enqueue it in `fiber_queue`.
-    # Otherwise, clean it up from @yielded_fibers.
-    # @return [void]
-    def resume_fiber_and_enqueue_continuation(fiber, fiber_stack)
-      result = fiber.resume
-      if result.is_a?(StandardError)
-        raise result
-      end
-
-      # This fiber yielded; there's more to do here.
-      # (If `#alive?` is false, then the fiber concluded without yielding.)
-      if fiber.alive?
-        if !@yielded_fibers.include?(fiber)
-          # This fiber hasn't yielded yet, we should enqueue a continuation fiber
-          @yielded_fibers[fiber] = current_runtime.progress_path
-          current_runtime.enqueue_selections_fiber
-        end
-        fiber_stack << fiber
-      else
-        # Keep this set clean so that fibers can be GC'ed during execution
-        @yielded_fibers.delete(fiber)
-      end
-    end
-
     # If there are pending sources, return a fiber for running them.
     # Otherwise, return `nil`.
     #
@@ -187,6 +189,14 @@
      end
 
      if pending_sources
+        # By passing the whole array into this Fiber, it's possible that we set ourselves up for a bunch of no-ops.
+        # For example, if you have sources `[a, b, c]`, and `a` is loaded, then `b` yields to wait for `d`, then
+        # the next fiber would be dispatched with `[c, d]`. It would fulfill `c`, then `d`, then eventually
+        # the previous fiber would start up again. `c` would no longer be pending, but it would still receive `.run_pending_keys`.
+        # That method is short-circuited since it isn't pending any more, but it's still a waste.
+        #
+        # This design could probably be improved by maintaining a `@pending_sources` queue which is shared by the fibers,
+        # similar to `@pending_jobs`. That way, when a fiber is resumed, it would never pick up work that was finished by a different fiber.
        source_fiber = Fiber.new do
          pending_sources.each(&:run_pending_keys)
        end
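
A sketch of the shared-queue refactor suggested in the comment above (a hypothetical change, not code from this release): if source Fibers popped from one shared list, a source fulfilled by one Fiber would never be re-dispatched to another:

    # Hypothetical: share pending sources across Fibers, like @pending_jobs.
    source_fiber = Fiber.new do
      while (source = @pending_sources.shift)
        source.run_pending_keys
      end
    end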