graphql 1.12.0 → 1.12.5
- checksums.yaml +4 -4
- data/lib/generators/graphql/install_generator.rb +4 -1
- data/lib/generators/graphql/loader_generator.rb +1 -0
- data/lib/generators/graphql/mutation_generator.rb +1 -0
- data/lib/generators/graphql/relay.rb +55 -0
- data/lib/generators/graphql/relay_generator.rb +4 -46
- data/lib/generators/graphql/type_generator.rb +1 -0
- data/lib/graphql.rb +2 -2
- data/lib/graphql/analysis/analyze_query.rb +1 -1
- data/lib/graphql/analysis/ast.rb +1 -1
- data/lib/graphql/backtrace/inspect_result.rb +0 -1
- data/lib/graphql/backtrace/table.rb +0 -1
- data/lib/graphql/backtrace/traced_error.rb +0 -1
- data/lib/graphql/backtrace/tracer.rb +4 -8
- data/lib/graphql/backwards_compatibility.rb +1 -1
- data/lib/graphql/base_type.rb +1 -1
- data/lib/graphql/compatibility/execution_specification.rb +1 -1
- data/lib/graphql/compatibility/lazy_execution_specification.rb +1 -1
- data/lib/graphql/compatibility/query_parser_specification.rb +1 -1
- data/lib/graphql/compatibility/schema_parser_specification.rb +1 -1
- data/lib/graphql/dataloader.rb +102 -91
- data/lib/graphql/dataloader/null_dataloader.rb +5 -5
- data/lib/graphql/dataloader/request.rb +1 -6
- data/lib/graphql/dataloader/request_all.rb +1 -4
- data/lib/graphql/dataloader/source.rb +20 -6
- data/lib/graphql/define/instance_definable.rb +1 -1
- data/lib/graphql/deprecated_dsl.rb +4 -4
- data/lib/graphql/deprecation.rb +13 -0
- data/lib/graphql/execution/errors.rb +1 -1
- data/lib/graphql/execution/execute.rb +1 -1
- data/lib/graphql/execution/interpreter.rb +3 -3
- data/lib/graphql/execution/interpreter/arguments_cache.rb +37 -14
- data/lib/graphql/execution/interpreter/resolve.rb +33 -25
- data/lib/graphql/execution/interpreter/runtime.rb +38 -74
- data/lib/graphql/execution/multiplex.rb +22 -23
- data/lib/graphql/function.rb +1 -1
- data/lib/graphql/internal_representation/document.rb +2 -2
- data/lib/graphql/internal_representation/rewrite.rb +1 -1
- data/lib/graphql/object_type.rb +0 -2
- data/lib/graphql/pagination/connection.rb +9 -0
- data/lib/graphql/pagination/connections.rb +1 -1
- data/lib/graphql/parse_error.rb +0 -1
- data/lib/graphql/query.rb +8 -2
- data/lib/graphql/query/arguments.rb +1 -1
- data/lib/graphql/query/arguments_cache.rb +0 -1
- data/lib/graphql/query/context.rb +1 -3
- data/lib/graphql/query/executor.rb +0 -1
- data/lib/graphql/query/null_context.rb +3 -2
- data/lib/graphql/query/serial_execution.rb +1 -1
- data/lib/graphql/query/variable_validation_error.rb +1 -1
- data/lib/graphql/relay/base_connection.rb +2 -2
- data/lib/graphql/relay/mutation.rb +1 -1
- data/lib/graphql/relay/node.rb +3 -3
- data/lib/graphql/relay/range_add.rb +10 -5
- data/lib/graphql/relay/type_extensions.rb +2 -2
- data/lib/graphql/schema.rb +14 -13
- data/lib/graphql/schema/argument.rb +61 -0
- data/lib/graphql/schema/field.rb +12 -7
- data/lib/graphql/schema/find_inherited_value.rb +3 -1
- data/lib/graphql/schema/input_object.rb +6 -2
- data/lib/graphql/schema/member/has_arguments.rb +43 -56
- data/lib/graphql/schema/member/has_fields.rb +1 -4
- data/lib/graphql/schema/member/instrumentation.rb +0 -1
- data/lib/graphql/schema/middleware_chain.rb +1 -1
- data/lib/graphql/schema/resolver.rb +28 -1
- data/lib/graphql/schema/timeout_middleware.rb +1 -1
- data/lib/graphql/schema/validation.rb +2 -2
- data/lib/graphql/static_validation/validator.rb +4 -2
- data/lib/graphql/subscriptions/event.rb +0 -1
- data/lib/graphql/subscriptions/instrumentation.rb +0 -1
- data/lib/graphql/subscriptions/serialize.rb +0 -1
- data/lib/graphql/subscriptions/subscription_root.rb +1 -1
- data/lib/graphql/tracing/skylight_tracing.rb +1 -1
- data/lib/graphql/upgrader/member.rb +1 -1
- data/lib/graphql/upgrader/schema.rb +1 -1
- data/lib/graphql/version.rb +1 -1
- data/readme.md +1 -1
- metadata +22 -90
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4ec7a782dba5df306d3e0590820d75d520b86651b9293417b115070057c45b8f
+  data.tar.gz: 731f02ddfda3690ecd54cba1b1f373f9c8a53b65d422f853d44ddc9072fa85d7
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e0783dea9b65037ea2d92b9ab88fc98153a541b0821f7b5d38bb53de3d33024a85d626302c8c92ddc9971606806c1a7896fb7a98da0c8c2261f9a034d87624e2
+  data.tar.gz: c4b6035220bf578fc974a3fa437914dd5e652018b651b0c5af57f355e5d08b113a2faa2a6824242b71d01bab844767592f4d6ea5323e66e2ef05bcd74ad62df3
data/lib/generators/graphql/install_generator.rb
CHANGED
@@ -1,6 +1,8 @@
 # frozen_string_literal: true
+require 'rails/generators'
 require 'rails/generators/base'
 require_relative 'core'
+require_relative 'relay'
 
 module Graphql
   module Generators
@@ -50,6 +52,7 @@ module Graphql
     # TODO: also add base classes
     class InstallGenerator < Rails::Generators::Base
       include Core
+      include Relay
 
       desc "Install GraphQL folder structure and boilerplate code"
       source_root File.expand_path('../templates', __FILE__)
@@ -164,7 +167,7 @@ RUBY
       end
 
       if options[:relay]
-
+        install_relay
       end
 
       if gemfile_modified?
data/lib/generators/graphql/relay.rb
CHANGED
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+module Graphql
+  module Generators
+    module Relay
+      def install_relay
+        # Add Node, `node(id:)`, and `nodes(ids:)`
+        template("node_type.erb", "#{options[:directory]}/types/node_type.rb")
+        in_root do
+          fields = "    # Add `node(id: ID!) and `nodes(ids: [ID!]!)`\n    include GraphQL::Types::Relay::HasNodeField\n    include GraphQL::Types::Relay::HasNodesField\n\n"
+          inject_into_file "#{options[:directory]}/types/query_type.rb", fields, after: /class .*QueryType\s*<\s*[^\s]+?\n/m, force: false
+        end
+
+        # Add connections and edges
+        template("base_connection.erb", "#{options[:directory]}/types/base_connection.rb")
+        template("base_edge.erb", "#{options[:directory]}/types/base_edge.rb")
+        connectionable_type_files = {
+          "#{options[:directory]}/types/base_object.rb" => /class .*BaseObject\s*<\s*[^\s]+?\n/m,
+          "#{options[:directory]}/types/base_union.rb" => /class .*BaseUnion\s*<\s*[^\s]+?\n/m,
+          "#{options[:directory]}/types/base_interface.rb" => /include GraphQL::Schema::Interface\n/m,
+        }
+        in_root do
+          connectionable_type_files.each do |type_class_file, sentinel|
+            inject_into_file type_class_file, "    connection_type_class(Types::BaseConnection)\n", after: sentinel, force: false
+            inject_into_file type_class_file, "    edge_type_class(Types::BaseEdge)\n", after: sentinel, force: false
+          end
+        end
+
+        # Add object ID hooks & connection plugin
+        schema_code = <<-RUBY
+
+  # Relay-style Object Identification:
+
+  # Return a string UUID for `object`
+  def self.id_from_object(object, type_definition, query_ctx)
+    # Here's a simple implementation which:
+    # - joins the type name & object.id
+    # - encodes it with base64:
+    # GraphQL::Schema::UniqueWithinType.encode(type_definition.name, object.id)
+  end
+
+  # Given a string UUID, find the object
+  def self.object_from_id(id, query_ctx)
+    # For example, to decode the UUIDs generated above:
+    # type_name, item_id = GraphQL::Schema::UniqueWithinType.decode(id)
+    #
+    # Then, based on `type_name` and `id`
+    # find an object in your application
+    # ...
+  end
+        RUBY
+        inject_into_file schema_file_path, schema_code, before: /^end\n/m, force: false
+      end
+    end
+  end
+end
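The template above only sketches the two Relay object-identification hooks as comments. Following those suggestions, a filled-in version in an application schema could look roughly like the sketch below; the schema name and the constant-lookup line are illustrative assumptions, not generator output.

# Hypothetical application schema, after uncommenting the generated suggestions.
class MyAppSchema < GraphQL::Schema
  # Return an opaque string ID for `object` by joining the type name and object.id
  def self.id_from_object(object, type_definition, query_ctx)
    GraphQL::Schema::UniqueWithinType.encode(type_definition.name, object.id)
  end

  # Given an opaque string ID, find the object again
  def self.object_from_id(id, query_ctx)
    type_name, item_id = GraphQL::Schema::UniqueWithinType.decode(id)
    # Assumption: GraphQL type names match model class names in this app
    Object.const_get(type_name).find(item_id)
  end
end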
data/lib/generators/graphql/relay_generator.rb
CHANGED
@@ -1,62 +1,20 @@
 # frozen_string_literal: true
+require 'rails/generators'
 require 'rails/generators/base'
 require_relative 'core'
+require_relative 'relay'
 
 module Graphql
   module Generators
     class RelayGenerator < Rails::Generators::Base
       include Core
+      include Relay
 
       desc "Add base types and fields for Relay-style nodes and connections"
       source_root File.expand_path('../templates', __FILE__)
 
       def install_relay
-
-        template("node_type.erb", "#{options[:directory]}/types/node_type.rb")
-        in_root do
-          fields = "    # Add `node(id: ID!) and `nodes(ids: [ID!]!)`\n    include GraphQL::Types::Relay::HasNodeField\n    include GraphQL::Types::Relay::HasNodesField\n\n"
-          inject_into_file "#{options[:directory]}/types/query_type.rb", fields, after: /class .*QueryType\s*<\s*[^\s]+?\n/m, force: false
-        end
-
-        # Add connections and edges
-        template("base_connection.erb", "#{options[:directory]}/types/base_connection.rb")
-        template("base_edge.erb", "#{options[:directory]}/types/base_edge.rb")
-        connectionable_type_files = {
-          "#{options[:directory]}/types/base_object.rb" => /class .*BaseObject\s*<\s*[^\s]+?\n/m,
-          "#{options[:directory]}/types/base_union.rb" => /class .*BaseUnion\s*<\s*[^\s]+?\n/m,
-          "#{options[:directory]}/types/base_interface.rb" => /include GraphQL::Schema::Interface\n/m,
-        }
-        in_root do
-          connectionable_type_files.each do |type_class_file, sentinel|
-            inject_into_file type_class_file, "    connection_type_class(Types::BaseConnection)\n", after: sentinel, force: false
-            inject_into_file type_class_file, "    edge_type_class(Types::BaseEdge)\n", after: sentinel, force: false
-          end
-        end
-
-        # Add object ID hooks & connection plugin
-        schema_code = <<-RUBY
-
-  # Relay-style Object Identification:
-
-  # Return a string UUID for `object`
-  def self.id_from_object(object, type_definition, query_ctx)
-    # Here's a simple implementation which:
-    # - joins the type name & object.id
-    # - encodes it with base64:
-    # GraphQL::Schema::UniqueWithinType.encode(type_definition.name, object.id)
-  end
-
-  # Given a string UUID, find the object
-  def self.object_from_id(id, query_ctx)
-    # For example, to decode the UUIDs generated above:
-    # type_name, item_id = GraphQL::Schema::UniqueWithinType.decode(id)
-    #
-    # Then, based on `type_name` and `id`
-    # find an object in your application
-    # ...
-  end
-        RUBY
-        inject_into_file schema_file_path, schema_code, before: /^end\n/m, force: false
+        super
       end
     end
   end
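The rewritten generator keeps its public `install_relay` method but now just calls `super`: Ruby finds the class's own definition first, and `super` continues method lookup into the included `Relay` module, so the shared implementation from relay.rb runs. A minimal standalone sketch of that dispatch pattern (names are illustrative, not from the gem):

# Toy illustration of the include-plus-super dispatch used by RelayGenerator above.
module Shared
  def install
    puts "shared installation steps"
  end
end

class Generator
  include Shared

  def install
    puts "generator-specific setup"
    super # continues method lookup into the included Shared module
  end
end

Generator.new.install
# prints "generator-specific setup", then "shared installation steps"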
data/lib/graphql.rb
CHANGED
@@ -128,6 +128,7 @@ require "graphql/schema/printer"
 require "graphql/filter"
 require "graphql/internal_representation"
 require "graphql/static_validation"
+require "graphql/dataloader"
 require "graphql/introspection"
 
 require "graphql/analysis_error"
@@ -148,8 +149,7 @@ require "graphql/authorization"
 require "graphql/unauthorized_error"
 require "graphql/unauthorized_field_error"
 require "graphql/load_application_object_failed_error"
-require "graphql/
-
+require "graphql/deprecation"
 
 module GraphQL
   # Ruby has `deprecate_constant`,
data/lib/graphql/analysis/analyze_query.rb
CHANGED
@@ -43,7 +43,7 @@ module GraphQL
     # @param analyzers [Array<#call>] Objects that respond to `#call(memo, visit_type, irep_node)`
     # @return [Array<Any>] Results from those analyzers
     def analyze_query(query, analyzers, multiplex_states: [])
-      warn "Legacy analysis will be removed in GraphQL-Ruby 2.0, please upgrade to AST Analysis: https://graphql-ruby.org/queries/ast_analysis.html (schema: #{query.schema})"
+      GraphQL::Deprecation.warn "Legacy analysis will be removed in GraphQL-Ruby 2.0, please upgrade to AST Analysis: https://graphql-ruby.org/queries/ast_analysis.html (schema: #{query.schema})"
 
       query.trace("analyze_query", { query: query }) do
         analyzers_to_run = analyzers.select do |analyzer|
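From here on, the release replaces bare `warn` calls with `GraphQL::Deprecation.warn`. The new data/lib/graphql/deprecation.rb (+13 lines in the summary above) is not expanded in this diff; a minimal module that would satisfy these call sites might look like the following sketch, which is an assumption about its shape rather than the gem's actual source:

# Sketch only: one choke point for deprecation output, so callers write
# GraphQL::Deprecation.warn("...") instead of a bare Kernel#warn.
module GraphQL
  module Deprecation
    def self.warn(message)
      Kernel.warn(message)
    end
  end
end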
data/lib/graphql/analysis/ast.rb
CHANGED
@@ -15,7 +15,7 @@ module GraphQL
       def use(schema_class)
         if schema_class.analysis_engine == self
           definition_line = caller(2, 1).first
-          warn("GraphQL::Analysis::AST is now the default; remove `use GraphQL::Analysis::AST` from the schema definition (#{definition_line})")
+          GraphQL::Deprecation.warn("GraphQL::Analysis::AST is now the default; remove `use GraphQL::Analysis::AST` from the schema definition (#{definition_line})")
         else
           schema_class.analysis_engine = self
         end
data/lib/graphql/backtrace/tracer.rb
CHANGED
@@ -21,14 +21,10 @@ module GraphQL
           multiplex = query.multiplex
         when "execute_field", "execute_field_lazy"
           query = metadata[:query] || raise(ArgumentError, "Add `legacy: true` to use GraphQL::Backtrace without the interpreter runtime.")
-          context = query.context
           multiplex = query.multiplex
           push_key = metadata[:path].reject { |i| i.is_a?(Integer) }
           parent_frame = multiplex.context[:graphql_backtrace_contexts][push_key[0..-2]]
-
-          p push_key
-          binding.pry
-        end
+
           if parent_frame.is_a?(GraphQL::Query)
             parent_frame = parent_frame.context
           end
@@ -47,14 +43,14 @@ module GraphQL
           nil
         end
 
-        if push_data
-          multiplex.context[:graphql_backtrace_contexts]
+        if push_data && multiplex
+          push_storage = multiplex.context[:graphql_backtrace_contexts] ||= {}
+          push_storage[push_key] = push_data
           multiplex.context[:last_graphql_backtrace_context] = push_data
         end
 
         if key == "execute_multiplex"
           multiplex_context = metadata[:multiplex].context
-          multiplex_context[:graphql_backtrace_contexts] = {}
           begin
             yield
           rescue StandardError => err
data/lib/graphql/backwards_compatibility.rb
CHANGED
@@ -22,7 +22,7 @@ module GraphQL
       backtrace = caller(0, 20)
       # Find the first line in the trace that isn't library internals:
       user_line = backtrace.find {|l| l !~ /lib\/graphql/ }
-      warn(message + "\n" + user_line + "\n")
+      GraphQL::Deprecation.warn(message + "\n" + user_line + "\n")
       wrapper = last ? LastArgumentsWrapper : FirstArgumentsWrapper
       wrapper.new(callable, from)
     else
data/lib/graphql/base_type.rb
CHANGED
@@ -224,7 +224,7 @@ module GraphQL
     private
 
     def warn_deprecated_coerce(alt_method_name)
-      warn("Coercing without a context is deprecated; use `#{alt_method_name}` if you don't want context-awareness")
+      GraphQL::Deprecation.warn("Coercing without a context is deprecated; use `#{alt_method_name}` if you don't want context-awareness")
     end
   end
 end
data/lib/graphql/compatibility/execution_specification.rb
CHANGED
@@ -32,7 +32,7 @@ module GraphQL
     # @param execution_strategy [<#new, #execute>] An execution strategy class
     # @return [Class<Minitest::Test>] A test suite for this execution strategy
     def self.build_suite(execution_strategy)
-      warn "#{self} will be removed from GraphQL-Ruby 2.0. There is no replacement, please open an issue on GitHub if you need support."
+      GraphQL::Deprecation.warn "#{self} will be removed from GraphQL-Ruby 2.0. There is no replacement, please open an issue on GitHub if you need support."
       Class.new(Minitest::Test) do
         class << self
           attr_accessor :counter_schema, :specification_schema
data/lib/graphql/compatibility/lazy_execution_specification.rb
CHANGED
@@ -7,7 +7,7 @@ module GraphQL
     # @param execution_strategy [<#new, #execute>] An execution strategy class
     # @return [Class<Minitest::Test>] A test suite for this execution strategy
     def self.build_suite(execution_strategy)
-      warn "#{self} will be removed from GraphQL-Ruby 2.0. There is no replacement, please open an issue on GitHub if you need support."
+      GraphQL::Deprecation.warn "#{self} will be removed from GraphQL-Ruby 2.0. There is no replacement, please open an issue on GitHub if you need support."
 
       Class.new(Minitest::Test) do
         class << self
data/lib/graphql/compatibility/query_parser_specification.rb
CHANGED
@@ -11,7 +11,7 @@ module GraphQL
     # @yieldreturn [GraphQL::Language::Nodes::Document]
     # @return [Class<Minitest::Test>] A test suite for this parse function
     def self.build_suite(&block)
-      warn "#{self} will be removed from GraphQL-Ruby 2.0. There is no replacement, please open an issue on GitHub if you need support."
+      GraphQL::Deprecation.warn "#{self} will be removed from GraphQL-Ruby 2.0. There is no replacement, please open an issue on GitHub if you need support."
 
       Class.new(Minitest::Test) do
         include QueryAssertions
data/lib/graphql/compatibility/schema_parser_specification.rb
CHANGED
@@ -8,7 +8,7 @@ module GraphQL
     # @yieldreturn [GraphQL::Language::Nodes::Document]
     # @return [Class<Minitest::Test>] A test suite for this parse function
    def self.build_suite(&block)
-      warn "#{self} will be removed from GraphQL-Ruby 2.0. There is no replacement, please open an issue on GitHub if you need support."
+      GraphQL::Deprecation.warn "#{self} will be removed from GraphQL-Ruby 2.0. There is no replacement, please open an issue on GitHub if you need support."
 
       Class.new(Minitest::Test) do
         @@parse_fn = block
data/lib/graphql/dataloader.rb
CHANGED
@@ -27,36 +27,24 @@ module GraphQL
       schema.dataloader_class = self
     end
 
-    def initialize
-      @context = multiplex_context
+    def initialize
       @source_cache = Hash.new { |h, source_class| h[source_class] = Hash.new { |h2, batch_parameters|
         source = source_class.new(*batch_parameters)
         source.setup(self)
         h2[batch_parameters] = source
       }
       }
-      @
-      @yielded_fibers = Set.new
+      @pending_jobs = []
     end
 
-    #
-
-
-    # @
-
-
-
-
-    # @return [void]
-    def enqueue(&block)
-      @waiting_fibers << Fiber.new {
-        begin
-          yield
-        rescue StandardError => exception
-          exception
-        end
-      }
-      nil
+    # Get a Source instance from this dataloader, for calling `.load(...)` or `.request(...)` on.
+    #
+    # @param source_class [Class<GraphQL::Dataloader::Source]
+    # @param batch_parameters [Array<Object>]
+    # @return [GraphQL::Dataloader::Source] An instance of {source_class}, initialized with `self, *batch_parameters`,
+    #   and cached for the lifetime of this {Multiplex}.
+    def with(source_class, *batch_parameters)
+      @source_cache[source_class][batch_parameters]
     end
 
     # Tell the dataloader that this fiber is waiting for data.
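The docstring above introduces `Dataloader#with`, which returns a `Source` instance memoized by class and batch parameters; callers then use `.load(...)` or `.request(...)` on it. A hedged usage sketch follows — the `UserSource` class and the `User.where` lookup are assumptions for illustration, not gem code:

# Hypothetical batching source: `fetch` receives every key requested during one
# pass of the dataloader and must return results in the same order as `ids`.
class UserSource < GraphQL::Dataloader::Source
  def fetch(ids)
    users = User.where(id: ids)                    # one query for the whole batch
    ids.map { |id| users.find { |u| u.id == id } } # align results with the requested ids
  end
end

# Inside a resolver, where `dataloader` is the current GraphQL::Dataloader:
#   dataloader.with(UserSource).load(object.user_id)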
@@ -69,32 +57,67 @@ module GraphQL
       nil
     end
 
-    # @
-    def
-
+    # @api private Nothing to see here
+    def append_job(&job)
+      # Given a block, queue it up to be worked through when `#run` is called.
+      # (If the dataloader is already running, than a Fiber will pick this up later.)
+      @pending_jobs.push(job)
+      nil
     end
 
-    #
-    #
-    # Each cycle works like this:
-    #
-    # - Run each pending execution fiber (`@waiting_fibers`),
-    # - Then run each pending Source, preparing more data for those fibers.
-    # - Run each pending Source _again_ (if one Source requested more data from another Source)
-    # - Continue until there are no pending sources
-    # - Repeat: run execution fibers again ...
-    #
-    # @return [void]
+    # @api private Move along, move along
     def run
-      #
-
-
-
-
-
-
-
-
+      # At a high level, the algorithm is:
+      #
+      #  A) Inside Fibers, run jobs from the queue one-by-one
+      #    - When one of the jobs yields to the dataloader (`Fiber.yield`), then that fiber will pause
+      #    - In that case, if there are still pending jobs, a new Fiber will be created to run jobs
+      #    - Continue until all jobs have been _started_ by a Fiber. (Any number of those Fibers may be waiting to be resumed, after their data is loaded)
+      #  B) Once all known jobs have been run until they are complete or paused for data, run all pending data sources.
+      #    - Similarly, create a Fiber to consume pending sources and tell them to load their data.
+      #    - If one of those Fibers pauses, then create a new Fiber to continue working through remaining pending sources.
+      #    - When a source causes another source to become pending, run the newly-pending source _first_, since it's a dependency of the previous one.
+      #  C) After all pending sources have been completely loaded (there are no more pending sources), resume any Fibers that were waiting for data.
+      #    - Those Fibers assume that source caches will have been populated with the data they were waiting for.
+      #    - Those Fibers may request data from a source again, in which case they will yeilded and be added to a new pending fiber list.
+      #  D) Once all pending fibers have been resumed once, return to `A` above.
+      #
+      # For whatever reason, the best implementation I could find was to order the steps `[D, A, B, C]`, with a special case for skipping `D`
+      # on the first pass. I just couldn't find a better way to write the loops in a way that was DRY and easy to read.
+      #
+      pending_fibers = []
+      next_fibers = []
+      first_pass = true
+
+      while first_pass || (f = pending_fibers.shift)
+        if first_pass
+          first_pass = false
+        else
+          # These fibers were previously waiting for sources to load data,
+          # resume them. (They might wait again, in which case, re-enqueue them.)
+          resume(f)
+          if f.alive?
+            next_fibers << f
+          end
+        end
+
+        while @pending_jobs.any?
+          # Create a Fiber to consume jobs until one of the jobs yields
+          # or jobs run out
+          f = Fiber.new {
+            while (job = @pending_jobs.shift)
+              job.call
+            end
+          }
+          resume(f)
+          # In this case, the job yielded. Queue it up to run again after
+          # we load whatever it's waiting for.
+          if f.alive?
+            next_fibers << f
+          end
+        end
+
+        if pending_fibers.empty?
           # Now, run all Sources which have become pending _before_ resuming GraphQL execution.
           # Sources might queue up other Sources, which is fine -- those will also run before resuming execution.
           #
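Step A of the algorithm described above — drain the job queue inside a Fiber, and start a fresh Fiber whenever the current one pauses — can be illustrated outside GraphQL-Ruby with a few lines of plain Ruby. This is a stripped-down sketch of the scheduling idea, not the gem's code:

# Toy scheduler: run queued jobs inside Fibers; a job that calls Fiber.yield
# pauses its Fiber, and a fresh Fiber picks up the remaining jobs.
pending_jobs = [
  -> { puts "job 1" },
  -> { puts "job 2 (waits for data)"; Fiber.yield },
  -> { puts "job 3" },
]
paused = []

until pending_jobs.empty?
  f = Fiber.new do
    while (job = pending_jobs.shift)
      job.call
    end
  end
  f.resume
  paused << f if f.alive? # it yielded; resume it later, once data is ready
end

paused.each(&:resume) # "load data", then let the paused jobs finish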
@@ -108,9 +131,15 @@ module GraphQL
           end
 
           if source_fiber_stack
+            # Use a stack with `.pop` here so that when a source causes another source to become pending,
+            # that newly-pending source will run _before_ the one that depends on it.
+            # (See below where the old fiber is pushed to the stack, then the new fiber is pushed on the stack.)
             while (outer_source_fiber = source_fiber_stack.pop)
-
+              resume(outer_source_fiber)
 
+              if outer_source_fiber.alive?
+                source_fiber_stack << outer_source_fiber
+              end
               # If this source caused more sources to become pending, run those before running this one again:
               next_source_fiber = create_source_fiber
               if next_source_fiber
@@ -118,58 +147,26 @@ module GraphQL
               end
             end
           end
-
-          #
-          #
-
-
-          already_run_fibers.clear
+          # Move newly-enqueued Fibers on to the list to be resumed.
+          # Clear out the list of next-round Fibers, so that
+          # any Fibers that pause can be put on it.
+          pending_fibers.concat(next_fibers)
+          next_fibers.clear
         end
       end
-      nil
-    end
 
-
-
-
-
-
-
-
-
+      if @pending_jobs.any?
+        raise "Invariant: #{@pending_jobs.size} pending jobs"
+      elsif pending_fibers.any?
+        raise "Invariant: #{pending_fibers.size} pending fibers"
+      elsif next_fibers.any?
+        raise "Invariant: #{next_fibers.size} next fibers"
+      end
+      nil
     end
 
-    # @api private
-    attr_accessor :current_runtime
-
     private
 
-    # Check if this fiber is still alive.
-    # If it is, and it should continue, then enqueue a continuation.
-    # If it is, re-enqueue it in `fiber_queue`.
-    # Otherwise, clean it up from @yielded_fibers.
-    # @return [void]
-    def resume_fiber_and_enqueue_continuation(fiber, fiber_stack)
-      result = fiber.resume
-      if result.is_a?(StandardError)
-        raise result
-      end
-
-      # This fiber yielded; there's more to do here.
-      # (If `#alive?` is false, then the fiber concluded without yielding.)
-      if fiber.alive?
-        if !@yielded_fibers.include?(fiber)
-          # This fiber hasn't yielded yet, we should enqueue a continuation fiber
-          @yielded_fibers.add(fiber)
-          current_runtime.enqueue_selections_fiber
-        end
-        fiber_stack << fiber
-      else
-        # Keep this set clean so that fibers can be GC'ed during execution
-        @yielded_fibers.delete(fiber)
-      end
-    end
-
     # If there are pending sources, return a fiber for running them.
     # Otherwise, return `nil`.
     #
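The "use a stack with `.pop`" comment earlier in this hunk describes dependency-first ordering: when processing an item surfaces a dependency, both are pushed back so the dependency is popped (and finished) first. A toy illustration of that ordering, independent of GraphQL-Ruby:

stack = [:a]
deps  = { a: :b, b: :c } # each item's not-yet-satisfied dependency
order = []
until stack.empty?
  item = stack.pop
  if (dep = deps.delete(item))
    stack.push(item) # come back to this one later
    stack.push(dep)  # run the dependency first
  else
    order << item
  end
end
order # => [:c, :b, :a]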
@@ -186,6 +183,14 @@ module GraphQL
       end
 
       if pending_sources
+        # By passing the whole array into this Fiber, it's possible that we set ourselves up for a bunch of no-ops.
+        # For example, if you have sources `[a, b, c]`, and `a` is loaded, then `b` yields to wait for `d`, then
+        # the next fiber would be dispatched with `[c, d]`. It would fulfill `c`, then `d`, then eventually
+        # the previous fiber would start up again. `c` would no longer be pending, but it would still receive `.run_pending_keys`.
+        # That method is short-circuited since it isn't pending any more, but it's still a waste.
+        #
+        # This design could probably be improved by maintaining a `@pending_sources` queue which is shared by the fibers,
+        # similar to `@pending_jobs`. That way, when a fiber is resumed, it would never pick up work that was finished by a different fiber.
         source_fiber = Fiber.new do
           pending_sources.each(&:run_pending_keys)
         end
@@ -193,5 +198,11 @@ module GraphQL
 
       source_fiber
     end
+
+    def resume(fiber)
+      fiber.resume
+    rescue UncaughtThrowError => e
+      throw e.tag, e.value
+    end
   end
 end
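Wiring the dataloader into a schema goes through the `use` plugin hook shown at the top of this file's diff (`schema.dataloader_class = self`). In an application that looks roughly like the sketch below; the schema and type names are placeholders:

class MyAppSchema < GraphQL::Schema
  query Types::QueryType

  # Enables fiber-based batch loading for the whole schema; resolvers can then
  # call `dataloader.with(SomeSource, ...).load(key)`.
  use GraphQL::Dataloader
end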