graphql-stitching 0.3.4 → 1.0.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/README.md +79 -14
- data/docs/README.md +1 -1
- data/docs/client.md +103 -0
- data/docs/composer.md +2 -2
- data/docs/supergraph.md +1 -1
- data/example/gateway.rb +4 -4
- data/lib/graphql/stitching/{gateway.rb → client.rb} +7 -7
- data/lib/graphql/stitching/composer/validate_boundaries.rb +4 -4
- data/lib/graphql/stitching/composer.rb +44 -22
- data/lib/graphql/stitching/executor/boundary_source.rb +199 -0
- data/lib/graphql/stitching/executor/root_source.rb +48 -0
- data/lib/graphql/stitching/executor.rb +7 -231
- data/lib/graphql/stitching/{remote_client.rb → http_executable.rb} +1 -1
- data/lib/graphql/stitching/planner.rb +264 -189
- data/lib/graphql/stitching/planner_operation.rb +18 -16
- data/lib/graphql/stitching/shaper.rb +1 -1
- data/lib/graphql/stitching/supergraph.rb +21 -28
- data/lib/graphql/stitching/version.rb +1 -1
- data/lib/graphql/stitching.rb +3 -2
- metadata +7 -5
- data/docs/gateway.md +0 -103
@@ -0,0 +1,199 @@
|
|
1
|
+
# frozen_string_literal: true

module GraphQL
  module Stitching
    # Dataloader source that fulfills "boundary" operations: it collects origin
    # objects from previously-fetched data, batches their keys into a single
    # query against one location, then merges the results and errors back into
    # the executor's aggregate result tree.
    class Executor::BoundarySource < GraphQL::Dataloader::Source
      # @param executor [Executor] aggregates data/errors/query_count across sources
      # @param location [String] the service location this source queries
      def initialize(executor, location)
        @executor = executor
        @location = location
      end

      # Resolves a batch of planned boundary operations against @location.
      # Returns each op's "order" when fulfilled, or nil when it had no origin objects.
      def fetch(ops)
        origin_sets_by_operation = ops.each_with_object({}) do |op, memo|
          # walk the op's insertion path through existing data, fanning out across arrays
          origin_set = op["path"].reduce([@executor.data]) do |set, path_segment|
            set.flat_map { |obj| obj && obj[path_segment] }.tap(&:compact!)
          end

          if op["if_type"]
            # operations planned around unused fragment conditions should not trigger requests
            origin_set.select! { _1["_STITCH_typename"] == op["if_type"] }
          end

          memo[op] = origin_set if origin_set.any?
        end

        if origin_sets_by_operation.any?
          query_document, variable_names = build_document(origin_sets_by_operation, @executor.request.operation_name)
          variables = @executor.request.variables.slice(*variable_names)
          raw_result = @executor.supergraph.execute_at_location(@location, query_document, variables, @executor.request.context)
          @executor.query_count += 1

          merge_results!(origin_sets_by_operation, raw_result.dig("data"))

          errors = raw_result.dig("errors")
          @executor.errors.concat(extract_errors!(origin_sets_by_operation, errors)) if errors&.any?
        end

        ops.map { origin_sets_by_operation[_1] ? _1["order"] : nil }
      end

      # Builds batched boundary queries
      # "query MyOperation_2_3($var:VarType) {
      #   _0_result: list(keys:["a","b","c"]) { boundarySelections... }
      #   _1_0_result: item(key:"x") { boundarySelections... }
      #   _1_1_result: item(key:"y") { boundarySelections... }
      #   _1_2_result: item(key:"z") { boundarySelections... }
      # }"
      # Returns [document_string, variable_names].
      def build_document(origin_sets_by_operation, operation_name = nil)
        variable_defs = {}
        query_fields = origin_sets_by_operation.map.with_index do |(op, origin_set), batch_index|
          variable_defs.merge!(op["variables"])
          boundary = op["boundary"]

          if boundary["list"]
            # list boundary: one aliased field carrying the whole batch of keys
            input = origin_set.each_with_index.reduce(String.new) do |memo, (origin_obj, index)|
              memo << "," if index > 0
              memo << build_key(boundary["key"], origin_obj, federation: boundary["federation"])
              memo
            end

            "_#{batch_index}_result: #{boundary["field"]}(#{boundary["arg"]}:[#{input}]) #{op["selections"]}"
          else
            # item boundary: one aliased field per origin object
            origin_set.map.with_index do |origin_obj, index|
              input = build_key(boundary["key"], origin_obj, federation: boundary["federation"])
              "_#{batch_index}_#{index}_result: #{boundary["field"]}(#{boundary["arg"]}:#{input}) #{op["selections"]}"
            end
          end
        end

        doc = String.new("query") # << boundary fulfillment always uses query

        if operation_name
          doc << " #{operation_name}"
          origin_sets_by_operation.each_key do |op|
            doc << "_#{op["order"]}"
          end
        end

        if variable_defs.any?
          variable_str = variable_defs.map { |k, v| "$#{k}:#{v}" }.join(",")
          doc << "(#{variable_str})"
        end

        doc << "{ #{query_fields.join(" ")} }"

        return doc, variable_defs.keys
      end

      # Serializes a boundary key argument for one origin object.
      # Federation boundaries wrap the key in an entity representation with __typename.
      def build_key(key, origin_obj, federation: false)
        key_value = JSON.generate(origin_obj["_STITCH_#{key}"])
        if federation
          "{ __typename: \"#{origin_obj["_STITCH_typename"]}\", #{key}: #{key_value} }"
        else
          key_value
        end
      end

      # Merges aliased boundary results back onto their origin objects in place.
      def merge_results!(origin_sets_by_operation, raw_result)
        return unless raw_result

        origin_sets_by_operation.each_with_index do |(op, origin_set), batch_index|
          results = if op.dig("boundary", "list")
            raw_result["_#{batch_index}_result"]
          else
            origin_set.map.with_index { |_, index| raw_result["_#{batch_index}_#{index}_result"] }
          end

          next unless results&.any?

          origin_set.each_with_index do |origin_obj, index|
            origin_obj.merge!(results[index]) if results[index]
          end
        end
      end

      # https://spec.graphql.org/June2018/#sec-Errors
      # Maps batch-aliased error paths back onto their insertion locations in the
      # aggregate result; errors that cannot be mapped pass through (alias stripped).
      # Always returns an Array (callers concat the result).
      def extract_errors!(origin_sets_by_operation, errors)
        ops = origin_sets_by_operation.keys
        origin_sets = origin_sets_by_operation.values
        pathed_errors_by_op_index_and_object_id = {}

        errors_result = errors.each_with_object([]) do |err, memo|
          err.delete("locations")
          path = err["path"]

          if path && path.length > 0
            result_alias = /^_(\d+)(?:_(\d+))?_result$/.match(path.first.to_s)

            if result_alias
              path = err["path"] = path[1..-1]

              origin_obj = if result_alias[2]
                origin_sets.dig(result_alias[1].to_i, result_alias[2].to_i)
              elsif path[0].is_a?(Integer) || /\d+/.match?(path[0].to_s)
                origin_sets.dig(result_alias[1].to_i, path.shift.to_i)
              end

              if origin_obj
                by_op_index = pathed_errors_by_op_index_and_object_id[result_alias[1].to_i] ||= {}
                by_object_id = by_op_index[origin_obj.object_id] ||= []
                by_object_id << err
                next
              end
            end
          end

          memo << err
        end

        if pathed_errors_by_op_index_and_object_id.any?
          pathed_errors_by_op_index_and_object_id.each do |op_index, pathed_errors_by_object_id|
            repath_errors!(pathed_errors_by_object_id, ops.dig(op_index, "path"))
            errors_result.concat(pathed_errors_by_object_id.values)
          end
        end

        # FIX: `flatten!` returns nil when nothing was flattened, which made this
        # method return nil and break the caller's `errors.concat(...)`. Use the
        # non-destructive `flatten` so an Array is always returned.
        errors_result.flatten
      end

      private

      # traverse forward through origin data, expanding arrays to follow all paths
      # any errors found for an origin object_id have their path prefixed by the object path
      def repath_errors!(pathed_errors_by_object_id, forward_path, current_path=[], root=@executor.data)
        current_path.push(forward_path.shift)
        scope = root[current_path.last]

        if forward_path.any? && scope.is_a?(Array)
          scope.each_with_index do |element, index|
            inner_elements = element.is_a?(Array) ? element.flatten : [element]
            inner_elements.each do |inner_element|
              current_path << index
              repath_errors!(pathed_errors_by_object_id, forward_path, current_path, inner_element)
              current_path.pop
            end
          end

        elsif forward_path.any?
          # FIX: the original pushed an undefined local `index` here, raising
          # NameError whenever a non-array scope still had remaining path
          # segments. A non-array scope has no index — recurse directly.
          repath_errors!(pathed_errors_by_object_id, forward_path, current_path, scope)

        elsif scope.is_a?(Array)
          scope.each_with_index do |element, index|
            inner_elements = element.is_a?(Array) ? element.flatten : [element]
            inner_elements.each do |inner_element|
              errors = pathed_errors_by_object_id[inner_element.object_id]
              errors.each { _1["path"] = [*current_path, index, *_1["path"]] } if errors
            end
          end

        else
          errors = pathed_errors_by_object_id[scope.object_id]
          errors.each { _1["path"] = [*current_path, *_1["path"]] } if errors
        end

        forward_path.unshift(current_path.pop)
      end
    end
  end
end
|
@@ -0,0 +1,48 @@
|
|
1
|
+
# frozen_string_literal: true

module GraphQL
  module Stitching
    # Dataloader source that executes a planned root-level operation against
    # its target location and folds the response into the executor's
    # aggregate data and errors.
    class Executor::RootSource < GraphQL::Dataloader::Source
      # @param executor [Executor] aggregates data/errors/query_count across sources
      # @param location [String] the service location this source queries
      def initialize(executor, location)
        @executor = executor
        @location = location
      end

      # Executes the single pending root operation and returns its "order"
      # once per requested op.
      def fetch(ops)
        op = ops.first # There should only ever be one per location at a time

        document = build_document(op, @executor.request.operation_name)
        variables = @executor.request.variables.slice(*op["variables"].keys)
        result = @executor.supergraph.execute_at_location(op["location"], document, variables, @executor.request.context)
        @executor.query_count += 1

        data = result["data"]
        @executor.data.merge!(data) if data

        errors = result["errors"]
        if errors&.any?
          errors.each { _1.delete("locations") }
          @executor.errors.concat(errors)
        end

        ops.map { op["order"] }
      end

      # Builds root source documents
      # "query MyOperation_1($var:VarType) { rootSelections ... }"
      def build_document(op, operation_name = nil)
        doc = String.new(op["operation_type"])
        doc << " #{operation_name}_#{op["order"]}" if operation_name

        if op["variables"].any?
          variable_defs = op["variables"].map { |name, type| "$#{name}:#{type}" }.join(",")
          doc << "(#{variable_defs})"
        end

        doc << op["selections"]
      end
    end
  end
end
|
@@ -5,232 +5,6 @@ require "json"
|
|
5
5
|
module GraphQL
|
6
6
|
module Stitching
|
7
7
|
class Executor
|
8
|
-
|
9
|
-
class RootSource < GraphQL::Dataloader::Source
|
10
|
-
def initialize(executor, location)
|
11
|
-
@executor = executor
|
12
|
-
@location = location
|
13
|
-
end
|
14
|
-
|
15
|
-
def fetch(ops)
|
16
|
-
op = ops.first # There should only ever be one per location at a time
|
17
|
-
|
18
|
-
query_document = build_document(op, @executor.request.operation_name)
|
19
|
-
query_variables = @executor.request.variables.slice(*op["variables"].keys)
|
20
|
-
result = @executor.supergraph.execute_at_location(op["location"], query_document, query_variables, @executor.request.context)
|
21
|
-
@executor.query_count += 1
|
22
|
-
|
23
|
-
@executor.data.merge!(result["data"]) if result["data"]
|
24
|
-
if result["errors"]&.any?
|
25
|
-
result["errors"].each { _1.delete("locations") }
|
26
|
-
@executor.errors.concat(result["errors"])
|
27
|
-
end
|
28
|
-
|
29
|
-
ops.map { op["key"] }
|
30
|
-
end
|
31
|
-
|
32
|
-
# Builds root source documents
|
33
|
-
# "query MyOperation_1($var:VarType) { rootSelections ... }"
|
34
|
-
def build_document(op, operation_name = nil)
|
35
|
-
doc = String.new
|
36
|
-
doc << op["operation_type"]
|
37
|
-
|
38
|
-
if operation_name
|
39
|
-
doc << " " << operation_name << "_" << op["key"].to_s
|
40
|
-
end
|
41
|
-
|
42
|
-
if op["variables"].any?
|
43
|
-
variable_defs = op["variables"].map { |k, v| "$#{k}:#{v}" }.join(",")
|
44
|
-
doc << "(" << variable_defs << ")"
|
45
|
-
end
|
46
|
-
|
47
|
-
doc << op["selections"]
|
48
|
-
doc
|
49
|
-
end
|
50
|
-
end
|
51
|
-
|
52
|
-
class BoundarySource < GraphQL::Dataloader::Source
|
53
|
-
def initialize(executor, location)
|
54
|
-
@executor = executor
|
55
|
-
@location = location
|
56
|
-
end
|
57
|
-
|
58
|
-
def fetch(ops)
|
59
|
-
origin_sets_by_operation = ops.each_with_object({}) do |op, memo|
|
60
|
-
origin_set = op["insertion_path"].reduce([@executor.data]) do |set, path_segment|
|
61
|
-
set.flat_map { |obj| obj && obj[path_segment] }.tap(&:compact!)
|
62
|
-
end
|
63
|
-
|
64
|
-
if op["type_condition"]
|
65
|
-
# operations planned around unused fragment conditions should not trigger requests
|
66
|
-
origin_set.select! { _1["_STITCH_typename"] == op["type_condition"] }
|
67
|
-
end
|
68
|
-
|
69
|
-
memo[op] = origin_set if origin_set.any?
|
70
|
-
end
|
71
|
-
|
72
|
-
if origin_sets_by_operation.any?
|
73
|
-
query_document, variable_names = build_document(origin_sets_by_operation, @executor.request.operation_name)
|
74
|
-
variables = @executor.request.variables.slice(*variable_names)
|
75
|
-
raw_result = @executor.supergraph.execute_at_location(@location, query_document, variables, @executor.request.context)
|
76
|
-
@executor.query_count += 1
|
77
|
-
|
78
|
-
merge_results!(origin_sets_by_operation, raw_result.dig("data"))
|
79
|
-
|
80
|
-
errors = raw_result.dig("errors")
|
81
|
-
@executor.errors.concat(extract_errors!(origin_sets_by_operation, errors)) if errors&.any?
|
82
|
-
end
|
83
|
-
|
84
|
-
ops.map { origin_sets_by_operation[_1] ? _1["key"] : nil }
|
85
|
-
end
|
86
|
-
|
87
|
-
# Builds batched boundary queries
|
88
|
-
# "query MyOperation_2_3($var:VarType) {
|
89
|
-
# _0_result: list(keys:["a","b","c"]) { boundarySelections... }
|
90
|
-
# _1_0_result: item(key:"x") { boundarySelections... }
|
91
|
-
# _1_1_result: item(key:"y") { boundarySelections... }
|
92
|
-
# _1_2_result: item(key:"z") { boundarySelections... }
|
93
|
-
# }"
|
94
|
-
def build_document(origin_sets_by_operation, operation_name = nil)
|
95
|
-
variable_defs = {}
|
96
|
-
query_fields = origin_sets_by_operation.map.with_index do |(op, origin_set), batch_index|
|
97
|
-
variable_defs.merge!(op["variables"])
|
98
|
-
boundary = op["boundary"]
|
99
|
-
key_selection = "_STITCH_#{boundary["selection"]}"
|
100
|
-
|
101
|
-
if boundary["list"]
|
102
|
-
input = JSON.generate(origin_set.map { _1[key_selection] })
|
103
|
-
"_#{batch_index}_result: #{boundary["field"]}(#{boundary["arg"]}:#{input}) #{op["selections"]}"
|
104
|
-
else
|
105
|
-
origin_set.map.with_index do |origin_obj, index|
|
106
|
-
input = JSON.generate(origin_obj[key_selection])
|
107
|
-
"_#{batch_index}_#{index}_result: #{boundary["field"]}(#{boundary["arg"]}:#{input}) #{op["selections"]}"
|
108
|
-
end
|
109
|
-
end
|
110
|
-
end
|
111
|
-
|
112
|
-
doc = String.new
|
113
|
-
doc << "query" # << boundary fulfillment always uses query
|
114
|
-
|
115
|
-
if operation_name
|
116
|
-
doc << " " << operation_name
|
117
|
-
origin_sets_by_operation.each_key do |op|
|
118
|
-
doc << "_" << op["key"].to_s
|
119
|
-
end
|
120
|
-
end
|
121
|
-
|
122
|
-
if variable_defs.any?
|
123
|
-
variable_str = variable_defs.map { |k, v| "$#{k}:#{v}" }.join(",")
|
124
|
-
doc << "(" << variable_str << ")"
|
125
|
-
end
|
126
|
-
|
127
|
-
doc << "{ " << query_fields.join(" ") << " }"
|
128
|
-
|
129
|
-
return doc, variable_defs.keys
|
130
|
-
end
|
131
|
-
|
132
|
-
def merge_results!(origin_sets_by_operation, raw_result)
|
133
|
-
return unless raw_result
|
134
|
-
|
135
|
-
origin_sets_by_operation.each_with_index do |(op, origin_set), batch_index|
|
136
|
-
results = if op.dig("boundary", "list")
|
137
|
-
raw_result["_#{batch_index}_result"]
|
138
|
-
else
|
139
|
-
origin_set.map.with_index { |_, index| raw_result["_#{batch_index}_#{index}_result"] }
|
140
|
-
end
|
141
|
-
|
142
|
-
next unless results&.any?
|
143
|
-
|
144
|
-
origin_set.each_with_index do |origin_obj, index|
|
145
|
-
origin_obj.merge!(results[index]) if results[index]
|
146
|
-
end
|
147
|
-
end
|
148
|
-
end
|
149
|
-
|
150
|
-
# https://spec.graphql.org/June2018/#sec-Errors
|
151
|
-
def extract_errors!(origin_sets_by_operation, errors)
|
152
|
-
ops = origin_sets_by_operation.keys
|
153
|
-
origin_sets = origin_sets_by_operation.values
|
154
|
-
pathed_errors_by_op_index_and_object_id = {}
|
155
|
-
|
156
|
-
errors_result = errors.each_with_object([]) do |err, memo|
|
157
|
-
err.delete("locations")
|
158
|
-
path = err["path"]
|
159
|
-
|
160
|
-
if path && path.length > 0
|
161
|
-
result_alias = /^_(\d+)(?:_(\d+))?_result$/.match(path.first.to_s)
|
162
|
-
|
163
|
-
if result_alias
|
164
|
-
path = err["path"] = path[1..-1]
|
165
|
-
|
166
|
-
origin_obj = if result_alias[2]
|
167
|
-
origin_sets.dig(result_alias[1].to_i, result_alias[2].to_i)
|
168
|
-
elsif path[0].is_a?(Integer) || /\d+/.match?(path[0].to_s)
|
169
|
-
origin_sets.dig(result_alias[1].to_i, path.shift.to_i)
|
170
|
-
end
|
171
|
-
|
172
|
-
if origin_obj
|
173
|
-
by_op_index = pathed_errors_by_op_index_and_object_id[result_alias[1].to_i] ||= {}
|
174
|
-
by_object_id = by_op_index[origin_obj.object_id] ||= []
|
175
|
-
by_object_id << err
|
176
|
-
next
|
177
|
-
end
|
178
|
-
end
|
179
|
-
end
|
180
|
-
|
181
|
-
memo << err
|
182
|
-
end
|
183
|
-
|
184
|
-
if pathed_errors_by_op_index_and_object_id.any?
|
185
|
-
pathed_errors_by_op_index_and_object_id.each do |op_index, pathed_errors_by_object_id|
|
186
|
-
repath_errors!(pathed_errors_by_object_id, ops.dig(op_index, "insertion_path"))
|
187
|
-
errors_result.concat(pathed_errors_by_object_id.values)
|
188
|
-
end
|
189
|
-
end
|
190
|
-
errors_result.flatten!
|
191
|
-
end
|
192
|
-
|
193
|
-
private
|
194
|
-
|
195
|
-
# traverse forward through origin data, expanding arrays to follow all paths
|
196
|
-
# any errors found for an origin object_id have their path prefixed by the object path
|
197
|
-
def repath_errors!(pathed_errors_by_object_id, forward_path, current_path=[], root=@executor.data)
|
198
|
-
current_path.push(forward_path.shift)
|
199
|
-
scope = root[current_path.last]
|
200
|
-
|
201
|
-
if forward_path.any? && scope.is_a?(Array)
|
202
|
-
scope.each_with_index do |element, index|
|
203
|
-
inner_elements = element.is_a?(Array) ? element.flatten : [element]
|
204
|
-
inner_elements.each do |inner_element|
|
205
|
-
current_path << index
|
206
|
-
repath_errors!(pathed_errors_by_object_id, forward_path, current_path, inner_element)
|
207
|
-
current_path.pop
|
208
|
-
end
|
209
|
-
end
|
210
|
-
|
211
|
-
elsif forward_path.any?
|
212
|
-
current_path << index
|
213
|
-
repath_errors!(pathed_errors_by_object_id, forward_path, current_path, scope)
|
214
|
-
current_path.pop
|
215
|
-
|
216
|
-
elsif scope.is_a?(Array)
|
217
|
-
scope.each_with_index do |element, index|
|
218
|
-
inner_elements = element.is_a?(Array) ? element.flatten : [element]
|
219
|
-
inner_elements.each do |inner_element|
|
220
|
-
errors = pathed_errors_by_object_id[inner_element.object_id]
|
221
|
-
errors.each { _1["path"] = [*current_path, index, *_1["path"]] } if errors
|
222
|
-
end
|
223
|
-
end
|
224
|
-
|
225
|
-
else
|
226
|
-
errors = pathed_errors_by_object_id[scope.object_id]
|
227
|
-
errors.each { _1["path"] = [*current_path, *_1["path"]] } if errors
|
228
|
-
end
|
229
|
-
|
230
|
-
forward_path.unshift(current_path.pop)
|
231
|
-
end
|
232
|
-
end
|
233
|
-
|
234
8
|
attr_reader :supergraph, :request, :data, :errors
|
235
9
|
attr_accessor :query_count
|
236
10
|
|
@@ -265,7 +39,7 @@ module GraphQL
|
|
265
39
|
|
266
40
|
private
|
267
41
|
|
268
|
-
def exec!(
|
42
|
+
def exec!(next_ordinals = [0])
|
269
43
|
if @exec_cycles > @queue.length
|
270
44
|
# sanity check... if we've exceeded queue size, then something went wrong.
|
271
45
|
raise StitchingError, "Too many execution requests attempted."
|
@@ -273,7 +47,7 @@ module GraphQL
|
|
273
47
|
|
274
48
|
@dataloader.append_job do
|
275
49
|
tasks = @queue
|
276
|
-
.select {
|
50
|
+
.select { next_ordinals.include?(_1["after"]) }
|
277
51
|
.group_by { [_1["location"], _1["boundary"].nil?] }
|
278
52
|
.map do |(location, root_source), ops|
|
279
53
|
if root_source
|
@@ -291,10 +65,12 @@ module GraphQL
|
|
291
65
|
end
|
292
66
|
|
293
67
|
def exec_task(task)
|
294
|
-
|
295
|
-
|
296
|
-
exec!(next_keys) if next_keys.any?
|
68
|
+
next_ordinals = task.load.tap(&:compact!)
|
69
|
+
exec!(next_ordinals) if next_ordinals.any?
|
297
70
|
end
|
298
71
|
end
|
299
72
|
end
|
300
73
|
end
|
74
|
+
|
75
|
+
require_relative "./executor/boundary_source"
|
76
|
+
require_relative "./executor/root_source"
|