graphql-stitching 0.0.1
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.github/workflows/ci.yml +27 -0
- data/.gitignore +59 -0
- data/.ruby-version +1 -0
- data/Gemfile +11 -0
- data/Gemfile.lock +49 -0
- data/LICENSE +21 -0
- data/Procfile +3 -0
- data/README.md +329 -0
- data/Rakefile +12 -0
- data/docs/README.md +14 -0
- data/docs/composer.md +69 -0
- data/docs/document.md +15 -0
- data/docs/executor.md +29 -0
- data/docs/gateway.md +106 -0
- data/docs/images/library.png +0 -0
- data/docs/images/merging.png +0 -0
- data/docs/images/stitching.png +0 -0
- data/docs/planner.md +43 -0
- data/docs/shaper.md +20 -0
- data/docs/supergraph.md +65 -0
- data/example/gateway.rb +50 -0
- data/example/graphiql.html +153 -0
- data/example/remote1.rb +26 -0
- data/example/remote2.rb +26 -0
- data/graphql-stitching.gemspec +34 -0
- data/lib/graphql/stitching/composer/base_validator.rb +11 -0
- data/lib/graphql/stitching/composer/validate_boundaries.rb +80 -0
- data/lib/graphql/stitching/composer/validate_interfaces.rb +24 -0
- data/lib/graphql/stitching/composer.rb +442 -0
- data/lib/graphql/stitching/document.rb +59 -0
- data/lib/graphql/stitching/executor.rb +254 -0
- data/lib/graphql/stitching/gateway.rb +120 -0
- data/lib/graphql/stitching/planner.rb +323 -0
- data/lib/graphql/stitching/planner_operation.rb +59 -0
- data/lib/graphql/stitching/remote_client.rb +25 -0
- data/lib/graphql/stitching/shaper.rb +92 -0
- data/lib/graphql/stitching/supergraph.rb +171 -0
- data/lib/graphql/stitching/util.rb +63 -0
- data/lib/graphql/stitching/version.rb +7 -0
- data/lib/graphql/stitching.rb +30 -0
- metadata +142 -0
@@ -0,0 +1,254 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require "json"
|
4
|
+
|
5
|
+
module GraphQL
|
6
|
+
module Stitching
|
7
|
+
class Executor

  # Dataloader source that resolves the plan's root operation(s) by sending
  # them directly to their origin location, merging data and errors into the
  # executor's aggregate result.
  class RootSource < GraphQL::Dataloader::Source
    def initialize(executor)
      @executor = executor
    end

    # Executes the single root operation for this location and returns its
    # key so that dependent operations can be scheduled afterward.
    def fetch(ops)
      op = ops.first # There should only ever be one per location at a time

      query_document = build_query(op)
      query_variables = @executor.variables.slice(*op["variables"].keys)
      result = @executor.supergraph.execute_at_location(op["location"], query_document, query_variables)
      @executor.query_count += 1

      @executor.data.merge!(result["data"]) if result["data"]
      if result["errors"]&.any?
        # remote "locations" reference positions in the remote document, which
        # are meaningless to the stitched client, so they are stripped
        result["errors"].each { _1.delete("locations") }
        @executor.errors.concat(result["errors"])
      end
      op["key"]
    end

    # Builds the root request document string, prefixing variable definitions
    # (e.g. `query($id:ID!){ ... }`) only when the operation uses variables.
    def build_query(op)
      if op["variables"].any?
        variable_defs = op["variables"].map { |k, v| "$#{k}:#{v}" }.join(",")
        "#{op["operation_type"]}(#{variable_defs})#{op["selections"]}"
      else
        "#{op["operation_type"]}#{op["selections"]}"
      end
    end
  end

  # Dataloader source that resolves boundary (entity-merging) operations for a
  # single location: it collects the origin objects each operation targets,
  # batches them into one query, and merges the results back into those objects.
  class BoundarySource < GraphQL::Dataloader::Source
    def initialize(executor, location)
      @executor = executor
      @location = location
    end

    # Resolves a batch of boundary operations. Returns the key of each
    # operation that actually ran (nil for operations with no origin objects),
    # so the executor can schedule dependents of completed steps only.
    def fetch(ops)
      origin_sets_by_operation = ops.each_with_object({}) do |op, memo|
        # walk the insertion path through already-fetched data, fanning out
        # across arrays to collect every object this operation must extend
        origin_set = op["insertion_path"].reduce([@executor.data]) do |set, path_segment|
          mapped = set.flat_map { |obj| obj && obj[path_segment] }
          mapped.compact!
          mapped
        end

        if op["type_condition"]
          # operations planned around unused fragment conditions should not trigger requests
          origin_set.select! { _1["_STITCH_typename"] == op["type_condition"] }
        end

        memo[op] = origin_set if origin_set.any?
      end

      if origin_sets_by_operation.any?
        query_document, variable_names = build_query(origin_sets_by_operation)
        variables = @executor.variables.slice(*variable_names)
        raw_result = @executor.supergraph.execute_at_location(@location, query_document, variables)
        @executor.query_count += 1

        merge_results!(origin_sets_by_operation, raw_result.dig("data"))

        errors = raw_result.dig("errors")
        @executor.errors.concat(extract_errors!(origin_sets_by_operation, errors)) if errors&.any?
      end

      ops.map { origin_sets_by_operation[_1] ? _1["key"] : nil }
    end

    # Builds one combined query for all batched operations. List boundaries get
    # a single `_N_result` alias; non-list boundaries get one
    # `_N_M_result` alias per origin object. Returns the document string and
    # the names of the variables it references.
    def build_query(origin_sets_by_operation)
      variable_defs = {}
      query_fields = origin_sets_by_operation.map.with_index do |(op, origin_set), batch_index|
        variable_defs.merge!(op["variables"])
        boundary = op["boundary"]
        key_selection = "_STITCH_#{boundary["selection"]}"

        if boundary["list"]
          input = JSON.generate(origin_set.map { _1[key_selection] })
          "_#{batch_index}_result: #{boundary["field"]}(#{boundary["arg"]}:#{input}) #{op["selections"]}"
        else
          origin_set.map.with_index do |origin_obj, index|
            input = JSON.generate(origin_obj[key_selection])
            "_#{batch_index}_#{index}_result: #{boundary["field"]}(#{boundary["arg"]}:#{input}) #{op["selections"]}"
          end
        end
      end

      query_document = if variable_defs.any?
        query_variables = variable_defs.map { |k, v| "$#{k}:#{v}" }.join(",")
        "query(#{query_variables}){ #{query_fields.join(" ")} }"
      else
        "query{ #{query_fields.join(" ")} }"
      end

      return query_document, variable_defs.keys
    end

    # Merges aliased results back into their origin objects (in place), keyed
    # by the same batch/index aliases assigned in build_query.
    def merge_results!(origin_sets_by_operation, raw_result)
      return unless raw_result

      origin_sets_by_operation.each_with_index do |(op, origin_set), batch_index|
        results = if op.dig("boundary", "list")
          raw_result["_#{batch_index}_result"]
        else
          origin_set.map.with_index { |_, index| raw_result["_#{batch_index}_#{index}_result"] }
        end

        next unless results&.any?

        origin_set.each_with_index do |origin_obj, index|
          origin_obj.merge!(results[index]) if results[index]
        end
      end
    end

    # https://spec.graphql.org/June2018/#sec-Errors
    # Normalizes remote errors: strips their batch alias prefix, groups pathed
    # errors by their origin object, and repaths them into the stitched result.
    # Errors without a recognizable alias pass through with only their
    # (remote-specific) "locations" removed.
    def extract_errors!(origin_sets_by_operation, errors)
      ops = origin_sets_by_operation.keys
      origin_sets = origin_sets_by_operation.values
      pathed_errors_by_op_index_and_object_id = {}

      errors_result = errors.each_with_object([]) do |err, memo|
        err.delete("locations")
        path = err["path"]

        if path && path.length > 0
          # aliases look like "_0_result" (list boundary) or "_0_2_result"
          # (non-list boundary, per-object)
          result_alias = /^_(\d+)(?:_(\d+))?_result$/.match(path.first.to_s)

          if result_alias
            path = err["path"] = path[1..-1]

            origin_obj = if result_alias[2]
              origin_sets.dig(result_alias[1].to_i, result_alias[2].to_i)
            elsif path[0].is_a?(Integer) || /\d+/.match?(path[0].to_s)
              origin_sets.dig(result_alias[1].to_i, path.shift.to_i)
            end

            if origin_obj
              by_op_index = pathed_errors_by_op_index_and_object_id[result_alias[1].to_i] ||= {}
              by_object_id = by_op_index[origin_obj.object_id] ||= []
              by_object_id << err
              next
            end
          end
        end

        memo << err
      end

      if pathed_errors_by_op_index_and_object_id.any?
        pathed_errors_by_op_index_and_object_id.each do |op_index, pathed_errors_by_object_id|
          repath_errors!(pathed_errors_by_object_id, ops.dig(op_index, "insertion_path"))
          errors_result.concat(pathed_errors_by_object_id.values)
        end
      end

      # FIX: `flatten!` returns nil when the array was already flat, which made
      # this method return nil and crash the caller's `errors.concat(...)`.
      # Always return the (flattened) array.
      errors_result.flatten!
      errors_result
    end

    private

    # traverses forward through origin data, expanding arrays to follow all paths
    # any errors found for an origin object_id have their path prefixed by the object path
    def repath_errors!(pathed_errors_by_object_id, forward_path, current_path=[], root=@executor.data)
      current_path << forward_path.first
      forward_path = forward_path[1..-1]
      scope = root[current_path.last]

      if forward_path.any? && scope.is_a?(Array)
        scope.each_with_index do |element, index|
          inner_elements = element.is_a?(Array) ? element.flatten : [element]
          inner_elements.each do |inner_element|
            repath_errors!(pathed_errors_by_object_id, forward_path, [*current_path, index], inner_element)
          end
        end

      elsif forward_path.any?
        # FIX: original referenced an undefined `index` here (NameError at
        # runtime) — a non-array scope contributes no index segment, so the
        # path is carried forward as-is (dup'd to avoid aliasing the caller's
        # array through the recursive mutation above).
        repath_errors!(pathed_errors_by_object_id, forward_path, current_path.dup, scope)

      elsif scope.is_a?(Array)
        scope.each_with_index do |element, index|
          inner_elements = element.is_a?(Array) ? element.flatten : [element]
          inner_elements.each do |inner_element|
            errors = pathed_errors_by_object_id[inner_element.object_id]
            errors.each { _1["path"] = [*current_path, index, *_1["path"]] } if errors
          end
        end

      else
        errors = pathed_errors_by_object_id[scope.object_id]
        errors.each { _1["path"] = [*current_path, *_1["path"]] } if errors
      end
    end
  end

  attr_reader :supergraph, :data, :errors, :variables
  attr_accessor :query_count

  # @param supergraph [Supergraph] the composed graph with location executables
  # @param plan [Hash] a serialized plan, as produced by Planner#to_h
  # @param variables [Hash] request variables keyed by name
  # @param nonblocking [Boolean] passed through to GraphQL::Dataloader
  def initialize(supergraph:, plan:, variables: {}, nonblocking: false)
    @supergraph = supergraph
    @variables = variables
    @queue = plan["ops"]
    @data = {}
    @errors = []
    @query_count = 0
    @dataloader = GraphQL::Dataloader.new(nonblocking: nonblocking)
  end

  # Runs the plan and returns a GraphQL result hash. With no document the raw
  # merged result is returned; with a document the result is shaped to match
  # the request selection.
  def perform(document=nil)
    exec!

    result = {}
    result["data"] = @data if @data && @data.length > 0
    result["errors"] = @errors if @errors.length > 0

    # FIX: original read `result if document.nil?` — a no-op expression that
    # fell through into the Shaper with a nil document. Return early instead.
    return result if document.nil?

    GraphQL::Stitching::Shaper.new(supergraph: @supergraph, document: document, raw: result).perform!
  end

  private

  # Enqueues all operations whose dependencies (after_key) are satisfied,
  # grouped by location: root operations (after_key 0) go to RootSource,
  # everything else to a per-location BoundarySource.
  def exec!(after_keys = [0])
    @dataloader.append_job do
      requests = @queue
        .select { after_keys.include?(_1["after_key"]) }
        .group_by { _1["location"] }
        .map do |location, ops|
          if ops.first["after_key"].zero?
            @dataloader.with(RootSource, self).request_all(ops)
          else
            @dataloader.with(BoundarySource, self, location).request_all(ops)
          end
        end

      requests.each(&method(:exec_request))
    end
    @dataloader.run
  end

  # Resolves one batched request and recursively schedules the operations
  # that depend on its completed keys.
  def exec_request(request)
    next_keys = request.load
    next_keys.compact!
    exec!(next_keys) if next_keys.any?
  end
end
|
253
|
+
end
|
254
|
+
end
|
@@ -0,0 +1,120 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require "json"
|
4
|
+
|
5
|
+
module GraphQL
|
6
|
+
module Stitching
|
7
|
+
class Gateway
  class GatewayError < StitchingError; end

  # shared frozen default so every request without a context reuses one hash
  EMPTY_CONTEXT = {}.freeze

  attr_reader :supergraph

  # Builds a gateway from either a locations config hash (composed into a new
  # supergraph) or a prebuilt Supergraph — exactly one must be provided.
  # @param locations [Hash, nil] location name => { schema:, executable: }
  # @param supergraph [Supergraph, nil] a previously composed supergraph
  # @raise [GatewayError] when neither, both, or an invalid supergraph is given
  def initialize(locations: nil, supergraph: nil)
    @supergraph = if locations && supergraph
      raise GatewayError, "Cannot provide both locations and a supergraph."
    elsif supergraph && !supergraph.is_a?(Supergraph)
      raise GatewayError, "Provided supergraph must be a GraphQL::Stitching::Supergraph instance."
    elsif supergraph
      supergraph
    elsif locations
      build_supergraph_from_locations_config(locations)
    else
      raise GatewayError, "No locations or supergraph provided."
    end
  end

  # Validates, plans (with optional cache hooks), and executes a request
  # against the supergraph. Unexpected errors are rescued into a GraphQL
  # errors response, optionally customized via the on_error hook.
  def execute(query:, variables: nil, operation_name: nil, context: EMPTY_CONTEXT, validate: true)
    document = GraphQL::Stitching::Document.new(query, operation_name: operation_name)

    if validate
      validation_errors = @supergraph.schema.validate(document.ast)
      return error_result(validation_errors) if validation_errors.any?
    end

    begin
      plan = fetch_plan(document, context) do
        GraphQL::Stitching::Planner.new(
          supergraph: @supergraph,
          document: document,
        ).perform.to_h
      end

      GraphQL::Stitching::Executor.new(
        supergraph: @supergraph,
        plan: plan,
        variables: variables || {},
      ).perform(document)
    rescue StandardError => e
      custom_message = @on_error.call(e, context) if @on_error
      # FIX: corrected "occured" -> "occurred" in the public error message
      error_result([{ "message" => custom_message || "An unexpected error occurred." }])
    end
  end

  # Registers a block for reading cached plans; receives (digest, context)
  # and should return a serialized plan JSON string or nil.
  def on_cache_read(&block)
    raise GatewayError, "A cache read block is required." unless block_given?
    @on_cache_read = block
  end

  # Registers a block for writing plans to cache; receives
  # (digest, plan_json, context).
  def on_cache_write(&block)
    raise GatewayError, "A cache write block is required." unless block_given?
    @on_cache_write = block
  end

  # Registers a block invoked with (error, context) when execution raises;
  # its return value (if any) replaces the default public error message.
  def on_error(&block)
    raise GatewayError, "An error handler block is required." unless block_given?
    @on_error = block
  end

  private

  # Validates each location's schema class, composes them into a supergraph,
  # then assigns any per-location executables from the config.
  def build_supergraph_from_locations_config(locations)
    schemas = locations.each_with_object({}) do |(location, config), memo|
      schema = config[:schema]
      if schema.nil?
        raise GatewayError, "A schema is required for `#{location}` location."
      elsif !(schema.is_a?(Class) && schema <= GraphQL::Schema)
        raise GatewayError, "The schema for `#{location}` location must be a GraphQL::Schema class."
      else
        memo[location.to_s] = schema
      end
    end

    supergraph = GraphQL::Stitching::Composer.new(schemas: schemas).perform

    locations.each do |location, config|
      executable = config[:executable]
      supergraph.assign_executable(location.to_s, executable) if executable
    end

    supergraph
  end

  # Returns a cached plan when the cache-read hook produces one; otherwise
  # yields for a fresh plan and (optionally) writes it through the
  # cache-write hook. Cached plans are stored as JSON keyed by request digest.
  def fetch_plan(document, context)
    if @on_cache_read
      cached_plan = @on_cache_read.call(document.digest, context)
      return JSON.parse(cached_plan) if cached_plan
    end

    plan_json = yield

    if @on_cache_write
      @on_cache_write.call(document.digest, JSON.generate(plan_json), context)
    end

    plan_json
  end

  # Normalizes validation/runtime errors into plain hashes with a "path" key,
  # wrapped in a spec-shaped `{ "errors" => [...] }` response.
  def error_result(errors)
    public_errors = errors.map do |e|
      public_error = e.is_a?(Hash) ? e : e.to_h
      public_error["path"] ||= []
      public_error
    end

    { "errors" => public_errors }
  end
end
|
119
|
+
end
|
120
|
+
end
|
@@ -0,0 +1,323 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module GraphQL
|
4
|
+
module Stitching
|
5
|
+
class Planner
  SUPERGRAPH_LOCATIONS = [Supergraph::LOCATION].freeze
  # every abstract/fragment scope selects a hidden typename for type resolution
  TYPENAME_NODE = GraphQL::Language::Nodes::Field.new(alias: "_STITCH_typename", name: "__typename")

  def initialize(supergraph:, document:)
    @supergraph = supergraph
    @document = document
    @sequence_key = 0
    @operations_by_grouping = {}
  end

  # Plans the request: builds root operations per location, then expands
  # concrete selections bound for abstract boundaries. Returns self.
  def perform
    build_root_operations
    expand_abstract_boundaries
    self
  end

  # All planned operations, ordered by their sequence key.
  def operations
    ops = @operations_by_grouping.values
    ops.sort_by!(&:key)
    ops
  end

  # Serializable plan shape consumed by the Executor.
  def to_h
    { "ops" => operations.map(&:to_h) }
  end

  private

  # Creates (or merges into) a PlannerOperation for the given location/type/
  # path grouping. Operations with the same grouping accumulate selections and
  # variables rather than creating duplicate steps.
  def add_operation(location:, parent_type:, selections: nil, insertion_path: [], operation_type: "query", after_key: 0, boundary: nil)
    parent_key = @sequence_key += 1
    selection_set, variables = if selections&.any?
      extract_locale_selections(location, parent_type, selections, insertion_path, parent_key)
    end

    grouping = [after_key, location, parent_type.graphql_name, *insertion_path].join("/")

    if op = @operations_by_grouping[grouping]
      op.selections += selection_set if selection_set
      op.variables.merge!(variables) if variables
      return op
    end

    # only concrete non-root types keep a type condition for execution filtering
    type_conditional = !parent_type.kind.abstract? && parent_type != @supergraph.schema.query && parent_type != @supergraph.schema.mutation

    @operations_by_grouping[grouping] = PlannerOperation.new(
      key: parent_key,
      after_key: after_key,
      location: location,
      parent_type: parent_type,
      operation_type: operation_type,
      insertion_path: insertion_path,
      type_condition: type_conditional ? parent_type.graphql_name : nil,
      selections: selection_set || [],
      variables: variables || {},
      boundary: boundary,
    )
  end

  # Builds the plan's root steps from the request's root selections.
  def build_root_operations
    case @document.operation.operation_type
    when "query"
      # plan steps grouping all fields by location for async execution
      parent_type = @supergraph.schema.query

      selections_by_location = @document.operation.selections.each_with_object({}) do |node, memo|
        locations = @supergraph.locations_by_type_and_field[parent_type.graphql_name][node.name] || SUPERGRAPH_LOCATIONS
        # NOTE(review): picks the last listed location for each root field —
        # presumably the most recently merged definition wins; confirm intent
        memo[locations.last] ||= []
        memo[locations.last] << node
      end

      selections_by_location.each do |location, selections|
        add_operation(location: location, parent_type: parent_type, selections: selections)
      end

    when "mutation"
      # plan steps grouping sequential fields by location for serial execution
      parent_type = @supergraph.schema.mutation
      location_groups = []

      @document.operation.selections.reduce(nil) do |last_location, node|
        location = @supergraph.locations_by_type_and_field[parent_type.graphql_name][node.name].last
        if location != last_location
          location_groups << {
            location: location,
            selections: [],
          }
        end
        location_groups.last[:selections] << node
        location
      end

      # chain each group after the previous so mutations run in request order
      location_groups.reduce(0) do |after_key, group|
        add_operation(
          location: group[:location],
          selections: group[:selections],
          operation_type: "mutation",
          parent_type: parent_type,
          after_key: after_key
        ).key
      end

    else
      raise "Invalid operation type."
    end
  end

  # Splits a selection set into what the current location can resolve locally
  # versus what must be delegated to child operations at other locations.
  # Returns [selections, variables] for the local operation.
  def extract_locale_selections(current_location, parent_type, input_selections, insertion_path, after_key)
    remote_selections = []
    selections_result = []
    variables_result = {}
    implements_fragments = false

    if parent_type.kind.interface?
      # fields of a merged interface may not belong to the interface at the local level,
      # so these non-local interface fields get expanded into typed fragments for planning
      local_interface_fields = @supergraph.fields_by_type_and_location[parent_type.graphql_name][current_location]
      extended_selections = []

      input_selections.reject! do |node|
        if node.is_a?(GraphQL::Language::Nodes::Field) && !local_interface_fields.include?(node.name)
          extended_selections << node
          true
        end
      end

      if extended_selections.any?
        possible_types = Util.get_possible_types(@supergraph.schema, parent_type)
        possible_types.each do |possible_type|
          next if possible_type.kind.abstract? # ignore child interfaces
          next unless @supergraph.locations_by_type[possible_type.graphql_name].include?(current_location)

          type_name = GraphQL::Language::Nodes::TypeName.new(name: possible_type.graphql_name)
          input_selections << GraphQL::Language::Nodes::InlineFragment.new(type: type_name, selections: extended_selections)
        end
      end
    end

    input_selections.each do |node|
      case node
      when GraphQL::Language::Nodes::Field
        if node.name == "__typename"
          selections_result << node
          next
        end

        possible_locations = @supergraph.locations_by_type_and_field[parent_type.graphql_name][node.name] || SUPERGRAPH_LOCATIONS
        unless possible_locations.include?(current_location)
          remote_selections << node
          next
        end

        field_type = Util.get_named_type_for_field_node(@supergraph.schema, parent_type, node)

        extract_node_variables!(node, variables_result)

        if Util.is_leaf_type?(field_type)
          selections_result << node
        else
          expanded_path = [*insertion_path, node.alias || node.name]
          selection_set, variables = extract_locale_selections(current_location, field_type, node.selections, expanded_path, after_key)
          selections_result << node.merge(selections: selection_set)
          variables_result.merge!(variables)
        end

      when GraphQL::Language::Nodes::InlineFragment
        next unless @supergraph.locations_by_type[node.type.name].include?(current_location)

        fragment_type = @supergraph.schema.types[node.type.name]
        selection_set, variables = extract_locale_selections(current_location, fragment_type, node.selections, insertion_path, after_key)
        selections_result << node.merge(selections: selection_set)
        variables_result.merge!(variables)
        implements_fragments = true

      when GraphQL::Language::Nodes::FragmentSpread
        fragment = @document.fragment_definitions[node.name]
        next unless @supergraph.locations_by_type[fragment.type.name].include?(current_location)

        # spreads are flattened into inline fragments so child documents
        # need no fragment definitions
        fragment_type = @supergraph.schema.types[fragment.type.name]
        selection_set, variables = extract_locale_selections(current_location, fragment_type, fragment.selections, insertion_path, after_key)
        selections_result << GraphQL::Language::Nodes::InlineFragment.new(type: fragment.type, selections: selection_set)
        variables_result.merge!(variables)
        implements_fragments = true

      else
        raise "Unexpected node of type #{node.class.name} in selection set."
      end
    end

    if remote_selections.any?
      selection_set = build_child_operations(current_location, parent_type, remote_selections, insertion_path, after_key)
      selections_result.concat(selection_set)
    end

    if parent_type.kind.abstract? || implements_fragments
      selections_result << TYPENAME_NODE
    end

    return selections_result, variables_result
  end

  # Assigns remote selections to locations (unique fields first, then weighted
  # preference), then builds chained child operations along boundary routes.
  # Returns the foreign-key selections to add to the parent operation.
  def build_child_operations(current_location, parent_type, input_selections, insertion_path, after_key)
    parent_selections_result = []
    selections_by_location = {}

    # distribute unique fields among required locations
    input_selections.reject! do |node|
      possible_locations = @supergraph.locations_by_type_and_field[parent_type.graphql_name][node.name]
      if possible_locations.length == 1
        selections_by_location[possible_locations.first] ||= []
        selections_by_location[possible_locations.first] << node
        true
      end
    end

    # distribute non-unique fields among available locations, preferring used locations
    if input_selections.any?
      # weight locations by number of needed fields available, prefer greater availability
      location_weights = input_selections.each_with_object({}) do |node, memo|
        possible_locations = @supergraph.locations_by_type_and_field[parent_type.graphql_name][node.name]
        possible_locations.each do |location|
          memo[location] ||= 0
          memo[location] += 1
        end
      end

      input_selections.each do |node|
        possible_locations = @supergraph.locations_by_type_and_field[parent_type.graphql_name][node.name]

        perfect_location_score = input_selections.length
        preferred_location_score = 0
        preferred_location = possible_locations.reduce(possible_locations.first) do |current_loc, candidate_loc|
          # FIX: original read `selections_by_location[location]`, but
          # `location` is undefined in this scope (NameError); the bonus is
          # meant to favor locations already receiving selections.
          score = selections_by_location[candidate_loc] ? perfect_location_score : 0
          score += location_weights.fetch(candidate_loc, 0)

          if score > preferred_location_score
            preferred_location_score = score
            candidate_loc
          else
            current_loc
          end
        end

        selections_by_location[preferred_location] ||= []
        selections_by_location[preferred_location] << node
      end
    end

    routes = @supergraph.route_type_to_locations(parent_type.graphql_name, current_location, selections_by_location.keys)
    routes.values.each_with_object({}) do |route, memo|
      route.reduce(nil) do |parent_op, boundary|
        location = boundary["location"]
        next memo[location] if memo[location]

        child_op = memo[location] = add_operation(
          location: location,
          selections: selections_by_location[location],
          parent_type: parent_type,
          insertion_path: insertion_path,
          boundary: boundary,
          after_key: after_key,
        )

        foreign_key_node = GraphQL::Language::Nodes::Field.new(
          alias: "_STITCH_#{boundary["selection"]}",
          name: boundary["selection"]
        )

        # each hop selects the foreign key + typename its child op resolves by
        if parent_op
          parent_op.selections << foreign_key_node << TYPENAME_NODE
        else
          parent_selections_result << foreign_key_node << TYPENAME_NODE
        end

        child_op
      end
    end

    parent_selections_result
  end

  # Collects variable usages (including nested input objects) from a node's
  # arguments into the given hash, keyed by variable name.
  def extract_node_variables!(node_with_args, variables={})
    node_with_args.arguments.each_with_object(variables) do |argument, memo|
      case argument.value
      when GraphQL::Language::Nodes::InputObject
        extract_node_variables!(argument.value, memo)
      when GraphQL::Language::Nodes::VariableIdentifier
        memo[argument.value.name] ||= @document.variable_definitions[argument.value.name]
      end
    end
  end

  # expand concrete type selections into typed fragments when sending to abstract boundaries
  def expand_abstract_boundaries
    @operations_by_grouping.each do |_grouping, op|
      next unless op.boundary

      boundary_type = @supergraph.schema.get_type(op.boundary["type_name"])
      next unless boundary_type.kind.abstract?

      unless op.parent_type == boundary_type
        to_typed_selections = []
        op.selections.reject! do |node|
          if node.is_a?(GraphQL::Language::Nodes::Field)
            to_typed_selections << node
            true
          end
        end

        if to_typed_selections.any?
          type_name = GraphQL::Language::Nodes::TypeName.new(name: op.parent_type.graphql_name)
          op.selections << GraphQL::Language::Nodes::InlineFragment.new(type: type_name, selections: to_typed_selections)
        end
      end
    end
  end
end
|
322
|
+
end
|
323
|
+
end
|