graphql 2.4.8 → 2.4.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of graphql might be problematic.
- checksums.yaml +4 -4
- data/lib/graphql/backtrace/table.rb +95 -55
- data/lib/graphql/backtrace.rb +1 -19
- data/lib/graphql/current.rb +5 -0
- data/lib/graphql/dashboard/statics/bootstrap-5.3.3.min.css +6 -0
- data/lib/graphql/dashboard/statics/bootstrap-5.3.3.min.js +7 -0
- data/lib/graphql/dashboard/statics/dashboard.css +3 -0
- data/lib/graphql/dashboard/statics/dashboard.js +78 -0
- data/lib/graphql/dashboard/statics/header-icon.png +0 -0
- data/lib/graphql/dashboard/statics/icon.png +0 -0
- data/lib/graphql/dashboard/views/graphql/dashboard/landings/show.html.erb +18 -0
- data/lib/graphql/dashboard/views/graphql/dashboard/traces/index.html.erb +63 -0
- data/lib/graphql/dashboard/views/layouts/graphql/dashboard/application.html.erb +60 -0
- data/lib/graphql/dashboard.rb +142 -0
- data/lib/graphql/dataloader/active_record_association_source.rb +64 -0
- data/lib/graphql/dataloader/active_record_source.rb +26 -0
- data/lib/graphql/dataloader/async_dataloader.rb +17 -5
- data/lib/graphql/dataloader/null_dataloader.rb +1 -1
- data/lib/graphql/dataloader/source.rb +2 -2
- data/lib/graphql/dataloader.rb +37 -5
- data/lib/graphql/execution/interpreter/runtime/graphql_result.rb +11 -4
- data/lib/graphql/execution/interpreter/runtime.rb +60 -33
- data/lib/graphql/execution/interpreter.rb +9 -1
- data/lib/graphql/execution/multiplex.rb +0 -4
- data/lib/graphql/introspection/directive_location_enum.rb +1 -1
- data/lib/graphql/invalid_name_error.rb +1 -1
- data/lib/graphql/invalid_null_error.rb +6 -12
- data/lib/graphql/language/parser.rb +1 -1
- data/lib/graphql/query.rb +8 -12
- data/lib/graphql/schema/enum.rb +36 -1
- data/lib/graphql/schema/input_object.rb +1 -1
- data/lib/graphql/schema/interface.rb +1 -0
- data/lib/graphql/schema/member/has_dataloader.rb +60 -0
- data/lib/graphql/schema/member.rb +1 -0
- data/lib/graphql/schema/object.rb +17 -8
- data/lib/graphql/schema/resolver.rb +2 -5
- data/lib/graphql/schema/validator/required_validator.rb +23 -6
- data/lib/graphql/schema/visibility/profile.rb +5 -5
- data/lib/graphql/schema/visibility.rb +14 -9
- data/lib/graphql/schema.rb +54 -28
- data/lib/graphql/static_validation/validator.rb +6 -1
- data/lib/graphql/subscriptions/serialize.rb +1 -3
- data/lib/graphql/tracing/active_support_notifications_trace.rb +6 -2
- data/lib/graphql/tracing/appoptics_trace.rb +3 -1
- data/lib/graphql/tracing/appsignal_trace.rb +6 -0
- data/lib/graphql/tracing/data_dog_trace.rb +5 -0
- data/lib/graphql/tracing/detailed_trace/memory_backend.rb +60 -0
- data/lib/graphql/tracing/detailed_trace/redis_backend.rb +72 -0
- data/lib/graphql/tracing/detailed_trace.rb +93 -0
- data/lib/graphql/tracing/new_relic_trace.rb +147 -41
- data/lib/graphql/tracing/perfetto_trace/trace.proto +141 -0
- data/lib/graphql/tracing/perfetto_trace/trace_pb.rb +33 -0
- data/lib/graphql/tracing/perfetto_trace.rb +737 -0
- data/lib/graphql/tracing/prometheus_trace.rb +22 -0
- data/lib/graphql/tracing/scout_trace.rb +6 -0
- data/lib/graphql/tracing/sentry_trace.rb +5 -0
- data/lib/graphql/tracing/statsd_trace.rb +9 -0
- data/lib/graphql/tracing/trace.rb +125 -1
- data/lib/graphql/tracing.rb +2 -0
- data/lib/graphql/version.rb +1 -1
- data/lib/graphql.rb +3 -0
- metadata +148 -10
- data/lib/graphql/backtrace/inspect_result.rb +0 -38
- data/lib/graphql/backtrace/trace.rb +0 -93
- data/lib/graphql/backtrace/tracer.rb +0 -80
- data/lib/graphql/schema/null_mask.rb +0 -11
data/lib/graphql/dataloader/active_record_association_source.rb
ADDED
@@ -0,0 +1,64 @@
+# frozen_string_literal: true
+require "graphql/dataloader/source"
+require "graphql/dataloader/active_record_source"
+
+module GraphQL
+  class Dataloader
+    class ActiveRecordAssociationSource < GraphQL::Dataloader::Source
+      RECORD_SOURCE_CLASS = ActiveRecordSource
+
+      def initialize(association, scope = nil)
+        @association = association
+        @scope = scope
+      end
+
+      def load(record)
+        if (assoc = record.association(@association)).loaded?
+          assoc.target
+        else
+          super
+        end
+      end
+
+      def fetch(records)
+        record_classes = Set.new.compare_by_identity
+        associated_classes = Set.new.compare_by_identity
+        records.each do |record|
+          if record_classes.add?(record.class)
+            reflection = record.class.reflect_on_association(@association)
+            if !reflection.polymorphic? && reflection.klass
+              associated_classes.add(reflection.klass)
+            end
+          end
+        end
+
+        available_records = []
+        associated_classes.each do |assoc_class|
+          already_loaded_records = dataloader.with(RECORD_SOURCE_CLASS, assoc_class).results.values
+          available_records.concat(already_loaded_records)
+        end
+
+        ::ActiveRecord::Associations::Preloader.new(records: records, associations: @association, available_records: available_records, scope: @scope).call
+
+        loaded_associated_records = records.map { |r| r.public_send(@association) }
+        records_by_model = {}
+        loaded_associated_records.each do |record|
+          if record
+            updates = records_by_model[record.class] ||= {}
+            updates[record.id] = record
+          end
+        end
+
+        if @scope.nil?
+          # Don't cache records loaded via scope because they might have reduced `SELECT`s
+          # Could check .select_values here?
+          records_by_model.each do |model_class, updates|
+            dataloader.with(RECORD_SOURCE_CLASS, model_class).merge(updates)
+          end
+        end
+
+        loaded_associated_records
+      end
+    end
+  end
+end
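Usage sketch (not part of the diff): the new association source can back a field resolver so that belongs_to / has_one lookups are batched into one preload per response. The Post/Author names below are made up; context.dataloader is the long-standing Dataloader entry point, and dataload_association is the helper referenced in the merge_records docs further down.

    # Hypothetical field definition batching `post.author` across the whole query:
    class Types::Post < Types::BaseObject
      field :author, Types::Author, null: true

      def author
        context.dataloader
          .with(GraphQL::Dataloader::ActiveRecordAssociationSource, :author)
          .load(object)
      end
    end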
data/lib/graphql/dataloader/active_record_source.rb
ADDED
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+require "graphql/dataloader/source"
+
+module GraphQL
+  class Dataloader
+    class ActiveRecordSource < GraphQL::Dataloader::Source
+      def initialize(model_class, find_by: model_class.primary_key)
+        @model_class = model_class
+        @find_by = find_by
+        @type_for_column = @model_class.type_for_attribute(@find_by)
+      end
+
+      def load(requested_key)
+        casted_key = @type_for_column.cast(requested_key)
+        super(casted_key)
+      end
+
+      def fetch(record_ids)
+        records = @model_class.where(@find_by => record_ids)
+        record_lookup = {}
+        records.each { |r| record_lookup[r.public_send(@find_by)] = r }
+        record_ids.map { |id| record_lookup[id] }
+      end
+    end
+  end
+end
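A companion sketch for ActiveRecordSource: it collects keys across the query and runs a single WHERE lookup, using the find_by: option from the constructor above. The User model and the user_by_handle field are hypothetical.

    # Hypothetical resolvers; each gathers ids across the query, then runs one SELECT:
    def user(id:)
      context.dataloader.with(GraphQL::Dataloader::ActiveRecordSource, ::User).load(id)
    end

    def user_by_handle(handle:)
      context.dataloader.with(GraphQL::Dataloader::ActiveRecordSource, ::User, find_by: "handle").load(handle)
    end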
data/lib/graphql/dataloader/async_dataloader.rb
CHANGED
@@ -2,16 +2,20 @@
 module GraphQL
   class Dataloader
     class AsyncDataloader < Dataloader
-      def yield
+      def yield(source = Fiber[:__graphql_current_dataloader_source])
+        trace = Fiber[:__graphql_current_multiplex]&.current_trace
+        trace&.dataloader_fiber_yield(source)
         if (condition = Fiber[:graphql_dataloader_next_tick])
           condition.wait
         else
           Fiber.yield
         end
+        trace&.dataloader_fiber_resume(source)
         nil
       end

       def run
+        trace = Fiber[:__graphql_current_multiplex]&.current_trace
         jobs_fiber_limit, total_fiber_limit = calculate_fiber_limit
         job_fibers = []
         next_job_fibers = []
@@ -20,11 +24,12 @@ module GraphQL
         first_pass = true
         sources_condition = Async::Condition.new
         manager = spawn_fiber do
+          trace&.begin_dataloader(self)
           while first_pass || !job_fibers.empty?
             first_pass = false
             fiber_vars = get_fiber_variables

-            while (f = (job_fibers.shift || (((job_fibers.size + next_job_fibers.size + source_tasks.size) < jobs_fiber_limit) && spawn_job_fiber)))
+            while (f = (job_fibers.shift || (((job_fibers.size + next_job_fibers.size + source_tasks.size) < jobs_fiber_limit) && spawn_job_fiber(trace))))
               if f.alive?
                 finished = run_fiber(f)
                 if !finished
@@ -38,7 +43,7 @@ module GraphQL
             Sync do |root_task|
               set_fiber_variables(fiber_vars)
               while !source_tasks.empty? || @source_cache.each_value.any? { |group_sources| group_sources.each_value.any?(&:pending?) }
-                while (task = (source_tasks.shift || (((job_fibers.size + next_job_fibers.size + source_tasks.size + next_source_tasks.size) < total_fiber_limit) && spawn_source_task(root_task, sources_condition))))
+                while (task = (source_tasks.shift || (((job_fibers.size + next_job_fibers.size + source_tasks.size + next_source_tasks.size) < total_fiber_limit) && spawn_source_task(root_task, sources_condition, trace))))
                   if task.alive?
                     root_task.yield # give the source task a chance to run
                     next_source_tasks << task
@@ -50,6 +55,7 @@ module GraphQL
               end
             end
           end
+          trace&.end_dataloader(self)
         end

         manager.resume
@@ -63,7 +69,7 @@ module GraphQL

       private

-      def spawn_source_task(parent_task, condition)
+      def spawn_source_task(parent_task, condition, trace)
         pending_sources = nil
         @source_cache.each_value do |source_by_batch_params|
           source_by_batch_params.each_value do |source|
@@ -77,10 +83,16 @@ module GraphQL
         if pending_sources
           fiber_vars = get_fiber_variables
           parent_task.async do
+            trace&.dataloader_spawn_source_fiber(pending_sources)
             set_fiber_variables(fiber_vars)
             Fiber[:graphql_dataloader_next_tick] = condition
-            pending_sources.each
+            pending_sources.each do |s|
+              trace&.begin_dataloader_source(s)
+              s.run_pending_keys
+              trace&.end_dataloader_source(s)
+            end
             cleanup_fiber
+            trace&.dataloader_fiber_exit
           end
         end
       end
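The trace&.… calls threaded through both dataloaders give custom traces visibility into fiber scheduling. A minimal sketch of hooking into them, assuming (as the tracing/trace.rb additions in this release suggest) that GraphQL::Tracing::Trace supplies no-op defaults so calling super is safe:

    # Sketch only: log Dataloader fiber activity via the new hooks.
    module DataloaderLoggingTrace
      def begin_dataloader(dataloader)
        puts "dataloader run started"
        super
      end

      def dataloader_fiber_yield(source)
        puts "fiber paused waiting on #{source.class}"
        super
      end

      def dataloader_fiber_resume(source)
        puts "fiber resumed after #{source.class}"
        super
      end

      def end_dataloader(dataloader)
        puts "dataloader run finished"
        super
      end
    end

    # class MySchema < GraphQL::Schema
    #   trace_with(DataloaderLoggingTrace)
    # end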
data/lib/graphql/dataloader/null_dataloader.rb
CHANGED
@@ -11,7 +11,7 @@ module GraphQL
     # executed synchronously.
     def run; end
     def run_isolated; yield; end
-    def yield
+    def yield(_source)
       raise GraphQL::Error, "GraphQL::Dataloader is not running -- add `use GraphQL::Dataloader` to your schema to use Dataloader sources."
     end

data/lib/graphql/dataloader/source.rb
CHANGED
@@ -93,14 +93,14 @@ module GraphQL
       # Then run the batch and update the cache.
       # @return [void]
       def sync(pending_result_keys)
-        @dataloader.yield
+        @dataloader.yield(self)
         iterations = 0
         while pending_result_keys.any? { |key| !@results.key?(key) }
           iterations += 1
           if iterations > MAX_ITERATIONS
             raise "#{self.class}#sync tried #{MAX_ITERATIONS} times to load pending keys (#{pending_result_keys}), but they still weren't loaded. There is likely a circular dependency#{@dataloader.fiber_limit ? " or `fiber_limit: #{@dataloader.fiber_limit}` is set too low" : ""}."
           end
-          @dataloader.yield
+          @dataloader.yield(self)
         end
         nil
       end
data/lib/graphql/dataloader.rb
CHANGED
@@ -4,6 +4,8 @@ require "graphql/dataloader/null_dataloader"
 require "graphql/dataloader/request"
 require "graphql/dataloader/request_all"
 require "graphql/dataloader/source"
+require "graphql/dataloader/active_record_association_source"
+require "graphql/dataloader/active_record_source"

 module GraphQL
   # This plugin supports Fiber-based concurrency, along with {GraphQL::Dataloader::Source}.
@@ -129,8 +131,11 @@ module GraphQL
     # Dataloader will resume the fiber after the requested data has been loaded (by another Fiber).
     #
     # @return [void]
-    def yield
+    def yield(source = Fiber[:__graphql_current_dataloader_source])
+      trace = Fiber[:__graphql_current_multiplex]&.current_trace
+      trace&.dataloader_fiber_yield(source)
       Fiber.yield
+      trace&.dataloader_fiber_resume(source)
       nil
     end

@@ -184,6 +189,7 @@ module GraphQL
     end

     def run
+      trace = Fiber[:__graphql_current_multiplex]&.current_trace
       jobs_fiber_limit, total_fiber_limit = calculate_fiber_limit
       job_fibers = []
       next_job_fibers = []
@@ -191,10 +197,11 @@ module GraphQL
       next_source_fibers = []
       first_pass = true
       manager = spawn_fiber do
+        trace&.begin_dataloader(self)
         while first_pass || !job_fibers.empty?
           first_pass = false

-          while (f = (job_fibers.shift || (((next_job_fibers.size + job_fibers.size) < jobs_fiber_limit) && spawn_job_fiber)))
+          while (f = (job_fibers.shift || (((next_job_fibers.size + job_fibers.size) < jobs_fiber_limit) && spawn_job_fiber(trace))))
             if f.alive?
               finished = run_fiber(f)
               if !finished
@@ -205,7 +212,7 @@ module GraphQL
           join_queues(job_fibers, next_job_fibers)

           while (!source_fibers.empty? || @source_cache.each_value.any? { |group_sources| group_sources.each_value.any?(&:pending?) })
-            while (f = source_fibers.shift || (((job_fibers.size + source_fibers.size + next_source_fibers.size + next_job_fibers.size) < total_fiber_limit) && spawn_source_fiber))
+            while (f = source_fibers.shift || (((job_fibers.size + source_fibers.size + next_source_fibers.size + next_job_fibers.size) < total_fiber_limit) && spawn_source_fiber(trace)))
               if f.alive?
                 finished = run_fiber(f)
                 if !finished
@@ -216,6 +223,8 @@ module GraphQL
             join_queues(source_fibers, next_source_fibers)
           end
         end
+
+        trace&.end_dataloader(self)
       end

       run_fiber(manager)
@@ -230,6 +239,7 @@ module GraphQL
       if !source_fibers.empty?
         raise "Invariant: source fibers should have exited but #{source_fibers.size} remained"
       end
+
     rescue UncaughtThrowError => e
       throw e.tag, e.value
     end
@@ -247,6 +257,22 @@ module GraphQL
       }
     end

+    # Pre-warm the Dataloader cache with ActiveRecord objects which were loaded elsewhere.
+    # These will be used by {Dataloader::ActiveRecordSource}, {Dataloader::ActiveRecordAssociationSource} and their helper
+    # methods, `dataload_record` and `dataload_association`.
+    # @param records [Array<ActiveRecord::Base>] Already-loaded records to warm the cache with
+    # @param index_by [Symbol] The attribute to use as the cache key. (Should match `find_by:` when using {ActiveRecordSource})
+    # @return [void]
+    def merge_records(records, index_by: :id)
+      records_by_class = Hash.new { |h, k| h[k] = {} }
+      records.each do |r|
+        records_by_class[r.class][r.public_send(index_by)] = r
+      end
+      records_by_class.each do |r_class, records|
+        with(ActiveRecordSource, r_class).merge(records)
+      end
+    end
+
     private

     def calculate_fiber_limit
@@ -266,17 +292,19 @@ module GraphQL
       new_queue.clear
     end

-    def spawn_job_fiber
+    def spawn_job_fiber(trace)
       if !@pending_jobs.empty?
         spawn_fiber do
+          trace&.dataloader_spawn_execution_fiber(@pending_jobs)
           while job = @pending_jobs.shift
             job.call
           end
+          trace&.dataloader_fiber_exit
         end
       end
     end

-    def spawn_source_fiber
+    def spawn_source_fiber(trace)
       pending_sources = nil
       @source_cache.each_value do |source_by_batch_params|
         source_by_batch_params.each_value do |source|
@@ -289,10 +317,14 @@ module GraphQL

       if pending_sources
         spawn_fiber do
+          trace&.dataloader_spawn_source_fiber(pending_sources)
           pending_sources.each do |source|
             Fiber[:__graphql_current_dataloader_source] = source
+            trace&.begin_dataloader_source(source)
             source.run_pending_keys
+            trace&.end_dataloader_source(source)
           end
+          trace&.dataloader_fiber_exit
         end
       end
     end
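A usage sketch for the new Dataloader#merge_records documented above: records fetched outside GraphQL (the current_user.posts example is made up) are merged into the ActiveRecordSource cache so later load(id) calls skip the database.

    posts = current_user.posts.to_a          # loaded elsewhere, e.g. in a controller
    context.dataloader.merge_records(posts)  # pre-warm the cache, keyed by :id

    # Later in the same query this should be served from the warmed cache:
    context.dataloader.with(GraphQL::Dataloader::ActiveRecordSource, ::Post).load(posts.first.id)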
data/lib/graphql/execution/interpreter/runtime/graphql_result.rb
CHANGED
@@ -5,7 +5,10 @@ module GraphQL
     class Interpreter
       class Runtime
         module GraphQLResult
-          def initialize(result_name, result_type, application_value, parent_result, is_non_null_in_parent, selections, is_eager)
+          def initialize(result_name, result_type, application_value, parent_result, is_non_null_in_parent, selections, is_eager, ast_node, graphql_arguments, graphql_field) # rubocop:disable Metrics/ParameterLists
+            @ast_node = ast_node
+            @graphql_arguments = graphql_arguments
+            @graphql_field = graphql_field
             @graphql_parent = parent_result
             @graphql_application_value = application_value
             @graphql_result_type = result_type
@@ -31,14 +34,14 @@ module GraphQL

           attr_accessor :graphql_dead
           attr_reader :graphql_parent, :graphql_result_name, :graphql_is_non_null_in_parent,
-            :graphql_application_value, :graphql_result_type, :graphql_selections, :graphql_is_eager
+            :graphql_application_value, :graphql_result_type, :graphql_selections, :graphql_is_eager, :ast_node, :graphql_arguments, :graphql_field

           # @return [Hash] Plain-Ruby result data (`@graphql_metadata` contains Result wrapper objects)
           attr_accessor :graphql_result_data
         end

         class GraphQLResultHash
-          def initialize(_result_name, _result_type, _application_value, _parent_result, _is_non_null_in_parent, _selections, _is_eager)
+          def initialize(_result_name, _result_type, _application_value, _parent_result, _is_non_null_in_parent, _selections, _is_eager, _ast_node, _graphql_arguments, graphql_field) # rubocop:disable Metrics/ParameterLists
             super
             @graphql_result_data = {}
           end
@@ -126,7 +129,7 @@ module GraphQL
         class GraphQLResultArray
           include GraphQLResult

-          def initialize(_result_name, _result_type, _application_value, _parent_result, _is_non_null_in_parent, _selections, _is_eager)
+          def initialize(_result_name, _result_type, _application_value, _parent_result, _is_non_null_in_parent, _selections, _is_eager, _ast_node, _graphql_arguments, graphql_field) # rubocop:disable Metrics/ParameterLists
             super
             @graphql_result_data = []
           end
@@ -168,6 +171,10 @@ module GraphQL
           def values
             (@graphql_metadata || @graphql_result_data)
           end
+
+          def [](idx)
+            (@graphql_metadata || @graphql_result_data)[idx]
+          end
         end
       end
     end
data/lib/graphql/execution/interpreter/runtime.rb
CHANGED
@@ -74,7 +74,7 @@ module GraphQL
           runtime_object = root_type.wrap(query.root_value, context)
           runtime_object = schema.sync_lazy(runtime_object)
           is_eager = root_op_type == "mutation"
-          @response = GraphQLResultHash.new(nil, root_type, runtime_object, nil, false, root_operation.selections, is_eager)
+          @response = GraphQLResultHash.new(nil, root_type, runtime_object, nil, false, root_operation.selections, is_eager, root_operation, nil, nil)
           st = get_current_runtime_state
           st.current_result = @response

@@ -85,7 +85,7 @@ module GraphQL
           call_method_on_directives(:resolve, runtime_object, root_operation.directives) do # execute query level directives
             each_gathered_selections(@response) do |selections, is_selection_array|
               if is_selection_array
-                selection_response = GraphQLResultHash.new(nil, root_type, runtime_object, nil, false, selections, is_eager)
+                selection_response = GraphQLResultHash.new(nil, root_type, runtime_object, nil, false, selections, is_eager, root_operation, nil, nil)
                 final_response = @response
               else
                 selection_response = @response
@@ -218,8 +218,10 @@ module GraphQL
              result_name, field_ast_nodes_or_ast_node, selections_result
            )
            finished_jobs += 1
-            if
-
+            if finished_jobs == enqueued_jobs
+              if target_result
+                selections_result.merge_into(target_result)
+              end
            end
            @dataloader.clear_cache
          }
@@ -229,8 +231,10 @@ module GraphQL
              result_name, field_ast_nodes_or_ast_node, selections_result
            )
            finished_jobs += 1
-            if
-
+            if finished_jobs == enqueued_jobs
+              if target_result
+                selections_result.merge_into(target_result)
+              end
            end
          }
        end
@@ -371,6 +375,7 @@ module GraphQL
        end
        # Actually call the field resolver and capture the result
        app_result = begin
+          @current_trace.begin_execute_field(field_defn, object, kwarg_arguments, query)
          @current_trace.execute_field(field: field_defn, ast_node: ast_node, query: query, object: object, arguments: kwarg_arguments) do
            field_defn.resolve(object, kwarg_arguments, context)
          end
@@ -383,6 +388,7 @@ module GraphQL
            ex_err
          end
        end
+        @current_trace.end_execute_field(field_defn, object, kwarg_arguments, query, app_result)
        after_lazy(app_result, field: field_defn, ast_node: ast_node, owner_object: object, arguments: resolved_arguments, result_name: result_name, result: selection_result, runtime_state: runtime_state) do |inner_result, runtime_state|
          owner_type = selection_result.graphql_result_type
          return_type = field_defn.type
@@ -391,6 +397,8 @@ module GraphQL
            was_scoped = runtime_state.was_authorized_by_scope_items
            runtime_state.was_authorized_by_scope_items = nil
            continue_field(continue_value, owner_type, field_defn, return_type, ast_node, next_selections, false, object, resolved_arguments, result_name, selection_result, was_scoped, runtime_state)
+          else
+            nil
          end
        end
      end
@@ -465,7 +473,7 @@ module GraphQL
          # When this comes from a list item, use the parent object:
          parent_type = selection_result.is_a?(GraphQLResultArray) ? selection_result.graphql_parent.graphql_result_type : selection_result.graphql_result_type
          # This block is called if `result_name` is not dead. (Maybe a previous invalid nil caused it be marked dead.)
-          err = parent_type::InvalidNullError.new(parent_type, field, value)
+          err = parent_type::InvalidNullError.new(parent_type, field, value, ast_node)
          schema.type_error(err, context)
        end
      else
@@ -574,7 +582,7 @@ module GraphQL
        r = begin
          current_type.coerce_result(value, context)
        rescue StandardError => err
-
+          query.handle_or_reraise(err)
        end
        set_result(selection_result, result_name, r, false, is_non_null)
        r
@@ -609,11 +617,11 @@ module GraphQL
        after_lazy(object_proxy, ast_node: ast_node, field: field, owner_object: owner_object, arguments: arguments, trace: false, result_name: result_name, result: selection_result, runtime_state: runtime_state) do |inner_object, runtime_state|
          continue_value = continue_value(inner_object, field, is_non_null, ast_node, result_name, selection_result)
          if HALT != continue_value
-            response_hash = GraphQLResultHash.new(result_name, current_type, continue_value, selection_result, is_non_null, next_selections, false)
+            response_hash = GraphQLResultHash.new(result_name, current_type, continue_value, selection_result, is_non_null, next_selections, false, ast_node, arguments, field)
            set_result(selection_result, result_name, response_hash, true, is_non_null)
            each_gathered_selections(response_hash) do |selections, is_selection_array|
              if is_selection_array
-                this_result = GraphQLResultHash.new(result_name, current_type, continue_value, selection_result, is_non_null, selections, false)
+                this_result = GraphQLResultHash.new(result_name, current_type, continue_value, selection_result, is_non_null, selections, false, ast_node, arguments, field)
                final_result = response_hash
              else
                this_result = response_hash
@@ -634,35 +642,43 @@ module GraphQL
        # This is true for objects, unions, and interfaces
        use_dataloader_job = !inner_type.unwrap.kind.input?
        inner_type_non_null = inner_type.non_null?
-        response_list = GraphQLResultArray.new(result_name, current_type, owner_object, selection_result, is_non_null, next_selections, false)
+        response_list = GraphQLResultArray.new(result_name, current_type, owner_object, selection_result, is_non_null, next_selections, false, ast_node, arguments, field)
        set_result(selection_result, result_name, response_list, true, is_non_null)
        idx = nil
        list_value = begin
-
-
-
-
-
-
+          begin
+            value.each do |inner_value|
+              idx ||= 0
+              this_idx = idx
+              idx += 1
+              if use_dataloader_job
+                @dataloader.append_job do
+                  resolve_list_item(inner_value, inner_type, inner_type_non_null, ast_node, field, owner_object, arguments, this_idx, response_list, owner_type, was_scoped, runtime_state)
+                end
+              else
                resolve_list_item(inner_value, inner_type, inner_type_non_null, ast_node, field, owner_object, arguments, this_idx, response_list, owner_type, was_scoped, runtime_state)
              end
-            else
-              resolve_list_item(inner_value, inner_type, inner_type_non_null, ast_node, field, owner_object, arguments, this_idx, response_list, owner_type, was_scoped, runtime_state)
            end
-          end

-
-
-
-
-
-
-
-
-
+            response_list
+          rescue NoMethodError => err
+            # Ruby 2.2 doesn't have NoMethodError#receiver, can't check that one in this case. (It's been EOL since 2017.)
+            if err.name == :each && (err.respond_to?(:receiver) ? err.receiver == value : true)
+              # This happens when the GraphQL schema doesn't match the implementation. Help the dev debug.
+              raise ListResultFailedError.new(value: value, field: field, path: current_path)
+            else
+              # This was some other NoMethodError -- let it bubble to reveal the real error.
+              raise
+            end
+          rescue GraphQL::ExecutionError, GraphQL::UnauthorizedError => ex_err
+            ex_err
+          rescue StandardError => err
+            begin
+              query.handle_or_reraise(err)
+            rescue GraphQL::ExecutionError => ex_err
+              ex_err
+            end
          end
-        rescue GraphQL::ExecutionError, GraphQL::UnauthorizedError => ex_err
-          ex_err
        rescue StandardError => err
          begin
            query.handle_or_reraise(err)
@@ -773,8 +789,10 @@ module GraphQL
      runtime_state.was_authorized_by_scope_items = was_authorized_by_scope_items
      # Wrap the execution of _this_ method with tracing,
      # but don't wrap the continuation below
+      result = nil
      inner_obj = begin
-        if trace
+        result = if trace
+          @current_trace.begin_execute_field(field, owner_object, arguments, query)
          @current_trace.execute_field_lazy(field: field, query: query, object: owner_object, arguments: arguments, ast_node: ast_node) do
            schema.sync_lazy(lazy_obj)
          end
@@ -789,6 +807,10 @@ module GraphQL
      rescue GraphQL::ExecutionError => ex_err
        ex_err
      end
+      ensure
+        if trace
+          @current_trace.end_execute_field(field, owner_object, arguments, query, result)
+        end
      end
      yield(inner_obj, runtime_state)
    end
@@ -832,14 +854,19 @@ module GraphQL
      end

      def resolve_type(type, value)
+        @current_trace.begin_resolve_type(type, value, context)
        resolved_type, resolved_value = @current_trace.resolve_type(query: query, type: type, object: value) do
          query.resolve_type(type, value)
        end
+        @current_trace.end_resolve_type(type, value, context, resolved_type)

        if lazy?(resolved_type)
          GraphQL::Execution::Lazy.new do
+            @current_trace.begin_resolve_type(type, value, context)
            @current_trace.resolve_type_lazy(query: query, type: type, object: value) do
-              schema.sync_lazy(resolved_type)
+              rt = schema.sync_lazy(resolved_type)
+              @current_trace.end_resolve_type(type, value, context, rt)
+              rt
            end
          end
        else
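The paired begin_execute_field/end_execute_field and begin_resolve_type/end_resolve_type calls added above let a trace observe resolution without wrapping a block. A sketch, again assuming the base Trace class defines these hooks as no-ops so super is safe:

    # Sketch: count resolved fields per query with the new paired field hooks.
    module FieldCounterTrace
      def begin_execute_field(field, object, arguments, query)
        query.context[:resolved_field_count] ||= 0
        super
      end

      def end_execute_field(field, object, arguments, query, result)
        query.context[:resolved_field_count] += 1
        super
      end
    end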
data/lib/graphql/execution/interpreter.rb
CHANGED
@@ -33,9 +33,12 @@ module GraphQL
          end
        end

+
        multiplex = Execution::Multiplex.new(schema: schema, queries: queries, context: context, max_complexity: max_complexity)
        Fiber[:__graphql_current_multiplex] = multiplex
-        multiplex.current_trace
+        trace = multiplex.current_trace
+        trace.begin_execute_multiplex(multiplex)
+        trace.execute_multiplex(multiplex: multiplex) do
          schema = multiplex.schema
          queries = multiplex.queries
          lazies_at_depth = Hash.new { |h, k| h[k] = [] }
@@ -44,7 +47,10 @@ module GraphQL
            multiplex_analyzers += [GraphQL::Analysis::MaxQueryComplexity]
          end

+          trace.begin_analyze_multiplex(multiplex, multiplex_analyzers)
          schema.analysis_engine.analyze_multiplex(multiplex, multiplex_analyzers)
+          trace.end_analyze_multiplex(multiplex, multiplex_analyzers)
+
          begin
            # Since this is basically the batching context,
            # share it for a whole multiplex
@@ -148,6 +154,8 @@ module GraphQL
            }
          end
        end
+      ensure
+        trace&.end_execute_multiplex(multiplex)
      end

data/lib/graphql/execution/multiplex.rb
CHANGED
@@ -35,10 +35,6 @@
        @current_trace = @context[:trace] || schema.new_trace(multiplex: self)
        @dataloader = @context[:dataloader] ||= @schema.dataloader_class.new
        @tracers = schema.tracers + (context[:tracers] || [])
-        # Support `context: {backtrace: true}`
-        if context[:backtrace] && !@tracers.include?(GraphQL::Backtrace::Tracer)
-          @tracers << GraphQL::Backtrace::Tracer
-        end
        @max_complexity = max_complexity
      end
    end
data/lib/graphql/introspection/directive_location_enum.rb
CHANGED
@@ -7,7 +7,7 @@
      "a __DirectiveLocation describes one such possible adjacencies."

    GraphQL::Schema::Directive::LOCATIONS.each do |location|
-      value(location.to_s, GraphQL::Schema::Directive::LOCATION_DESCRIPTIONS[location], value: location)
+      value(location.to_s, GraphQL::Schema::Directive::LOCATION_DESCRIPTIONS[location], value: location, value_method: false)
    end
    introspection true
  end
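The value_method: false flag passed above is new in this range (schema/enum.rb grows by 36 lines in the file list); it appears to opt an enum value out of the per-value helper method that change introduces. A hedged sketch of passing the same flag on an application enum:

    # Sketch only -- assumes value_method: is accepted anywhere `value` is called:
    class Types::OrderStatus < Types::BaseEnum
      value "SHIPPED", "The order has left the warehouse", value: :shipped, value_method: false
    end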