graphql 2.5.11 → 2.5.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/graphql/dataloader/async_dataloader.rb +22 -11
- data/lib/graphql/dataloader/null_dataloader.rb +44 -10
- data/lib/graphql/dataloader.rb +75 -23
- data/lib/graphql/execution/interpreter/resolve.rb +7 -13
- data/lib/graphql/execution/interpreter/runtime/graphql_result.rb +5 -0
- data/lib/graphql/execution/interpreter/runtime.rb +17 -6
- data/lib/graphql/execution/interpreter.rb +2 -12
- data/lib/graphql/schema/build_from_definition.rb +3 -1
- data/lib/graphql/schema/member/has_arguments.rb +6 -0
- data/lib/graphql/version.rb +1 -1
- metadata +3 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1e59a0d63055f40c6802e504d0a0b12fa4a0b36d73caba86f2879966d13fc31e
+  data.tar.gz: 38416d067f73335742755e9fe11825e591d6ecd999360074e469f8b6ceea0908
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: af2f66ced060a82f5869586c41917a7dfd12d44478f63f48b80b96fef563b0f3c0294e1aa5d16256740362474f70523f983a6bec47d400408392dce46ab62445
+  data.tar.gz: cf1361a8d27f9ec47de57aa76f58f33757652a4765d8e887cf13079b8e0873a7d983dc94d1022cda045d56e6c3e6f39e2ce966685681da98084179e604883bb1
data/lib/graphql/dataloader/async_dataloader.rb
CHANGED
@@ -14,7 +14,7 @@ module GraphQL
         nil
       end
 
-      def run
+      def run(trace_query_lazy: nil)
         trace = Fiber[:__graphql_current_multiplex]&.current_trace
         jobs_fiber_limit, total_fiber_limit = calculate_fiber_limit
         job_fibers = []
@@ -29,16 +29,7 @@ module GraphQL
           first_pass = false
           fiber_vars = get_fiber_variables
 
-          while (f = (job_fibers.shift || (((job_fibers.size + next_job_fibers.size + source_tasks.size) < jobs_fiber_limit) && spawn_job_fiber(trace))))
-            if f.alive?
-              finished = run_fiber(f)
-              if !finished
-                next_job_fibers << f
-              end
-            end
-          end
-          job_fibers.concat(next_job_fibers)
-          next_job_fibers.clear
+          run_pending_steps(job_fibers, next_job_fibers, source_tasks, jobs_fiber_limit, trace)
 
           Sync do |root_task|
             set_fiber_variables(fiber_vars)
@@ -54,6 +45,13 @@ module GraphQL
              next_source_tasks.clear
            end
          end
+
+          if !@lazies_at_depth.empty?
+            with_trace_query_lazy(trace_query_lazy) do
+              run_next_pending_lazies(job_fibers, trace)
+              run_pending_steps(job_fibers, next_job_fibers, source_tasks, jobs_fiber_limit, trace)
+            end
+          end
         end
         trace&.end_dataloader(self)
       end
@@ -69,6 +67,19 @@ module GraphQL
 
       private
 
+      def run_pending_steps(job_fibers, next_job_fibers, source_tasks, jobs_fiber_limit, trace)
+        while (f = (job_fibers.shift || (((job_fibers.size + next_job_fibers.size + source_tasks.size) < jobs_fiber_limit) && spawn_job_fiber(trace))))
+          if f.alive?
+            finished = run_fiber(f)
+            if !finished
+              next_job_fibers << f
+            end
+          end
+        end
+        job_fibers.concat(next_job_fibers)
+        next_job_fibers.clear
+      end
+
       def spawn_source_task(parent_task, condition, trace)
         pending_sources = nil
         @source_cache.each_value do |source_by_batch_params|
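
Note: with this release, AsyncDataloader#run accepts `trace_query_lazy:` and drains any lazy objects collected in `@lazies_at_depth` between fiber passes, instead of leaving that work to a separate interpreter step. For context, AsyncDataloader is opted in per schema; a minimal setup sketch (assumes the `async` gem is installed; `MySchema` is a placeholder name):

  require "graphql"
  require "async"

  class MySchema < GraphQL::Schema
    use GraphQL::Dataloader::AsyncDataloader
  end
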
data/lib/graphql/dataloader/null_dataloader.rb
CHANGED
@@ -2,24 +2,58 @@
 
 module GraphQL
   class Dataloader
-    #
+    # GraphQL-Ruby uses this when Dataloader isn't enabled.
     #
-    #
-    #
+    # It runs execution code inline and gathers lazy objects (eg. Promises)
+    # and resolves them during {#run}.
     class NullDataloader < Dataloader
-
-
+      def initialize(*)
+        @lazies_at_depth = Hash.new { |h,k| h[k] = [] }
+      end
+
+      def freeze
+        @lazies_at_depth.default_proc = nil
+        @lazies_at_depth.freeze
+        super
+      end
+
+      def run(trace_query_lazy: nil)
+        with_trace_query_lazy(trace_query_lazy) do
+          while !@lazies_at_depth.empty?
+            smallest_depth = nil
+            @lazies_at_depth.each_key do |depth_key|
+              smallest_depth ||= depth_key
+              if depth_key < smallest_depth
+                smallest_depth = depth_key
+              end
+            end
+
+            if smallest_depth
+              lazies = @lazies_at_depth.delete(smallest_depth)
+              lazies.each(&:value) # resolve these Lazy instances
+            end
+          end
+        end
+      end
+
+      def run_isolated
+        new_dl = self.class.new
+        res = nil
+        new_dl.append_job {
+          res = yield
+        }
+        new_dl.run
+        res
+      end
 
-      def initialize(*); end
-      def run; end
-      def run_isolated; yield; end
       def clear_cache; end
+
       def yield(_source)
         raise GraphQL::Error, "GraphQL::Dataloader is not running -- add `use GraphQL::Dataloader` to your schema to use Dataloader sources."
       end
 
-      def append_job
-        yield
+      def append_job(callable = nil)
+        callable ? callable.call : yield
         nil
       end
 
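
Note: NullDataloader (used when `use GraphQL::Dataloader` is not configured) now collects lazy objects per depth and resolves them smallest-depth-first, matching the order the real Dataloader uses. A standalone sketch of that selection order, with plain lambdas standing in for GraphQL's lazy objects:

  lazies_at_depth = Hash.new { |h, k| h[k] = [] }
  lazies_at_depth[2] << -> { puts "resolved at depth 2" }
  lazies_at_depth[1] << -> { puts "resolved at depth 1 (first)" }

  until lazies_at_depth.empty?
    smallest_depth = lazies_at_depth.keys.min
    lazies_at_depth.delete(smallest_depth).each(&:call)
  end
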
data/lib/graphql/dataloader.rb
CHANGED
@@ -64,6 +64,7 @@ module GraphQL
        @nonblocking = nonblocking
      end
      @fiber_limit = fiber_limit
+      @lazies_at_depth = Hash.new { |h, k| h[k] = [] }
    end
 
    # @return [Integer, nil]
@@ -140,10 +141,10 @@ module GraphQL
    end
 
    # @api private Nothing to see here
-    def append_job(&job)
+    def append_job(callable = nil, &job)
      # Given a block, queue it up to be worked through when `#run` is called.
-      # (If the dataloader is already running,
-      @pending_jobs.push(job)
+      # (If the dataloader is already running, then a Fiber will pick this up later.)
+      @pending_jobs.push(callable || job)
      nil
    end
 
@@ -160,6 +161,10 @@ module GraphQL
    def run_isolated
      prev_queue = @pending_jobs
      prev_pending_keys = {}
+      prev_lazies_at_depth = @lazies_at_depth
+      @lazies_at_depth = @lazies_at_depth.dup.clear
+      # Clear pending loads but keep already-cached records
+      # in case they are useful to the given block.
      @source_cache.each do |source_class, batched_sources|
        batched_sources.each do |batch_args, batched_source_instance|
          if batched_source_instance.pending?
@@ -179,6 +184,7 @@ module GraphQL
      res
    ensure
      @pending_jobs = prev_queue
+      @lazies_at_depth = prev_lazies_at_depth
      prev_pending_keys.each do |source_instance, pending|
        pending.each do |key, value|
          if !source_instance.results.key?(key)
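
Note: `append_job` (marked `@api private`) now accepts a callable argument as an alternative to a block, and `run_isolated` saves and restores `@lazies_at_depth` just as it already did for `@pending_jobs`. A small sketch of the two `append_job` forms, outside of any query:

  require "graphql"

  dataloader = GraphQL::Dataloader.new
  dataloader.append_job { puts "queued as a block" }
  dataloader.append_job(-> { puts "queued as a callable" }) # newly supported
  dataloader.run
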
@@ -188,7 +194,8 @@ module GraphQL
        end
      end
 
-    def run
+    # @param trace_query_lazy [nil, Execution::Multiplex]
+    def run(trace_query_lazy: nil)
      trace = Fiber[:__graphql_current_multiplex]&.current_trace
      jobs_fiber_limit, total_fiber_limit = calculate_fiber_limit
      job_fibers = []
@@ -201,26 +208,13 @@ module GraphQL
      while first_pass || !job_fibers.empty?
        first_pass = false
 
-        while (f = (job_fibers.shift || (((next_job_fibers.size + job_fibers.size) < jobs_fiber_limit) && spawn_job_fiber(trace))))
-          if f.alive?
-            finished = run_fiber(f)
-            if !finished
-              next_job_fibers << f
-            end
-          end
-        end
-        join_queues(job_fibers, next_job_fibers)
-
-        while (!source_fibers.empty? || @source_cache.each_value.any? { |group_sources| group_sources.each_value.any?(&:pending?) })
-          while (f = source_fibers.shift || (((job_fibers.size + source_fibers.size + next_source_fibers.size + next_job_fibers.size) < total_fiber_limit) && spawn_source_fiber(trace)))
-            if f.alive?
-              finished = run_fiber(f)
-              if !finished
-                next_source_fibers << f
-              end
-            end
+        run_pending_steps(trace, job_fibers, next_job_fibers, jobs_fiber_limit, source_fibers, next_source_fibers, total_fiber_limit)
+
+        if !@lazies_at_depth.empty?
+          with_trace_query_lazy(trace_query_lazy) do
+            run_next_pending_lazies(job_fibers, trace)
+            run_pending_steps(trace, job_fibers, next_job_fibers, jobs_fiber_limit, source_fibers, next_source_fibers, total_fiber_limit)
          end
-          join_queues(source_fibers, next_source_fibers)
        end
      end
 
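
Note: `run` now delegates the job-fiber and source-fiber loops to `run_pending_steps`, then, if any lazy objects were collected, resolves them via `run_next_pending_lazies` and loops again, so Source loads requested from inside lazies can still be batched. Sources themselves are unchanged; for reference, a typical Source and call site look roughly like this (the class and data are illustrative):

  class UserSource < GraphQL::Dataloader::Source
    def fetch(ids)
      # One batched lookup for all requested ids; a real app would query a database here.
      users_by_id = { 1 => "Alice", 2 => "Bob" }
      ids.map { |id| users_by_id[id] }
    end
  end

  # Inside a resolver, where `dataloader` comes from `context.dataloader`:
  #   dataloader.with(UserSource).load(object.author_id)
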
@@ -248,6 +242,11 @@ module GraphQL
      f.resume
    end
 
+    # @api private
+    def lazy_at_depth(depth, lazy)
+      @lazies_at_depth[depth] << lazy
+    end
+
    def spawn_fiber
      fiber_vars = get_fiber_variables
      Fiber.new(blocking: !@nonblocking) {
@@ -275,6 +274,59 @@ module GraphQL
 
    private
 
+    def run_next_pending_lazies(job_fibers, trace)
+      smallest_depth = nil
+      @lazies_at_depth.each_key do |depth_key|
+        smallest_depth ||= depth_key
+        if depth_key < smallest_depth
+          smallest_depth = depth_key
+        end
+      end
+
+      if smallest_depth
+        lazies = @lazies_at_depth.delete(smallest_depth)
+        if !lazies.empty?
+          lazies.each_with_index do |l, idx|
+            append_job { l.value }
+          end
+          job_fibers.unshift(spawn_job_fiber(trace))
+        end
+      end
+    end
+
+    def run_pending_steps(trace, job_fibers, next_job_fibers, jobs_fiber_limit, source_fibers, next_source_fibers, total_fiber_limit)
+      while (f = (job_fibers.shift || (((next_job_fibers.size + job_fibers.size) < jobs_fiber_limit) && spawn_job_fiber(trace))))
+        if f.alive?
+          finished = run_fiber(f)
+          if !finished
+            next_job_fibers << f
+          end
+        end
+      end
+      join_queues(job_fibers, next_job_fibers)
+
+      while (!source_fibers.empty? || @source_cache.each_value.any? { |group_sources| group_sources.each_value.any?(&:pending?) })
+        while (f = source_fibers.shift || (((job_fibers.size + source_fibers.size + next_source_fibers.size + next_job_fibers.size) < total_fiber_limit) && spawn_source_fiber(trace)))
+          if f.alive?
+            finished = run_fiber(f)
+            if !finished
+              next_source_fibers << f
+            end
+          end
+        end
+        join_queues(source_fibers, next_source_fibers)
+      end
+    end
+
+    def with_trace_query_lazy(multiplex_or_nil, &block)
+      if (multiplex = multiplex_or_nil)
+        query = multiplex.queries.length == 1 ? multiplex.queries[0] : nil
+        multiplex.current_trace.execute_query_lazy(query: query, multiplex: multiplex, &block)
+      else
+        yield
+      end
+    end
+
    def calculate_fiber_limit
      total_fiber_limit = @fiber_limit || Float::INFINITY
      if total_fiber_limit < 4
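
Note: `lazy_at_depth` is how the runtime now hands lazy objects to the Dataloader, which later forces each one with `.value` (see `run_next_pending_lazies` above). "Lazy" here means the wrappers GraphQL-Ruby builds around values registered with `lazy_resolve`; a registration sketch using the promise.rb gem as the example lazy class (an assumption, not something this diff requires):

  require "graphql"
  require "promise" # from the promise.rb gem

  class MySchema < GraphQL::Schema
    lazy_resolve(Promise, :sync) # tells GraphQL-Ruby how to force a Promise
  end
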
data/lib/graphql/execution/interpreter/resolve.rb
CHANGED
@@ -6,12 +6,17 @@ module GraphQL
      module Resolve
        # Continue field results in `results` until there's nothing else to continue.
        # @return [void]
+        # @deprecated Call `dataloader.run` instead
        def self.resolve_all(results, dataloader)
+          warn "#{self}.#{__method__} is deprecated; Use `dataloader.run` instead.#{caller(1, 5).map { |l| "\n #{l}"}.join}"
          dataloader.append_job { resolve(results, dataloader) }
          nil
        end
 
+        # @deprecated Call `dataloader.run` instead
        def self.resolve_each_depth(lazies_at_depth, dataloader)
+          warn "#{self}.#{__method__} is deprecated; Use `dataloader.run` instead.#{caller(1, 5).map { |l| "\n #{l}"}.join}"
+
          smallest_depth = nil
          lazies_at_depth.each_key do |depth_key|
            smallest_depth ||= depth_key
@@ -34,20 +39,9 @@ module GraphQL
          nil
        end
 
-        #
-        # continue it until you get a response-ready Ruby value.
-        #
-        # `results` is one level of _depth_ of a query or multiplex.
-        #
-        # Resolve all lazy values in that depth before moving on
-        # to the next level.
-        #
-        # It's assumed that the lazies will
-        # return {Lazy} instances if there's more work to be done,
-        # or return {Hash}/{Array} if the query should be continued.
-        #
-        # @return [void]
+        # @deprecated Call `dataloader.run` instead
        def self.resolve(results, dataloader)
+          warn "#{self}.#{__method__} is deprecated; Use `dataloader.run` instead.#{caller(1, 5).map { |l| "\n #{l}"}.join}"
          # There might be pending jobs here that _will_ write lazies
          # into the result hash. We should run them out, so we
          # can be sure that all lazies will be present in the result hashes.
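
Note: both helpers now emit a deprecation warning and point callers at `Dataloader#run`, which performs the same breadth-first lazy resolution internally. For code that called them directly, the migration is mechanical (assuming `results` and `dataloader` as in the old call sites):

  # Before (now warns):
  GraphQL::Execution::Interpreter::Resolve.resolve_all(results, dataloader)

  # After:
  dataloader.run
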
data/lib/graphql/execution/interpreter/runtime.rb
CHANGED
@@ -35,11 +35,10 @@ module GraphQL
      # @return [GraphQL::Query::Context]
      attr_reader :context
 
-      def initialize(query:, lazies_at_depth:)
+      def initialize(query:)
        @query = query
        @current_trace = query.current_trace
        @dataloader = query.multiplex.dataloader
-        @lazies_at_depth = lazies_at_depth
        @schema = query.schema
        @context = query.context
        @response = nil
@@ -365,6 +364,10 @@ module GraphQL
        else
          @query.arguments_cache.dataload_for(ast_node, field_defn, owner_object) do |resolved_arguments|
            runtime_state = get_current_runtime_state # This might be in a different fiber
+            runtime_state.current_field = field_defn
+            runtime_state.current_arguments = resolved_arguments
+            runtime_state.current_result_name = result_name
+            runtime_state.current_result = selections_result
            evaluate_selection_with_args(resolved_arguments, field_defn, ast_node, field_ast_nodes, owner_object, result_name, selections_result, runtime_state)
          end
        end
@@ -373,6 +376,8 @@ module GraphQL
      def evaluate_selection_with_args(arguments, field_defn, ast_node, field_ast_nodes, object, result_name, selection_result, runtime_state) # rubocop:disable Metrics/ParameterLists
        after_lazy(arguments, field: field_defn, ast_node: ast_node, owner_object: object, arguments: arguments, result_name: result_name, result: selection_result, runtime_state: runtime_state) do |resolved_arguments, runtime_state|
          if resolved_arguments.is_a?(GraphQL::ExecutionError) || resolved_arguments.is_a?(GraphQL::UnauthorizedError)
+            next if selection_result.collect_result(result_name, resolved_arguments)
+
            return_type_non_null = field_defn.type.non_null?
            continue_value(resolved_arguments, field_defn, return_type_non_null, ast_node, result_name, selection_result)
            next
@@ -446,7 +451,7 @@ module GraphQL
        }
      end
 
-
+      call_method_on_directives(:resolve, object, directives) do
        if !directives.empty?
          # This might be executed in a different context; reset this info
          runtime_state = get_current_runtime_state
@@ -472,6 +477,8 @@ module GraphQL
        end
        @current_trace.end_execute_field(field_defn, object, kwarg_arguments, query, app_result)
        after_lazy(app_result, field: field_defn, ast_node: ast_node, owner_object: object, arguments: resolved_arguments, result_name: result_name, result: selection_result, runtime_state: runtime_state) do |inner_result, runtime_state|
+          next if selection_result.collect_result(result_name, inner_result)
+
          owner_type = selection_result.graphql_result_type
          return_type = field_defn.type
          continue_value = continue_value(inner_result, field_defn, return_type.non_null?, ast_node, result_name, selection_result)
@@ -488,7 +495,7 @@ module GraphQL
        # all of its child fields before moving on to the next root mutation field.
        # (Subselections of this mutation will still be resolved level-by-level.)
        if selection_result.graphql_is_eager
-
+          @dataloader.run
        end
      end
 
@@ -667,7 +674,11 @@ module GraphQL
      rescue GraphQL::ExecutionError => ex_err
        return continue_value(ex_err, field, is_non_null, ast_node, result_name, selection_result)
      rescue StandardError => err
-        query.handle_or_reraise(err)
+        begin
+          query.handle_or_reraise(err)
+        rescue GraphQL::ExecutionError => ex_err
+          return continue_value(ex_err, field, is_non_null, ast_node, result_name, selection_result)
+        end
      end
      set_result(selection_result, result_name, r, false, is_non_null)
      r
@@ -928,7 +939,7 @@ module GraphQL
          current_depth += 1
          result = result.graphql_parent
        end
-        @lazies_at_depth[current_depth] << lazy
+        @dataloader.lazy_at_depth(current_depth, lazy)
        lazy
      end
    else
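
Note: in `after_lazy`, `rescue StandardError` now routes the error through `query.handle_or_reraise`, and if the registered handler raises `GraphQL::ExecutionError`, that error is written into the response as a field error instead of escaping the lazy-resolution step. Handlers are registered with `rescue_from`; a schema-level sketch (the rescued error class is an assumption):

  class MySchema < GraphQL::Schema
    rescue_from(ActiveRecord::RecordNotFound) do |err, obj, args, ctx, field|
      raise GraphQL::ExecutionError, "#{field.path} could not find the requested record"
    end
  end
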
data/lib/graphql/execution/interpreter.rb
CHANGED
@@ -42,7 +42,6 @@ module GraphQL
        trace.execute_multiplex(multiplex: multiplex) do
          schema = multiplex.schema
          queries = multiplex.queries
-          lazies_at_depth = Hash.new { |h, k| h[k] = [] }
          multiplex_analyzers = schema.multiplex_analyzers
          if multiplex.max_complexity
            multiplex_analyzers += [GraphQL::Analysis::MaxQueryComplexity]
@@ -73,7 +72,7 @@ module GraphQL
              # Although queries in a multiplex _share_ an Interpreter instance,
              # they also have another item of state, which is private to that query
              # in particular, assign it here:
-              runtime = Runtime.new(query: query, lazies_at_depth: lazies_at_depth)
+              runtime = Runtime.new(query: query)
              query.context.namespace(:interpreter_runtime)[:runtime] = runtime
 
              query.current_trace.execute_query(query: query) do
@@ -88,16 +87,7 @@ module GraphQL
            }
          end
 
-          multiplex.dataloader.run
-
-          # Then, work through lazy results in a breadth-first way
-          multiplex.dataloader.append_job {
-            query = multiplex.queries.length == 1 ? multiplex.queries[0] : nil
-            multiplex.current_trace.execute_query_lazy(multiplex: multiplex, query: query) do
-              Interpreter::Resolve.resolve_each_depth(lazies_at_depth, multiplex.dataloader)
-            end
-          }
-          multiplex.dataloader.run
+          multiplex.dataloader.run(trace_query_lazy: multiplex)
 
          # Then, find all errors and assign the result to the query object
          results.each_with_index do |data_result, idx|
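
Note: the lazy-resolution phase is now traced from inside `Dataloader#run` (via `trace_query_lazy: multiplex`) rather than from a separately appended job, but tracers still receive the same `execute_query_lazy` event. A minimal custom trace sketch (module name and context key are placeholders):

  module LazyPhaseTimer
    def execute_query_lazy(query:, multiplex:)
      started = Process.clock_gettime(Process::CLOCK_MONOTONIC)
      super
    ensure
      multiplex.context[:lazy_phase_s] = Process.clock_gettime(Process::CLOCK_MONOTONIC) - started
    end
  end

  class MySchema < GraphQL::Schema
    trace_with(LazyPhaseTimer)
  end
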
data/lib/graphql/schema/build_from_definition.rb
CHANGED
@@ -266,6 +266,8 @@ module GraphQL
          build_scalar_type(definition, type_resolver, base_types[:scalar], default_resolve: default_resolve)
        when GraphQL::Language::Nodes::InputObjectTypeDefinition
          build_input_object_type(definition, type_resolver, base_types[:input_object])
+        when GraphQL::Language::Nodes::DirectiveDefinition
+          build_directive(definition, type_resolver)
        end
      end
 
@@ -544,7 +546,7 @@ module GraphQL
        when GraphQL::Language::Nodes::ListType
          resolve_type_proc.call(ast_node.of_type).to_list_type
        when String
-          directives[ast_node]
+          directives[ast_node] ||= missing_type_handler.call(ast_node)
        else
          raise "Unexpected ast_node: #{ast_node.inspect}"
        end
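
Note: the type-building `case` gains a branch for `DirectiveDefinition` nodes, and the `String` branch now falls back to `missing_type_handler` for unknown directive names instead of returning nil. For reference, directive definitions reach this code via `Schema.from_definition`; a small SDL sketch (directive and fields are illustrative):

  sdl = <<~GRAPHQL
    directive @feature(name: String!) on FIELD_DEFINITION

    type Query {
      greeting: String @feature(name: "hello")
    }
  GRAPHQL

  schema = GraphQL::Schema.from_definition(sdl)
  schema.directives.key?("feature") # expected: true, since custom directives are registered by name
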
data/lib/graphql/schema/member/has_arguments.rb
CHANGED
@@ -413,6 +413,12 @@ module GraphQL
        end
      end
 
+      # Called when an argument's `loads:` configuration fails to fetch an application object.
+      # By default, this method raises the given error, but you can override it to handle failures differently.
+      #
+      # @param err [GraphQL::LoadApplicationObjectFailedError] The error that occurred
+      # @return [Object, nil] If a value is returned, it will be used instead of the failed load
+      # @api public
      def load_application_object_failed(err)
        raise err
      end
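
Note: this hunk only documents the existing `load_application_object_failed` hook; the default behavior (re-raise) is unchanged. A sketch of overriding it so a failed `loads:` lookup is treated as "not found" rather than an error (class and argument names are illustrative):

  class Mutations::ArchivePost < Mutations::BaseMutation
    argument :post_id, ID, loads: Types::PostType, required: false

    # `err` is a GraphQL::LoadApplicationObjectFailedError; a returned value
    # (here nil) is used in place of the object that failed to load.
    def load_application_object_failed(err)
      nil
    end

    def resolve(post: nil)
      # `post` is nil here when the lookup failed
    end
  end
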
data/lib/graphql/version.rb
CHANGED
metadata
CHANGED
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: graphql
 version: !ruby/object:Gem::Version
-  version: 2.5.11
+  version: 2.5.12
 platform: ruby
 authors:
 - Robert Mosolgo
 bindir: bin
 cert_chain: []
-date: 2025-
+date: 2025-09-15 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: base64
@@ -817,7 +817,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.6.
+rubygems_version: 3.6.2
 specification_version: 4
 summary: A GraphQL language and runtime for Ruby
 test_files: []
|