graphql 2.4.8 → 2.4.10

This diff shows the changes between publicly released versions of the package, as they appear in its public registry. It is provided for informational purposes only.
Files changed (44)
  1. checksums.yaml +4 -4
  2. data/lib/graphql/backtrace/table.rb +95 -55
  3. data/lib/graphql/backtrace.rb +1 -19
  4. data/lib/graphql/current.rb +5 -0
  5. data/lib/graphql/dataloader/active_record_association_source.rb +64 -0
  6. data/lib/graphql/dataloader/active_record_source.rb +26 -0
  7. data/lib/graphql/dataloader/async_dataloader.rb +17 -5
  8. data/lib/graphql/dataloader/null_dataloader.rb +1 -1
  9. data/lib/graphql/dataloader/source.rb +2 -2
  10. data/lib/graphql/dataloader.rb +37 -5
  11. data/lib/graphql/execution/interpreter/runtime/graphql_result.rb +11 -4
  12. data/lib/graphql/execution/interpreter/runtime.rb +59 -32
  13. data/lib/graphql/execution/interpreter.rb +9 -1
  14. data/lib/graphql/execution/multiplex.rb +0 -4
  15. data/lib/graphql/introspection/directive_location_enum.rb +1 -1
  16. data/lib/graphql/language/parser.rb +1 -1
  17. data/lib/graphql/query.rb +8 -12
  18. data/lib/graphql/schema/build_from_definition.rb +1 -0
  19. data/lib/graphql/schema/enum.rb +21 -1
  20. data/lib/graphql/schema/interface.rb +1 -0
  21. data/lib/graphql/schema/loader.rb +1 -0
  22. data/lib/graphql/schema/member/has_dataloader.rb +56 -0
  23. data/lib/graphql/schema/member.rb +1 -0
  24. data/lib/graphql/schema/object.rb +17 -8
  25. data/lib/graphql/schema/resolver.rb +2 -5
  26. data/lib/graphql/schema/validator/required_validator.rb +23 -6
  27. data/lib/graphql/schema/visibility/profile.rb +5 -5
  28. data/lib/graphql/schema/visibility.rb +14 -9
  29. data/lib/graphql/schema.rb +9 -25
  30. data/lib/graphql/static_validation/validator.rb +6 -1
  31. data/lib/graphql/subscriptions/serialize.rb +1 -3
  32. data/lib/graphql/tracing/appoptics_trace.rb +1 -1
  33. data/lib/graphql/tracing/new_relic_trace.rb +138 -41
  34. data/lib/graphql/tracing/perfetto_trace/trace.proto +141 -0
  35. data/lib/graphql/tracing/perfetto_trace/trace_pb.rb +33 -0
  36. data/lib/graphql/tracing/perfetto_trace.rb +726 -0
  37. data/lib/graphql/tracing/trace.rb +125 -1
  38. data/lib/graphql/tracing.rb +1 -0
  39. data/lib/graphql/version.rb +1 -1
  40. metadata +135 -10
  41. data/lib/graphql/backtrace/inspect_result.rb +0 -38
  42. data/lib/graphql/backtrace/trace.rb +0 -93
  43. data/lib/graphql/backtrace/tracer.rb +0 -80
  44. data/lib/graphql/schema/null_mask.rb +0 -11
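The largest addition in this release is the new GraphQL::Tracing::PerfettoTrace module (file 36 above, +726 lines), reproduced in full below. As orientation, here is a minimal sketch of how it is wired up, based on the usage examples in that file's own doc comments; MySchema, the controller code, and the tmp/ path are illustrative placeholders, not part of the gem.

    class MySchema < GraphQL::Schema
      # Only run this tracer when `context[:trace_mode]` is `:trace`
      trace_with GraphQL::Tracing::PerfettoTrace, mode: :trace
    end

    # In a controller action (illustrative):
    context = { trace_mode: params[:trace] ? :trace : nil }
    result = MySchema.execute(query_string, context: context, variables: variables)
    if context[:trace_mode] == :trace
      # Produces a file for the Perfetto Trace Viewer (https://ui.perfetto.dev)
      result.query.trace.write(file: "tmp/trace.dump")
    end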
data/lib/graphql/tracing/perfetto_trace.rb (new file)
@@ -0,0 +1,726 @@
1
+ # frozen_string_literal: true
2
+ module GraphQL
3
+ module Tracing
4
+ # This produces a trace file for inspecting in the [Perfetto Trace Viewer](https://ui.perfetto.dev).
5
+ #
6
+ # To get the file, call {#write} on the trace.
7
+ #
8
+ # Use "trace modes" to configure this to run on command or on a sample of traffic.
9
+ #
10
+ # @example Writing trace output
11
+ #
12
+ # result = MySchema.execute(...)
13
+ # result.query.trace.write(file: "tmp/trace.dump")
14
+ #
15
+ # @example Running this instrumenter when `trace: true` is present in the request
16
+ #
17
+ # class MySchema < GraphQL::Schema
18
+ # # Only run this tracer when `context[:trace_mode]` is `:trace`
19
+ # trace_with GraphQL::Tracing::PerfettoTrace, mode: :trace
20
+ # end
21
+ #
22
+ # # In graphql_controller.rb:
23
+ #
24
+ # context[:trace_mode] = params[:trace] ? :trace : nil
25
+ # result = MySchema.execute(query_str, context: context, variables: variables, ...)
26
+ # if context[:trace_mode] == :trace
27
+ # result.trace.write(file: ...)
28
+ # end
29
+ #
30
+ module PerfettoTrace
31
+ # TODOs:
32
+ # - Make debug annotations visible on both parts when dataloader is involved
33
+
34
+ PROTOBUF_AVAILABLE = begin
35
+ require "google/protobuf"
36
+ true
37
+ rescue LoadError
38
+ false
39
+ end
40
+
41
+ if PROTOBUF_AVAILABLE
42
+ require "graphql/tracing/perfetto_trace/trace_pb"
43
+ end
44
+
45
+ def self.included(_trace_class)
46
+ if !PROTOBUF_AVAILABLE
47
+ raise "#{self} can't be used because the `google-protobuf` gem wasn't available. Add it to your project, then try again."
48
+ end
49
+ end
50
+
51
+ DATALOADER_CATEGORY_IIDS = [5]
52
+ FIELD_EXECUTE_CATEGORY_IIDS = [6]
53
+ ACTIVE_SUPPORT_NOTIFICATIONS_CATEGORY_IIDS = [7]
54
+ AUTHORIZED_CATEGORY_IIDS = [8]
55
+ RESOLVE_TYPE_CATEGORY_IIDS = [9]
56
+
57
+ DA_OBJECT_IID = 10
58
+ DA_RESULT_IID = 11
59
+ DA_ARGUMENTS_IID = 12
60
+ DA_FETCH_KEYS_IID = 13
61
+ DA_STR_VAL_NIL_IID = 14
62
+
63
+ # @param active_support_notifications_pattern [String, Regexp, false] A filter for `ActiveSupport::Notifications`, if it's present. Or `false` to skip subscribing.
64
+ def initialize(active_support_notifications_pattern: nil, **_rest)
65
+ super
66
+ @sequence_id = object_id
67
+ @pid = Process.pid
68
+ @flow_ids = Hash.new { |h, source_inst| h[source_inst] = [] }.compare_by_identity
69
+ @new_interned_event_names = {}
70
+ @interned_event_name_iids = Hash.new { |h, k|
71
+ new_id = 100 + h.size
72
+ @new_interned_event_names[k] = new_id
73
+ h[k] = new_id
74
+ }
75
+
76
+ @source_name_iids = Hash.new do |h, source_class|
77
+ h[source_class] = @interned_event_name_iids[source_class.name]
78
+ end.compare_by_identity
79
+
80
+ @auth_name_iids = Hash.new do |h, graphql_type|
81
+ h[graphql_type] = @interned_event_name_iids["Authorize: #{graphql_type.graphql_name}"]
82
+ end.compare_by_identity
83
+
84
+ @resolve_type_name_iids = Hash.new do |h, graphql_type|
85
+ h[graphql_type] = @interned_event_name_iids["Resolve Type: #{graphql_type.graphql_name}"]
86
+ end.compare_by_identity
87
+
88
+ @new_interned_da_names = {}
89
+ @interned_da_name_ids = Hash.new { |h, k|
90
+ next_id = 100 + h.size
91
+ @new_interned_da_names[k] = next_id
92
+ h[k] = next_id
93
+ }
94
+
95
+ @new_interned_da_string_values = {}
96
+ @interned_da_string_values = Hash.new do |h, k|
97
+ new_id = 100 + h.size
98
+ @new_interned_da_string_values[k] = new_id
99
+ h[k] = new_id
100
+ end
101
+
102
+ @class_name_iids = Hash.new do |h, k|
103
+ h[k] = @interned_da_string_values[k.name]
104
+ end.compare_by_identity
105
+
106
+ @starting_objects = GC.stat(:total_allocated_objects)
107
+ @objects_counter_id = :objects_counter.object_id
108
+ @fibers_counter_id = :fibers_counter.object_id
109
+ @fields_counter_id = :fields_counter.object_id
110
+ @begin_validate = nil
111
+ @packets = []
112
+ @packets << TracePacket.new(
113
+ track_descriptor: TrackDescriptor.new(
114
+ uuid: tid,
115
+ name: "Main Thread",
116
+ child_ordering: TrackDescriptor::ChildTracksOrdering::CHRONOLOGICAL,
117
+ ),
118
+ first_packet_on_sequence: true,
119
+ previous_packet_dropped: true,
120
+ trusted_packet_sequence_id: @sequence_id,
121
+ sequence_flags: 3,
122
+ )
123
+ @packets << TracePacket.new(
124
+ interned_data: InternedData.new(
125
+ event_categories: [
126
+ EventCategory.new(name: "Dataloader", iid: DATALOADER_CATEGORY_IIDS.first),
127
+ EventCategory.new(name: "Field Execution", iid: FIELD_EXECUTE_CATEGORY_IIDS.first),
128
+ EventCategory.new(name: "ActiveSupport::Notifications", iid: ACTIVE_SUPPORT_NOTIFICATIONS_CATEGORY_IIDS.first),
129
+ EventCategory.new(name: "Authorized", iid: AUTHORIZED_CATEGORY_IIDS.first),
130
+ EventCategory.new(name: "Resolve Type", iid: RESOLVE_TYPE_CATEGORY_IIDS.first),
131
+ ],
132
+ debug_annotation_names: [
133
+ DebugAnnotationName.new(name: "object", iid: DA_OBJECT_IID),
134
+ DebugAnnotationName.new(name: "arguments", iid: DA_ARGUMENTS_IID),
135
+ DebugAnnotationName.new(name: "result", iid: DA_RESULT_IID),
136
+ DebugAnnotationName.new(name: "fetch keys", iid: DA_FETCH_KEYS_IID),
137
+ ],
138
+ debug_annotation_string_values: [
139
+ InternedString.new(str: "(nil)", iid: DA_STR_VAL_NIL_IID),
140
+ ],
141
+ ),
142
+ trusted_packet_sequence_id: @sequence_id,
143
+ sequence_flags: 2,
144
+ )
145
+ @main_fiber_id = fid
146
+ @packets << track_descriptor_packet(tid, fid, "Main Fiber")
147
+ @packets << track_descriptor_packet(tid, @objects_counter_id, "Allocated Objects", counter: {})
148
+ @packets << trace_packet(
149
+ type: TrackEvent::Type::TYPE_COUNTER,
150
+ track_uuid: @objects_counter_id,
151
+ counter_value: count_allocations,
152
+ )
153
+ @packets << track_descriptor_packet(tid, @fibers_counter_id, "Active Fibers", counter: {})
154
+ @fibers_count = 0
155
+ @packets << trace_packet(
156
+ type: TrackEvent::Type::TYPE_COUNTER,
157
+ track_uuid: @fibers_counter_id,
158
+ counter_value: count_fibers(0),
159
+ )
160
+
161
+ @packets << track_descriptor_packet(tid, @fields_counter_id, "Resolved Fields", counter: {})
162
+ @fields_count = -1
163
+ @packets << trace_packet(
164
+ type: TrackEvent::Type::TYPE_COUNTER,
165
+ track_uuid: @fields_counter_id,
166
+ counter_value: count_fields,
167
+ )
168
+
169
+ if defined?(ActiveSupport::Notifications) && active_support_notifications_pattern != false
170
+ subscribe_to_active_support_notifications(active_support_notifications_pattern)
171
+ end
172
+ end
173
+
174
+ def begin_execute_multiplex(m)
175
+ @packets << trace_packet(
176
+ type: TrackEvent::Type::TYPE_SLICE_BEGIN,
177
+ track_uuid: fid,
178
+ name: "Multiplex",
179
+ debug_annotations: [
180
+ payload_to_debug("query_string", m.queries.map(&:sanitized_query_string).join("\n\n"))
181
+ ]
182
+ )
183
+ super
184
+ end
185
+
186
+ def end_execute_multiplex(m)
187
+ @packets << trace_packet(
188
+ type: TrackEvent::Type::TYPE_SLICE_END,
189
+ track_uuid: fid,
190
+ )
191
+ unsubscribe_from_active_support_notifications
192
+ super
193
+ end
194
+
195
+ def begin_execute_field(field, object, arguments, query)
196
+ packet = trace_packet(
197
+ type: TrackEvent::Type::TYPE_SLICE_BEGIN,
198
+ track_uuid: fid,
199
+ name: query.context.current_path.join("."),
200
+ category_iids: FIELD_EXECUTE_CATEGORY_IIDS,
201
+ extra_counter_track_uuids: [@objects_counter_id],
202
+ extra_counter_values: [count_allocations],
203
+ )
204
+ @packets << packet
205
+ fiber_flow_stack << packet
206
+ super
207
+ end
208
+
209
+ def end_execute_field(field, object, arguments, query, app_result)
210
+ start_field = fiber_flow_stack.pop
211
+ start_field.track_event = dup_with(start_field.track_event, {
212
+ debug_annotations: [
213
+ payload_to_debug(nil, object.object, iid: DA_OBJECT_IID, intern_value: true),
214
+ payload_to_debug(nil, arguments, iid: DA_ARGUMENTS_IID),
215
+ payload_to_debug(nil, app_result, iid: DA_RESULT_IID, intern_value: true)
216
+ ]
217
+ })
218
+
219
+ @packets << trace_packet(
220
+ type: TrackEvent::Type::TYPE_SLICE_END,
221
+ track_uuid: fid,
222
+ extra_counter_track_uuids: [@objects_counter_id, @fields_counter_id],
223
+ extra_counter_values: [count_allocations, count_fields],
224
+ )
225
+ super
226
+ end
227
+
228
+ def begin_analyze_multiplex(m, analyzers)
229
+ @packets << trace_packet(
230
+ type: TrackEvent::Type::TYPE_SLICE_BEGIN,
231
+ track_uuid: fid,
232
+ extra_counter_track_uuids: [@objects_counter_id],
233
+ extra_counter_values: [count_allocations],
234
+ name: "Analysis",
235
+ debug_annotations: [
236
+ payload_to_debug("analyzers_count", analyzers.size),
237
+ payload_to_debug("analyzers", analyzers),
238
+ ]
239
+ )
240
+ super
241
+ end
242
+
243
+ def end_analyze_multiplex(m, analyzers)
244
+ @packets << trace_packet(
245
+ type: TrackEvent::Type::TYPE_SLICE_END,
246
+ track_uuid: fid,
247
+ extra_counter_track_uuids: [@objects_counter_id],
248
+ extra_counter_values: [count_allocations],
249
+ )
250
+ super
251
+ end
252
+
253
+ def begin_parse(str)
254
+ @packets << trace_packet(
255
+ type: TrackEvent::Type::TYPE_SLICE_BEGIN,
256
+ track_uuid: fid,
257
+ extra_counter_track_uuids: [@objects_counter_id],
258
+ extra_counter_values: [count_allocations],
259
+ name: "Parse"
260
+ )
261
+ super
262
+ end
263
+
264
+ def end_parse(str)
265
+ @packets << trace_packet(
266
+ type: TrackEvent::Type::TYPE_SLICE_END,
267
+ track_uuid: fid,
268
+ extra_counter_track_uuids: [@objects_counter_id],
269
+ extra_counter_values: [count_allocations],
270
+ )
271
+ super
272
+ end
273
+
274
+ def begin_validate(query, validate)
275
+ @packets << @begin_validate = trace_packet(
276
+ type: TrackEvent::Type::TYPE_SLICE_BEGIN,
277
+ track_uuid: fid,
278
+ extra_counter_track_uuids: [@objects_counter_id],
279
+ extra_counter_values: [count_allocations],
280
+ name: "Validate",
281
+ debug_annotations: [
282
+ payload_to_debug("validate?", validate),
283
+ ]
284
+ )
285
+ super
286
+ end
287
+
288
+ def end_validate(query, validate, validation_errors)
289
+ @packets << trace_packet(
290
+ type: TrackEvent::Type::TYPE_SLICE_END,
291
+ track_uuid: fid,
292
+ extra_counter_track_uuids: [@objects_counter_id],
293
+ extra_counter_values: [count_allocations],
294
+ )
295
+ @begin_validate.track_event = dup_with(
296
+ @begin_validate.track_event,
297
+ {
298
+ debug_annotations: [
299
+ @begin_validate.track_event.debug_annotations.first,
300
+ payload_to_debug("valid?", !validation_errors.empty?)
301
+ ]
302
+ }
303
+ )
304
+ super
305
+ end
306
+
307
+ def dataloader_spawn_execution_fiber(jobs)
308
+ @packets << trace_packet(
309
+ type: TrackEvent::Type::TYPE_INSTANT,
310
+ track_uuid: fid,
311
+ name: "Create Execution Fiber",
312
+ category_iids: DATALOADER_CATEGORY_IIDS,
313
+ extra_counter_track_uuids: [@fibers_counter_id, @objects_counter_id],
314
+ extra_counter_values: [count_fibers(1), count_allocations]
315
+ )
316
+ @packets << track_descriptor_packet(@did, fid, "Exec Fiber ##{fid}")
317
+ super
318
+ end
319
+
320
+ def dataloader_spawn_source_fiber(pending_sources)
321
+ @packets << trace_packet(
322
+ type: TrackEvent::Type::TYPE_INSTANT,
323
+ track_uuid: fid,
324
+ name: "Create Source Fiber",
325
+ category_iids: DATALOADER_CATEGORY_IIDS,
326
+ extra_counter_track_uuids: [@fibers_counter_id, @objects_counter_id],
327
+ extra_counter_values: [count_fibers(1), count_allocations]
328
+ )
329
+ @packets << track_descriptor_packet(@did, fid, "Source Fiber ##{fid}")
330
+ super
331
+ end
332
+
333
+ def dataloader_fiber_yield(source)
334
+ ls = fiber_flow_stack.last
335
+ if (flow_id = ls.track_event.flow_ids.first)
336
+ # got it
337
+ else
338
+ flow_id = ls.track_event.name.object_id
339
+ ls.track_event = dup_with(ls.track_event, {flow_ids: [flow_id] }, delete_counters: true)
340
+ end
341
+ @flow_ids[source] << flow_id
342
+ @packets << trace_packet(
343
+ type: TrackEvent::Type::TYPE_SLICE_END,
344
+ track_uuid: fid,
345
+ )
346
+ @packets << trace_packet(
347
+ type: TrackEvent::Type::TYPE_INSTANT,
348
+ track_uuid: fid,
349
+ name: "Fiber Yield",
350
+ category_iids: DATALOADER_CATEGORY_IIDS,
351
+ )
352
+ super
353
+ end
354
+
355
+ def dataloader_fiber_resume(source)
356
+ @packets << trace_packet(
357
+ type: TrackEvent::Type::TYPE_INSTANT,
358
+ track_uuid: fid,
359
+ name: "Fiber Resume",
360
+ category_iids: DATALOADER_CATEGORY_IIDS,
361
+ )
362
+
363
+ ls = fiber_flow_stack.pop
364
+ @packets << packet = TracePacket.new(
365
+ timestamp: ts,
366
+ track_event: dup_with(ls.track_event, { type: TrackEvent::Type::TYPE_SLICE_BEGIN }),
367
+ trusted_packet_sequence_id: @sequence_id,
368
+ )
369
+ fiber_flow_stack << packet
370
+
371
+ super
372
+ end
373
+
374
+ def dataloader_fiber_exit
375
+ @packets << trace_packet(
376
+ type: TrackEvent::Type::TYPE_INSTANT,
377
+ track_uuid: fid,
378
+ name: "Fiber Exit",
379
+ category_iids: DATALOADER_CATEGORY_IIDS,
380
+ extra_counter_track_uuids: [@fibers_counter_id],
381
+ extra_counter_values: [count_fibers(-1)],
382
+ )
383
+ super
384
+ end
385
+
386
+ def begin_dataloader(dl)
387
+ @packets << trace_packet(
388
+ type: TrackEvent::Type::TYPE_COUNTER,
389
+ track_uuid: @fibers_counter_id,
390
+ counter_value: count_fibers(1),
391
+ )
392
+ @did = fid
393
+ @packets << track_descriptor_packet(@main_fiber_id, @did, "Dataloader Fiber ##{@did}")
394
+ super
395
+ end
396
+
397
+ def end_dataloader(dl)
398
+ @packets << trace_packet(
399
+ type: TrackEvent::Type::TYPE_COUNTER,
400
+ track_uuid: @fibers_counter_id,
401
+ counter_value: count_fibers(-1),
402
+ )
403
+ super
404
+ end
405
+
406
+ def begin_dataloader_source(source)
407
+ fds = @flow_ids[source]
408
+ fds_copy = fds.dup
409
+ fds.clear
410
+ packet = trace_packet(
411
+ type: TrackEvent::Type::TYPE_SLICE_BEGIN,
412
+ track_uuid: fid,
413
+ name_iid: @source_name_iids[source.class],
414
+ category_iids: DATALOADER_CATEGORY_IIDS,
415
+ flow_ids: fds_copy,
416
+ extra_counter_track_uuids: [@objects_counter_id],
417
+ extra_counter_values: [count_allocations],
418
+ debug_annotations: [
419
+ payload_to_debug(nil, source.pending.values, iid: DA_FETCH_KEYS_IID, intern_value: true),
420
+ *(source.instance_variables - [:@pending, :@fetching, :@results, :@dataloader]).map { |iv|
421
+ payload_to_debug(iv.to_s, source.instance_variable_get(iv), intern_value: true)
422
+ }
423
+ ]
424
+ )
425
+ @packets << packet
426
+ fiber_flow_stack << packet
427
+ super
428
+ end
429
+
430
+ def end_dataloader_source(source)
431
+ @packets << trace_packet(
432
+ type: TrackEvent::Type::TYPE_SLICE_END,
433
+ track_uuid: fid,
434
+ extra_counter_track_uuids: [@objects_counter_id],
435
+ extra_counter_values: [count_allocations],
436
+ )
437
+ fiber_flow_stack.pop
438
+ super
439
+ end
440
+
441
+ def begin_authorized(type, obj, ctx)
442
+ packet = trace_packet(
443
+ type: TrackEvent::Type::TYPE_SLICE_BEGIN,
444
+ track_uuid: fid,
445
+ category_iids: AUTHORIZED_CATEGORY_IIDS,
446
+ extra_counter_track_uuids: [@objects_counter_id],
447
+ extra_counter_values: [count_allocations],
448
+ name_iid: @auth_name_iids[type],
449
+ )
450
+ @packets << packet
451
+ fiber_flow_stack << packet
452
+ super
453
+ end
454
+
455
+ def end_authorized(type, obj, ctx, is_authorized)
456
+ @packets << trace_packet(
457
+ type: TrackEvent::Type::TYPE_SLICE_END,
458
+ track_uuid: fid,
459
+ extra_counter_track_uuids: [@objects_counter_id],
460
+ extra_counter_values: [count_allocations],
461
+ )
462
+ beg_auth = fiber_flow_stack.pop
463
+ beg_auth.track_event = dup_with(beg_auth.track_event, { debug_annotations: [payload_to_debug("authorized?", is_authorized)] })
464
+ super
465
+ end
466
+
467
+ def begin_resolve_type(type, value, context)
468
+ packet = trace_packet(
469
+ type: TrackEvent::Type::TYPE_SLICE_BEGIN,
470
+ track_uuid: fid,
471
+ category_iids: RESOLVE_TYPE_CATEGORY_IIDS,
472
+ extra_counter_track_uuids: [@objects_counter_id],
473
+ extra_counter_values: [count_allocations],
474
+ name_iid: @resolve_type_name_iids[type],
475
+ )
476
+ @packets << packet
477
+ fiber_flow_stack << packet
478
+ super
479
+ end
480
+
481
+ def end_resolve_type(type, value, context, resolved_type)
482
+ @packets << trace_packet(
483
+ type: TrackEvent::Type::TYPE_SLICE_END,
484
+ track_uuid: fid,
485
+ extra_counter_track_uuids: [@objects_counter_id],
486
+ extra_counter_values: [count_allocations],
487
+ )
488
+ rt_begin = fiber_flow_stack.pop
489
+ rt_begin.track_event = dup_with(rt_begin.track_event, { debug_annotations: [payload_to_debug("resolved_type", resolved_type, intern_value: true)] })
490
+ super
491
+ end
492
+
493
+ # Dump protobuf output in the specified file.
494
+ # @param file [String] path to a file in a directory that already exists
495
+ # @param debug_json [Boolean] True to print JSON instead of binary
496
+ # @return [nil, String, Hash] If `file` was given, `nil`. If `file` was `nil`, a Hash if `debug_json: true`, else binary data.
497
+ def write(file:, debug_json: false)
498
+ trace = Trace.new(
499
+ packet: @packets,
500
+ )
501
+ data = if debug_json
502
+ small_json = Trace.encode_json(trace)
503
+ JSON.pretty_generate(JSON.parse(small_json))
504
+ else
505
+ Trace.encode(trace)
506
+ end
507
+
508
+ if file
509
+ File.write(file, data, mode: 'wb')
510
+ nil
511
+ else
512
+ data
513
+ end
514
+ end
515
+
516
+ private
517
+
518
+ def ts
519
+ Process.clock_gettime(Process::CLOCK_MONOTONIC, :nanosecond)
520
+ end
521
+
522
+ def tid
523
+ Thread.current.object_id
524
+ end
525
+
526
+ def fid
527
+ Fiber.current.object_id
528
+ end
529
+
530
+ def debug_annotation(iid, value_key, value)
531
+ if iid
532
+ DebugAnnotation.new(name_iid: iid, value_key => value)
533
+ else
534
+ DebugAnnotation.new(value_key => value)
535
+ end
536
+ end
537
+
538
+ def payload_to_debug(k, v, iid: nil, intern_value: false)
539
+ if iid.nil?
540
+ iid = @interned_da_name_ids[k]
541
+ k = nil
542
+ end
543
+ case v
544
+ when String
545
+ if intern_value
546
+ v = @interned_da_string_values[v]
547
+ debug_annotation(iid, :string_value_iid, v)
548
+ else
549
+ debug_annotation(iid, :string_value, v)
550
+ end
551
+ when Float
552
+ debug_annotation(iid, :double_value, v)
553
+ when Integer
554
+ debug_annotation(iid, :int_value, v)
555
+ when true, false
556
+ debug_annotation(iid, :bool_value, v)
557
+ when nil
558
+ if iid
559
+ DebugAnnotation.new(name_iid: iid, string_value_iid: DA_STR_VAL_NIL_IID)
560
+ else
561
+ DebugAnnotation.new(name: k, string_value_iid: DA_STR_VAL_NIL_IID)
562
+ end
563
+ when Module
564
+ if intern_value
565
+ val_iid = @class_name_iids[v]
566
+ debug_annotation(iid, :string_value_iid, val_iid)
567
+ else
568
+ debug_annotation(iid, :string_value, v.name)
569
+ end
570
+ when Symbol
571
+ debug_annotation(iid, :string_value, v.inspect)
572
+ when Array
573
+ debug_annotation(iid, :array_values, v.map { |v2| payload_to_debug(nil, v2, intern_value: intern_value) }.compact)
574
+ when Hash
575
+ debug_annotation(iid, :dict_entries, v.map { |k2, v2| payload_to_debug(k2, v2, intern_value: intern_value) }.compact)
576
+ else
577
+ debug_str = if defined?(ActiveRecord::Relation) && v.is_a?(ActiveRecord::Relation)
578
+ "#{v.class}, .to_sql=#{v.to_sql.inspect}"
579
+ else
580
+ v.inspect
581
+ end
582
+ if intern_value
583
+ str_iid = @interned_da_string_values[debug_str]
584
+ debug_annotation(iid, :string_value_iid, str_iid)
585
+ else
586
+ debug_annotation(iid, :string_value, debug_str)
587
+ end
588
+ end
589
+ end
590
+
591
+ def count_allocations
592
+ GC.stat(:total_allocated_objects) - @starting_objects
593
+ end
594
+
595
+ def count_fibers(diff)
596
+ @fibers_count += diff
597
+ end
598
+
599
+ def count_fields
600
+ @fields_count += 1
601
+ end
602
+
603
+ def dup_with(message, attrs, delete_counters: false)
604
+ new_attrs = message.to_h
605
+ if delete_counters
606
+ new_attrs.delete(:extra_counter_track_uuids)
607
+ new_attrs.delete(:extra_counter_values)
608
+ end
609
+ new_attrs.merge!(attrs)
610
+ message.class.new(**new_attrs)
611
+ end
612
+
613
+ def fiber_flow_stack
614
+ Fiber[:graphql_flow_stack] ||= []
615
+ end
616
+
617
+ def trace_packet(event_attrs)
618
+ TracePacket.new(
619
+ timestamp: ts,
620
+ track_event: TrackEvent.new(event_attrs),
621
+ trusted_packet_sequence_id: @sequence_id,
622
+ sequence_flags: 2,
623
+ interned_data: new_interned_data
624
+ )
625
+ end
626
+
627
+ def new_interned_data
628
+ if !@new_interned_da_names.empty?
629
+ da_names = @new_interned_da_names.map { |(name, iid)| DebugAnnotationName.new(iid: iid, name: name) }
630
+ @new_interned_da_names.clear
631
+ end
632
+
633
+ if !@new_interned_event_names.empty?
634
+ ev_names = @new_interned_event_names.map { |(name, iid)| EventName.new(iid: iid, name: name) }
635
+ @new_interned_event_names.clear
636
+ end
637
+
638
+ if !@new_interned_da_string_values.empty?
639
+ str_vals = @new_interned_da_string_values.map { |name, iid| InternedString.new(iid: iid, str: name) }
640
+ @new_interned_da_string_values.clear
641
+ end
642
+
643
+ if ev_names || da_names || str_vals
644
+ InternedData.new(
645
+ event_names: ev_names,
646
+ debug_annotation_names: da_names,
647
+ debug_annotation_string_values: str_vals,
648
+ )
649
+ else
650
+ nil
651
+ end
652
+ end
653
+
654
+ def track_descriptor_packet(parent_uuid, uuid, name, counter: nil)
655
+ td = if counter
656
+ TrackDescriptor.new(
657
+ parent_uuid: parent_uuid,
658
+ uuid: uuid,
659
+ name: name,
660
+ counter: counter
661
+ )
662
+ else
663
+ TrackDescriptor.new(
664
+ parent_uuid: parent_uuid,
665
+ uuid: uuid,
666
+ name: name,
667
+ child_ordering: TrackDescriptor::ChildTracksOrdering::CHRONOLOGICAL,
668
+ )
669
+ end
670
+ TracePacket.new(
671
+ track_descriptor: td,
672
+ trusted_packet_sequence_id: @sequence_id,
673
+ sequence_flags: 2,
674
+ )
675
+ end
676
+
677
+ def unsubscribe_from_active_support_notifications
678
+ if defined?(@as_subscriber)
679
+ ActiveSupport::Notifications.unsubscribe(@as_subscriber)
680
+ end
681
+ end
682
+
683
+ def subscribe_to_active_support_notifications(pattern)
684
+ @as_subscriber = ActiveSupport::Notifications.monotonic_subscribe(pattern) do |name, start, finish, id, payload|
685
+ metadata = payload.map { |k, v| payload_to_debug(k, v, intern_value: true) }
686
+ metadata.compact!
687
+ te = if metadata.empty?
688
+ TrackEvent.new(
689
+ type: TrackEvent::Type::TYPE_SLICE_BEGIN,
690
+ track_uuid: fid,
691
+ category_iids: ACTIVE_SUPPORT_NOTIFICATIONS_CATEGORY_IIDS,
692
+ name: name,
693
+ )
694
+ else
695
+ TrackEvent.new(
696
+ type: TrackEvent::Type::TYPE_SLICE_BEGIN,
697
+ track_uuid: fid,
698
+ name: name,
699
+ category_iids: ACTIVE_SUPPORT_NOTIFICATIONS_CATEGORY_IIDS,
700
+ debug_annotations: metadata,
701
+ )
702
+ end
703
+ @packets << TracePacket.new(
704
+ timestamp: (start * 1_000_000_000).to_i,
705
+ track_event: te,
706
+ trusted_packet_sequence_id: @sequence_id,
707
+ sequence_flags: 2,
708
+ interned_data: new_interned_data
709
+ )
710
+ @packets << TracePacket.new(
711
+ timestamp: (finish * 1_000_000_000).to_i,
712
+ track_event: TrackEvent.new(
713
+ type: TrackEvent::Type::TYPE_SLICE_END,
714
+ track_uuid: fid,
715
+ name: name,
716
+ extra_counter_track_uuids: [@objects_counter_id],
717
+ extra_counter_values: [count_allocations]
718
+ ),
719
+ trusted_packet_sequence_id: @sequence_id,
720
+ sequence_flags: 2,
721
+ )
722
+ end
723
+ end
724
+ end
725
+ end
726
+ end
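
For reference, the #write method defined above supports two output modes, and the "trace modes" mentioned in the file's doc comments can also be driven by sampling rather than an explicit request parameter. A minimal sketch, assuming the same MySchema setup as earlier; the 1% sampling rate and file path are illustrative assumptions, not part of the gem:

    trace = result.query.trace

    trace.write(file: "tmp/trace.dump")                  # binary protobuf written to disk, returns nil
    json_text = trace.write(file: nil, debug_json: true) # pretty-printed JSON returned instead of binary

    # Tracing a sample of traffic: set the trace mode probabilistically before executing.
    context[:trace_mode] = :trace if rand < 0.01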