elasticgraph-graphql 0.18.0.0
- checksums.yaml +7 -0
- data/LICENSE.txt +21 -0
- data/README.md +3 -0
- data/elasticgraph-graphql.gemspec +23 -0
- data/lib/elastic_graph/graphql/aggregation/composite_grouping_adapter.rb +79 -0
- data/lib/elastic_graph/graphql/aggregation/computation.rb +39 -0
- data/lib/elastic_graph/graphql/aggregation/date_histogram_grouping.rb +83 -0
- data/lib/elastic_graph/graphql/aggregation/field_path_encoder.rb +47 -0
- data/lib/elastic_graph/graphql/aggregation/field_term_grouping.rb +26 -0
- data/lib/elastic_graph/graphql/aggregation/key.rb +87 -0
- data/lib/elastic_graph/graphql/aggregation/nested_sub_aggregation.rb +37 -0
- data/lib/elastic_graph/graphql/aggregation/non_composite_grouping_adapter.rb +129 -0
- data/lib/elastic_graph/graphql/aggregation/path_segment.rb +31 -0
- data/lib/elastic_graph/graphql/aggregation/query.rb +172 -0
- data/lib/elastic_graph/graphql/aggregation/query_adapter.rb +345 -0
- data/lib/elastic_graph/graphql/aggregation/query_optimizer.rb +187 -0
- data/lib/elastic_graph/graphql/aggregation/resolvers/aggregated_values.rb +41 -0
- data/lib/elastic_graph/graphql/aggregation/resolvers/count_detail.rb +44 -0
- data/lib/elastic_graph/graphql/aggregation/resolvers/grouped_by.rb +30 -0
- data/lib/elastic_graph/graphql/aggregation/resolvers/node.rb +64 -0
- data/lib/elastic_graph/graphql/aggregation/resolvers/relay_connection_builder.rb +83 -0
- data/lib/elastic_graph/graphql/aggregation/resolvers/sub_aggregations.rb +82 -0
- data/lib/elastic_graph/graphql/aggregation/script_term_grouping.rb +32 -0
- data/lib/elastic_graph/graphql/aggregation/term_grouping.rb +118 -0
- data/lib/elastic_graph/graphql/client.rb +43 -0
- data/lib/elastic_graph/graphql/config.rb +81 -0
- data/lib/elastic_graph/graphql/datastore_query/document_paginator.rb +100 -0
- data/lib/elastic_graph/graphql/datastore_query/index_expression_builder.rb +142 -0
- data/lib/elastic_graph/graphql/datastore_query/paginator.rb +199 -0
- data/lib/elastic_graph/graphql/datastore_query/routing_picker.rb +239 -0
- data/lib/elastic_graph/graphql/datastore_query.rb +372 -0
- data/lib/elastic_graph/graphql/datastore_response/document.rb +78 -0
- data/lib/elastic_graph/graphql/datastore_response/search_response.rb +79 -0
- data/lib/elastic_graph/graphql/datastore_search_router.rb +151 -0
- data/lib/elastic_graph/graphql/decoded_cursor.rb +120 -0
- data/lib/elastic_graph/graphql/filtering/boolean_query.rb +45 -0
- data/lib/elastic_graph/graphql/filtering/field_path.rb +81 -0
- data/lib/elastic_graph/graphql/filtering/filter_args_translator.rb +58 -0
- data/lib/elastic_graph/graphql/filtering/filter_interpreter.rb +526 -0
- data/lib/elastic_graph/graphql/filtering/filter_value_set_extractor.rb +148 -0
- data/lib/elastic_graph/graphql/filtering/range_query.rb +56 -0
- data/lib/elastic_graph/graphql/http_endpoint.rb +229 -0
- data/lib/elastic_graph/graphql/monkey_patches/schema_field.rb +56 -0
- data/lib/elastic_graph/graphql/monkey_patches/schema_object.rb +48 -0
- data/lib/elastic_graph/graphql/query_adapter/filters.rb +161 -0
- data/lib/elastic_graph/graphql/query_adapter/pagination.rb +27 -0
- data/lib/elastic_graph/graphql/query_adapter/requested_fields.rb +124 -0
- data/lib/elastic_graph/graphql/query_adapter/sort.rb +32 -0
- data/lib/elastic_graph/graphql/query_details_tracker.rb +60 -0
- data/lib/elastic_graph/graphql/query_executor.rb +200 -0
- data/lib/elastic_graph/graphql/resolvers/get_record_field_value.rb +49 -0
- data/lib/elastic_graph/graphql/resolvers/graphql_adapter.rb +114 -0
- data/lib/elastic_graph/graphql/resolvers/list_records.rb +29 -0
- data/lib/elastic_graph/graphql/resolvers/nested_relationships.rb +74 -0
- data/lib/elastic_graph/graphql/resolvers/query_adapter.rb +85 -0
- data/lib/elastic_graph/graphql/resolvers/query_source.rb +46 -0
- data/lib/elastic_graph/graphql/resolvers/relay_connection/array_adapter.rb +71 -0
- data/lib/elastic_graph/graphql/resolvers/relay_connection/generic_adapter.rb +65 -0
- data/lib/elastic_graph/graphql/resolvers/relay_connection/page_info.rb +82 -0
- data/lib/elastic_graph/graphql/resolvers/relay_connection/search_response_adapter_builder.rb +40 -0
- data/lib/elastic_graph/graphql/resolvers/relay_connection.rb +42 -0
- data/lib/elastic_graph/graphql/resolvers/resolvable_value.rb +56 -0
- data/lib/elastic_graph/graphql/scalar_coercion_adapters/cursor.rb +35 -0
- data/lib/elastic_graph/graphql/scalar_coercion_adapters/date.rb +64 -0
- data/lib/elastic_graph/graphql/scalar_coercion_adapters/date_time.rb +60 -0
- data/lib/elastic_graph/graphql/scalar_coercion_adapters/local_time.rb +30 -0
- data/lib/elastic_graph/graphql/scalar_coercion_adapters/longs.rb +47 -0
- data/lib/elastic_graph/graphql/scalar_coercion_adapters/no_op.rb +24 -0
- data/lib/elastic_graph/graphql/scalar_coercion_adapters/time_zone.rb +44 -0
- data/lib/elastic_graph/graphql/scalar_coercion_adapters/untyped.rb +32 -0
- data/lib/elastic_graph/graphql/scalar_coercion_adapters/valid_time_zones.rb +634 -0
- data/lib/elastic_graph/graphql/schema/arguments.rb +78 -0
- data/lib/elastic_graph/graphql/schema/enum_value.rb +30 -0
- data/lib/elastic_graph/graphql/schema/field.rb +147 -0
- data/lib/elastic_graph/graphql/schema/relation_join.rb +103 -0
- data/lib/elastic_graph/graphql/schema/type.rb +263 -0
- data/lib/elastic_graph/graphql/schema.rb +164 -0
- data/lib/elastic_graph/graphql.rb +253 -0
- data/script/dump_time_zones +81 -0
- data/script/dump_time_zones.java +17 -0
- metadata +503 -0
data/lib/elastic_graph/graphql/aggregation/query.rb
@@ -0,0 +1,172 @@
# Copyright 2024 Block, Inc.
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
#
# frozen_string_literal: true

require "elastic_graph/graphql/aggregation/key"
require "elastic_graph/graphql/datastore_query"
require "elastic_graph/graphql/filtering/field_path"

module ElasticGraph
  class GraphQL
    module Aggregation
      class Query < ::Data.define(
        # Unique name for the aggregation
        :name,
        # Whether or not we need to get the document count for each bucket.
        :needs_doc_count,
        # Whether or not we need to get the error on the document count to satisfy the sub-aggregation query.
        # https://www.elastic.co/guide/en/elasticsearch/reference/8.10/search-aggregations-bucket-terms-aggregation.html#_per_bucket_document_count_error
        :needs_doc_count_error,
        # Filter to apply to this sub-aggregation.
        :filter,
        # Paginator for handling size and other pagination concerns.
        :paginator,
        # A sub-aggregation query can have sub-aggregations of its own.
        :sub_aggregations,
        # Collection of `Computation` objects that specify numeric computations to perform.
        :computations,
        # Collection of `DateHistogramGrouping`, `FieldTermGrouping`, and `ScriptTermGrouping` objects that specify how this sub-aggregation should be grouped.
        :groupings,
        # Adapter to use for groupings.
        :grouping_adapter
      )
        def needs_total_doc_count?
          # We only need a total document count when there are NO groupings and the doc count is requested.
          # The datastore will return the number of hits in each grouping automatically, so we don't need
          # a total doc count when there are groupings. And when the query isn't requesting the field, we
          # don't need it, either.
          needs_doc_count && groupings.empty?
        end

        # Builds an aggregations hash. The returned value has a few different cases:
        #
        # - If `size` is 0, or `groupings` and `computations` are both empty, we return an empty hash,
        #   so that `to_datastore_body` is an empty hash. We do this so that we avoid sending
        #   the datastore any sort of aggregations query in these cases, as the client is not
        #   requesting any aggregation data.
        # - If `SINGLETON_CURSOR` was provided for either `before` or `after`, we also return an empty hash,
        #   because we know there cannot be any results to return--the cursor is a reference to
        #   the one and only item in the list, and nothing can exist before or after it.
        # - Otherwise, we return an aggregations hash based on the groupings, computations, and sub-aggregations.
        def build_agg_hash(filter_interpreter)
          build_agg_detail(filter_interpreter, field_path: [], parent_queries: [])&.clauses || {}
        end

        def build_agg_detail(filter_interpreter, field_path:, parent_queries:)
          return nil if paginator.desired_page_size.zero? || paginator.paginated_from_singleton_cursor?
          queries = parent_queries + [self] # : ::Array[Query]

          filter_detail(filter_interpreter, field_path) do
            grouping_adapter.grouping_detail_for(self) do
              computations_detail.merge(sub_aggregation_detail(filter_interpreter, queries))
            end
          end
        end

        private

        def filter_detail(filter_interpreter, field_path)
          filtering_field_path = Filtering::FieldPath.of(field_path.filter_map(&:name_in_index))
          filter_clause = filter_interpreter.build_query([filter].compact, from_field_path: filtering_field_path)

          inner_detail = yield

          return inner_detail if filter_clause.nil?
          key = "#{name}:filtered"

          clause = {
            key => {
              "filter" => filter_clause,
              "aggs" => inner_detail.clauses
            }.compact
          }

          inner_meta = inner_detail.meta
          meta =
            if (buckets_path = inner_detail.meta["buckets_path"])
              # In this case, we have some grouping aggregations applied, and the response will include a `buckets` array.
              # Here we are prefixing the `buckets_path` with the `key` used for our filter aggregation to maintain its accuracy.
              inner_meta.merge({"buckets_path" => [key] + buckets_path})
            else
              # In this case, no grouping aggregations have been applied, and the response will _not_ have a `buckets` array.
              # Instead, we'll need to treat the single unbucketed aggregation as a single bucket. To indicate that, we use
              # `bucket_path` (singular) rather than `buckets_path` (plural).
              inner_meta.merge({"bucket_path" => [key]})
            end

          AggregationDetail.new(clause, meta)
        end

        def computations_detail
          build_inner_aggregation_detail(computations) do |computation|
            {computation.key(aggregation_name: name) => computation.clause}
          end
        end

        def sub_aggregation_detail(filter_interpreter, parent_queries)
          build_inner_aggregation_detail(sub_aggregations.values) do |sub_agg|
            sub_agg.build_agg_hash(filter_interpreter, parent_queries: parent_queries)
          end
        end

        def build_inner_aggregation_detail(collection, &block)
          collection.map(&block).reduce({}, :merge)
        end
      end

      # The details of an aggregation level, including the `aggs` clauses themselves and `meta`
      # that we want echoed back to us in the response for the aggregation level.
      AggregationDetail = ::Data.define(
        # Aggregation clauses that would go under `aggs`.
        :clauses,
        # Custom metadata that will be echoed back to us in the response.
        # https://www.elastic.co/guide/en/elasticsearch/reference/8.11/search-aggregations.html#add-metadata-to-an-agg
        :meta
      ) do
        # @implements AggregationDetail

        # Wraps this aggregation detail in another aggregation layer for the given `grouping`,
        # so that we can easily build up the necessary multi-level aggregation structure.
        def wrap_with_grouping(grouping, query:)
          agg_key = grouping.key
          extra_inner_meta = grouping.inner_meta.merge({
            # The response just includes tuples of values for the key of each bucket. We need to know what fields those
            # values come from, and this `meta` field indicates that.
            "grouping_fields" => [agg_key]
          })

          inner_agg_hash = {
            "aggs" => (clauses unless (clauses || {}).empty?),
            "meta" => meta.merge(extra_inner_meta)
          }.compact

          missing_bucket_inner_agg_hash = inner_agg_hash.key?("aggs") ? inner_agg_hash : {} # : ::Hash[::String, untyped]

          AggregationDetail.new(
            {
              agg_key => grouping.non_composite_clause_for(query).merge(inner_agg_hash),

              # Here we include a `missing` aggregation as a sibling to the main grouping aggregation. We do this
              # so that we get a bucket of documents that have `null` values for the field we are grouping on, in
              # order to provide the same behavior as the `CompositeGroupingAdapter` (which uses the built-in
              # `missing_bucket` option).
              #
              # To work correctly, we need to include this `missing` aggregation as a sibling at _every_ level of
              # the aggregation structure, and the `missing` aggregation needs the same child aggregations as the
              # main grouping aggregation has. Given the recursive nature of how this is applied, this results in
              # a fairly complex structure, even though conceptually the idea behind this isn't _too_ bad.
              Key.missing_value_bucket_key(agg_key) => {
                "missing" => {"field" => grouping.encoded_index_field_path}
              }.merge(missing_bucket_inner_agg_hash)
            },
            {"buckets_path" => [agg_key]}
          )
        end
      end
    end
  end
end
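To make the recursive structure above a bit more concrete, here is a rough, hand-written sketch of the kind of datastore `aggs` hash that `filter_detail` and `wrap_with_grouping` compose for a filtered sub-aggregation with a single term grouping. The aggregation name (`parts`), the field names, the filter clause, and the missing-bucket key format are hypothetical placeholders for illustration, not output captured from ElasticGraph.

# Hypothetical sketch (not generated output): roughly how the pieces above nest together.
example_filtered_grouping_aggs = {
  "parts:filtered" => {                                      # key built by `filter_detail` ("#{name}:filtered")
    "filter" => {"terms" => {"parts.in_stock" => [true]}},   # illustrative filter clause
    "aggs" => {
      "parts.manufacturer" => {                              # grouping clause produced via `wrap_with_grouping`
        "terms" => {"field" => "parts.manufacturer"},
        "meta" => {"grouping_fields" => ["parts.manufacturer"]}
      },
      "missing:parts.manufacturer" => {                      # sibling missing-value bucket (key format is illustrative)
        "missing" => {"field" => "parts.manufacturer"}
      }
    }
  }
}

In this sketch the accompanying `meta` returned by `filter_detail` would be along the lines of `{"buckets_path" => ["parts:filtered", "parts.manufacturer"]}`, since the filter key is prefixed onto the grouping's `buckets_path` so the response can still be navigated accurately.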
data/lib/elastic_graph/graphql/aggregation/query_adapter.rb
@@ -0,0 +1,345 @@
# Copyright 2024 Block, Inc.
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
#
# frozen_string_literal: true

require "elastic_graph/graphql/aggregation/composite_grouping_adapter"
require "elastic_graph/graphql/aggregation/computation"
require "elastic_graph/graphql/aggregation/date_histogram_grouping"
require "elastic_graph/graphql/aggregation/field_term_grouping"
require "elastic_graph/graphql/aggregation/nested_sub_aggregation"
require "elastic_graph/graphql/aggregation/path_segment"
require "elastic_graph/graphql/aggregation/query"
require "elastic_graph/graphql/aggregation/script_term_grouping"
require "elastic_graph/graphql/schema/arguments"
require "elastic_graph/support/memoizable_data"

module ElasticGraph
  class GraphQL
    module Aggregation
      # Responsible for taking in the incoming GraphQL request context, arguments, and the GraphQL
      # schema and directives and populating the `aggregations` portion of `query`.
      class QueryAdapter < Support::MemoizableData.define(:schema, :config, :filter_args_translator, :runtime_metadata, :sub_aggregation_grouping_adapter)
        # @dynamic element_names
        attr_reader :element_names

        def call(query:, lookahead:, args:, field:, context:)
          return query unless field.type.unwrap_fully.indexed_aggregation?

          aggregations_node = extract_aggregation_node(lookahead, field, context.query)
          return query unless aggregations_node

          aggregation_query = build_aggregation_query_for(
            aggregations_node,
            field: field,
            grouping_adapter: CompositeGroupingAdapter,
            # Filters on root aggregations are applied to the search query body itself instead of
            # using a filter aggregation, like sub-aggregations do, so we don't want a filter
            # aggregation generated here.
            unfiltered: true
          )

          query.merge_with(aggregations: {aggregation_query.name => aggregation_query})
        end

        private

        def after_initialize
          @element_names = schema.element_names
        end

        def extract_aggregation_node(lookahead, field, graphql_query)
          return nil unless (ast_nodes = lookahead.ast_nodes)

          if ast_nodes.size > 1
            names = ast_nodes.map { |n| "`#{name_of(n)}`" }
            raise_conflicting_grouping_requirement_selections("`#{lookahead.name}` selection with the same name", names)
          end

          ::GraphQL::Execution::Lookahead.new(
            query: graphql_query,
            ast_nodes: ast_nodes,
            field: lookahead.field,
            owner_type: field.parent_type.graphql_type
          )
        end

        def build_aggregation_query_for(aggregations_node, field:, grouping_adapter:, nested_path: [], unfiltered: false)
          aggregation_name = name_of(_ = aggregations_node.ast_nodes.first)

          # Get the AST node for the `nodes` subfield (e.g. from `fooAggregations { nodes { ... } }`)
          nodes_node = selection_above_grouping_fields(aggregations_node, element_names.nodes, aggregation_name)

          # Also get the AST node for `edges.node` (e.g. from `fooAggregations { edges { node { ... } } }`)
          edges_node_node = [element_names.edges, element_names.node].reduce(aggregations_node) do |node, sub_selection|
            selection_above_grouping_fields(node, sub_selection, aggregation_name)
          end

          # ...and then determine which one is being used for nodes.
          node_node =
            if nodes_node.selected? && edges_node_node.selected?
              raise_conflicting_grouping_requirement_selections("node selection", ["`#{element_names.nodes}`", "`#{element_names.edges}.#{element_names.node}`"])
            elsif !nodes_node.selected?
              edges_node_node
            else
              nodes_node
            end

          count_detail_node = node_node.selection(element_names.count_detail)
          needs_doc_count_error =
            # We need to know what the error is to determine if the approximate count is in fact the exact count.
            count_detail_node.selects?(element_names.exact_value) ||
            # We need to know what the error is to determine the upper bound on the count.
            count_detail_node.selects?(element_names.upper_bound)

          unless unfiltered
            filter = filter_args_translator.translate_filter_args(field: field, args: field.args_to_schema_form(aggregations_node.arguments))
          end

          Query.new(
            name: aggregation_name,
            groupings: build_groupings_from(node_node, aggregation_name, from_field_path: nested_path),
            computations: build_computations_from(node_node, from_field_path: nested_path),
            sub_aggregations: build_sub_aggregations_from(node_node, parent_nested_path: nested_path),
            needs_doc_count: count_detail_node.selected? || node_node.selects?(element_names.count),
            needs_doc_count_error: needs_doc_count_error,
            paginator: build_paginator_for(aggregations_node),
            filter: filter,
            grouping_adapter: grouping_adapter
          )
        end

        # Helper method for dealing with lookahead selections above the grouping fields. If the caller selects
        # such a field multiple times (e.g. with aliases) it leads to conflicting grouping requirements, so we
        # do not allow it.
        def selection_above_grouping_fields(node, sub_selection_name, aggregation_name)
          node.selection(sub_selection_name).tap do |nested_node|
            ast_nodes = nested_node.ast_nodes || []
            if ast_nodes.size > 1
              names = ast_nodes.map { |n| "`#{name_of(n)}`" }
              raise_conflicting_grouping_requirement_selections("`#{sub_selection_name}` selection under `#{aggregation_name}`", names)
            end
          end
        end

        def build_clauses_from(parent_node, &block)
          get_children_nodes(parent_node).flat_map do |child_node|
            transform_node_to_clauses(child_node, &block)
          end.to_set
        end

        # Takes a `GraphQL::Execution::Lookahead` node and returns an array of children
        # lookahead nodes, excluding nodes for introspection fields.
        def get_children_nodes(node)
          node.selections.reject do |child|
            child.field.introspection?
          end
        end

        # Takes a `GraphQL::Execution::Lookahead` node that conforms to our aggregate field
        # conventions (`some_field: {Type}Metric`) and returns a Hash compatible with the `aggregations`
        # argument to `DatastoreQuery.new`.
        def transform_node_to_clauses(node, parent_path: [], &clause_builder)
          field = field_from_node(node)
          field_path = parent_path + [PathSegment.for(field: field, lookahead: node)]

          clause_builder.call(node, field, field_path) || get_children_nodes(node).flat_map do |embedded_field|
            transform_node_to_clauses(embedded_field, parent_path: field_path, &clause_builder)
          end
        end

        def build_computations_from(node_node, from_field_path: [])
          aggregated_values_node = node_node.selection(element_names.aggregated_values)

          build_clauses_from(aggregated_values_node) do |node, field, field_path|
            if field.aggregated?
              field_path = from_field_path + field_path

              get_children_nodes(node).map do |fn_node|
                computed_field = field_from_node(fn_node)
                computation_detail = field_from_node(fn_node).computation_detail # : SchemaArtifacts::RuntimeMetadata::ComputationDetail

                Aggregation::Computation.new(
                  source_field_path: field_path,
                  computed_index_field_name: computed_field.name_in_index.to_s,
                  detail: computation_detail
                )
              end
            end
          end
        end

        def build_groupings_from(node_node, aggregation_name, from_field_path: [])
          grouped_by_node = selection_above_grouping_fields(node_node, element_names.grouped_by, aggregation_name)

          build_clauses_from(grouped_by_node) do |node, field, field_path|
            field_path = from_field_path + field_path

            # New date/time grouping API (DateGroupedBy, DateTimeGroupedBy)
            if field.type.elasticgraph_category == :date_grouped_by_object
              date_time_groupings_from(field_path: field_path, node: node)

            elsif !field.type.object?
              case field.type.name
              # Legacy date grouping API
              when :Date
                legacy_date_histogram_groupings_from(
                  field_path: field_path,
                  node: node,
                  get_time_zone: ->(args) {},
                  get_offset: ->(args) { args[element_names.offset_days]&.then { |days| "#{days}d" } }
                )
              # Legacy datetime grouping API
              when :DateTime
                legacy_date_histogram_groupings_from(
                  field_path: field_path,
                  node: node,
                  get_time_zone: ->(args) { args.fetch(element_names.time_zone) },
                  get_offset: ->(args) { datetime_offset_from(node, args) }
                )
              # Non-date/time grouping
              else
                [FieldTermGrouping.new(field_path: field_path)]
              end
            end
          end
        end

        # Given a `GraphQL::Execution::Lookahead` node, returns the corresponding `Schema::Field`
        def field_from_node(node)
          schema.field_named(node.owner_type.graphql_name, node.field.name)
        end

        # Returns an array of `...Grouping`, one for each child node (`as_date_time`, `as_date`, etc).
        def date_time_groupings_from(field_path:, node:)
          get_children_nodes(node).map do |child_node|
            schema_args = Schema::Arguments.to_schema_form(child_node.arguments, child_node.field)
            # Because `DateGroupedBy` doesn't have a `timeZone` argument, and we want to reuse the same
            # script for both `Date` and `DateTime`, we fall back to "UTC" here.
            time_zone = schema_args[element_names.time_zone] || "UTC"
            child_field_path = field_path + [PathSegment.for(lookahead: child_node)]

            if child_node.field.name == element_names.as_day_of_week
              ScriptTermGrouping.new(
                field_path: child_field_path,
                script_id: runtime_metadata.static_script_ids_by_scoped_name.fetch("field/as_day_of_week"),
                params: {
                  "offset_ms" => datetime_offset_as_ms_from(child_node, schema_args),
                  "time_zone" => time_zone
                }
              )
            elsif child_node.field.name == element_names.as_time_of_day
              ScriptTermGrouping.new(
                field_path: child_field_path,
                script_id: runtime_metadata.static_script_ids_by_scoped_name.fetch("field/as_time_of_day"),
                params: {
                  "interval" => interval_from(child_node, schema_args, interval_unit_key: element_names.truncation_unit),
                  "offset_ms" => datetime_offset_as_ms_from(child_node, schema_args),
                  "time_zone" => time_zone
                }
              )
            else
              DateHistogramGrouping.new(
                field_path: child_field_path,
                interval: interval_from(child_node, schema_args, interval_unit_key: element_names.truncation_unit),
                offset: datetime_offset_from(child_node, schema_args),
                time_zone: time_zone
              )
            end
          end
        end

        def legacy_date_histogram_groupings_from(field_path:, node:, get_time_zone:, get_offset:)
          schema_args = Schema::Arguments.to_schema_form(node.arguments, node.field)

          [DateHistogramGrouping.new(
            field_path: field_path,
            interval: interval_from(node, schema_args, interval_unit_key: element_names.granularity),
            time_zone: get_time_zone.call(schema_args),
            offset: get_offset.call(schema_args)
          )]
        end

        # Figure out the Date histogram grouping interval for the given node based on the `grouped_by` argument.
        # Until `legacy_grouping_schema` is removed, we need to check both `granularity` and `truncation_unit`.
        def interval_from(node, schema_args, interval_unit_key:)
          enum_type_name = node.field.arguments.fetch(interval_unit_key).type.unwrap.graphql_name
          enum_value_name = schema_args.fetch(interval_unit_key)
          enum_value = schema.type_named(enum_type_name).enum_value_named(enum_value_name)

          _ = enum_value.runtime_metadata.datastore_value
        end

        def datetime_offset_from(node, schema_args)
          if (unit_name = schema_args.dig(element_names.offset, element_names.unit))
            enum_value = enum_value_from_offset(node, unit_name)
            amount = schema_args.fetch(element_names.offset).fetch(element_names.amount)
            "#{amount}#{enum_value.runtime_metadata.datastore_abbreviation}"
          end
        end

        # Convert from amount and unit to milliseconds, using runtime metadata `datastore_value`
        def datetime_offset_as_ms_from(node, schema_args)
          unit_name = schema_args.dig(element_names.offset, element_names.unit)
          return 0 unless unit_name

          amount = schema_args.fetch(element_names.offset).fetch(element_names.amount)
          enum_value = enum_value_from_offset(node, unit_name)

          amount * enum_value.runtime_metadata.datastore_value
        end

        def enum_value_from_offset(node, unit_name)
          offset_input_type = node.field.arguments.fetch(element_names.offset).type.unwrap # : ::GraphQL::Schema::InputObject
          enum_type_name = offset_input_type.arguments.fetch(element_names.unit).type.unwrap.graphql_name
          schema.type_named(enum_type_name).enum_value_named(unit_name)
        end

        def name_of(ast_node)
          ast_node.alias || ast_node.name
        end

        def build_sub_aggregations_from(node_node, parent_nested_path: [])
          build_clauses_from(node_node.selection(element_names.sub_aggregations)) do |node, field, field_path|
            if field.type.elasticgraph_category == :nested_sub_aggregation_connection
              nested_path = parent_nested_path + field_path
              nested_sub_agg = NestedSubAggregation.new(
                nested_path: nested_path,
                query: build_aggregation_query_for(
                  node,
                  field: field,
                  grouping_adapter: sub_aggregation_grouping_adapter,
                  nested_path: nested_path
                )
              )

              [[nested_sub_agg.query.name, nested_sub_agg]]
            end
          end.to_h
        end

        def build_paginator_for(node)
          args = field_from_node(node).args_to_schema_form(node.arguments)

          DatastoreQuery::Paginator.new(
            first: args[element_names.first],
            after: args[element_names.after],
            last: args[element_names.last],
            before: args[element_names.before],
            default_page_size: config.default_page_size,
            max_page_size: config.max_page_size,
            schema_element_names: schema.element_names
          )
        end

        def raise_conflicting_grouping_requirement_selections(more_than_one_description, paths)
          raise ::GraphQL::ExecutionError, "Cannot have more than one #{more_than_one_description} " \
            "(#{paths.join(", ")}), because that could lead to conflicting grouping requirements."
        end
      end
    end
  end
end
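For orientation, here is a sketch of the kind of aggregations selection this adapter walks, annotated with the objects each part feeds into. It assumes the default camelCase renderings of the schema element names referenced in the code (`nodes`, `groupedBy`, `count`, `aggregatedValues`, `subAggregations`, `asDayOfWeek`); the root field and document fields (`widgetAggregations`, `createdAt`, `color`, `cost`, `approximateSum`, `parts`) are invented for illustration and are not taken from the gem.

# Hypothetical example query (not from the gem's test suite), expressed as a Ruby heredoc.
EXAMPLE_AGGREGATIONS_QUERY = <<~GRAPHQL
  query {
    widgetAggregations(first: 10) {    # build_paginator_for reads first/after/last/before
      nodes {                          # `nodes` (or `edges.node`) becomes `node_node`
        groupedBy {                    # build_groupings_from -> DateHistogramGrouping / FieldTermGrouping / ScriptTermGrouping
          createdAt { asDayOfWeek }
          color
        }
        count                          # sets needs_doc_count: true
        aggregatedValues {             # build_computations_from -> Aggregation::Computation
          cost { approximateSum }
        }
        subAggregations {              # build_sub_aggregations_from -> NestedSubAggregation
          parts { nodes { count } }
        }
      }
    }
  }
GRAPHQL

Each selection maps onto one argument of the `Query.new` call in `build_aggregation_query_for`, which is how a single GraphQL request fans out into groupings, computations, sub-aggregations, and pagination settings on the datastore query.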