elasticgraph-admin 0.18.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA256:
+   metadata.gz: 6202cf82573c7c499ad9615f590f14272dda1b605617f5680bcec1f97a648be6
+   data.tar.gz: eb0f47afd2cafb1a91b868097ac6e65f51ba4bef76ff307762c6f10bc2a1fbaa
+ SHA512:
+   metadata.gz: 641c7cf2668a7ae77b4aa7e53d333193e09e052c834cff99792dd75a2213a9b6c441c021665ebf6cc9a5b425368ff8ecec8e1af48bc8cc7d8b7dfdfa8d8a13ff
+   data.tar.gz: ed4aede0dfa748e7a6f97bf71d51112e6b5e1d97c44a8fd2a08f928063c23b878c4bcae1d8ae4872094229e304328422250885ae43e92fdb054264ee6925ac69
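The values above can be checked against the files packed inside the downloaded `.gem` archive (a plain tar containing `metadata.gz`, `data.tar.gz`, and this `checksums.yaml`). A minimal verification sketch, assuming both files have already been extracted into the current directory (the file paths are placeholders, not part of the package):

  require "digest"

  # Compare these digests against the SHA256 entries in checksums.yaml.
  %w[metadata.gz data.tar.gz].each do |name|
    puts "#{name}: #{Digest::SHA256.file(name).hexdigest}"
  end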
data/LICENSE.txt ADDED
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2024 Block, Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
data/README.md ADDED
@@ -0,0 +1,3 @@
+ # ElasticGraph::Admin
+
+ Provides datastore administrative tasks for ElasticGraph.
data/elasticgraph-admin.gemspec ADDED
@@ -0,0 +1,23 @@
+ # Copyright 2024 Block, Inc.
+ #
+ # Use of this source code is governed by an MIT-style
+ # license that can be found in the LICENSE file or at
+ # https://opensource.org/licenses/MIT.
+ #
+ # frozen_string_literal: true
+
+ require_relative "../gemspec_helper"
+
+ ElasticGraphGemspecHelper.define_elasticgraph_gem(gemspec_file: __FILE__, category: :core) do |spec, eg_version|
+   spec.summary = "ElasticGraph gem that provides datastore administrative tasks, to keep a datastore up-to-date with an ElasticGraph schema."
+
+   spec.add_dependency "elasticgraph-datastore_core", eg_version
+   spec.add_dependency "elasticgraph-indexer", eg_version
+   spec.add_dependency "elasticgraph-schema_artifacts", eg_version
+   spec.add_dependency "elasticgraph-support", eg_version
+   spec.add_dependency "rake", "~> 13.2"
+
+   spec.add_development_dependency "elasticgraph-elasticsearch", eg_version
+   spec.add_development_dependency "elasticgraph-opensearch", eg_version
+   spec.add_development_dependency "elasticgraph-schema_definition", eg_version
+ end
data/lib/elastic_graph/admin/cluster_configurator/action_reporter.rb ADDED
@@ -0,0 +1,23 @@
+ # Copyright 2024 Block, Inc.
+ #
+ # Use of this source code is governed by an MIT-style
+ # license that can be found in the LICENSE file or at
+ # https://opensource.org/licenses/MIT.
+ #
+ # frozen_string_literal: true
+
+ module ElasticGraph
+   class Admin
+     class ClusterConfigurator
+       class ActionReporter
+         def initialize(output)
+           @output = output
+         end
+
+         def report_action(message)
+           @output.puts "#{message.chomp}\n#{"=" * 80}\n"
+         end
+       end
+     end
+   end
+ end
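A minimal sketch (not part of the package) of the reporting format: `report_action` prints the message followed by a divider line of 80 `=` characters.

  reporter = ElasticGraph::Admin::ClusterConfigurator::ActionReporter.new($stdout)
  reporter.report_action("Created index: `widgets`")
  # Prints:
  # Created index: `widgets`
  # ================================================================================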
data/lib/elastic_graph/admin/cluster_configurator/cluster_settings_manager.rb ADDED
@@ -0,0 +1,99 @@
+ # Copyright 2024 Block, Inc.
+ #
+ # Use of this source code is governed by an MIT-style
+ # license that can be found in the LICENSE file or at
+ # https://opensource.org/licenses/MIT.
+ #
+ # frozen_string_literal: true
+
+ require "elastic_graph/error"
+
+ module ElasticGraph
+   class Admin
+     class ClusterConfigurator
+       # Responsible for updating datastore cluster settings based on the mode EG is in, maintenance mode or indexing mode
+       class ClusterSettingsManager
+         def initialize(datastore_clients_by_name:, datastore_config:, logger:)
+           @datastore_clients_by_name = datastore_clients_by_name
+           @datastore_config = datastore_config
+           @logger = logger
+         end
+
+         # Starts index maintenance mode, if it has not already been started. This method is idempotent.
+         #
+         # In index maintenance mode, you can safely delete or update the index configuration without
+         # worrying about indices being auto-created with dynamic mappings (e.g. due to an indexing
+         # race condition). While in this mode, indexing operations on documents that fall into new rollover
+         # indices may fail since the auto-creation of those indices is disabled.
+         #
+         # `cluster_spec` can be the name of a specific cluster (as a string) or `:all_clusters`.
+         def start_index_maintenance_mode!(cluster_spec)
+           cluster_names_for(cluster_spec).each do |cluster_name|
+             datastore_client_named(cluster_name).put_persistent_cluster_settings(desired_cluster_settings(cluster_name))
+           end
+         end
+
+         # Ends index maintenance mode, if it has not already ended. This method is idempotent.
+         #
+         # Outside of this mode, you cannot safely delete or update the index configuration. However,
+         # new rollover indices will correctly be auto-created as documents that fall in new months or
+         # years are indexed.
+         #
+         # `cluster_spec` can be the name of a specific cluster (as a string) or `:all_clusters`.
+         def end_index_maintenance_mode!(cluster_spec)
+           cluster_names_for(cluster_spec).each do |cluster_name|
+             datastore_client_named(cluster_name).put_persistent_cluster_settings(
+               desired_cluster_settings(cluster_name, auto_create_index_patterns: ["*#{ROLLOVER_INDEX_INFIX_MARKER}*"])
+             )
+           end
+         end
+
+         # Runs a block in index maintenance mode. Should be used to wrap any code that updates your index configuration.
+         #
+         # `cluster_spec` can be the name of a specific cluster (as a string) or `:all_clusters`.
+         def in_index_maintenance_mode(cluster_spec)
+           start_index_maintenance_mode!(cluster_spec)
+
+           begin
+             yield
+           rescue => e
+             @logger.warn "WARNING: ClusterSettingsManager#in_index_maintenance_mode is not able to exit index maintenance mode due to exception #{e}.\n A bit of manual cleanup may be required (although a re-try should be idempotent)."
+             raise # re-raise the same error
+           else
+             # Note: we intentionally do not end maintenance mode in an `ensure` block, because if an exception
+             # happens while we `yield`, we do _not_ want to exit maintenance mode. Exiting maintenance mode
+             # could put us in a state where indices are dynamically created when we do not want them to be.
+             end_index_maintenance_mode!(cluster_spec)
+           end
+         end
+
+         private
+
+         def desired_cluster_settings(cluster_name, auto_create_index_patterns: [])
+           {
+             # https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation
+             #
+             # We generally want to disable automatic index creation in order to require all indices to be properly
+             # defined and configured. However, we must allow kibana to create some indices for it to be usable
+             # (https://discuss.elastic.co/t/elasticsearchs-action-auto-create-index-setting-impact-on-kibana/117701).
+             "action.auto_create_index" => ([".kibana*"] + auto_create_index_patterns).map { |p| "+#{p}" }.join(",")
+           }.merge(@datastore_config.clusters.fetch(cluster_name).settings)
+         end
+
+         def datastore_client_named(cluster_name)
+           @datastore_clients_by_name.fetch(cluster_name) do
+             raise ClusterOperationError,
+               "Unknown datastore cluster name: `#{cluster_name}`. Valid cluster names: #{@datastore_clients_by_name.keys}"
+           end
+         end
+
+         def cluster_names_for(cluster_spec)
+           case cluster_spec
+           when :all_clusters then @datastore_clients_by_name.keys
+           else [cluster_spec]
+           end
+         end
+       end
+     end
+   end
+ end
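A minimal usage sketch (not part of the package), assuming `settings_manager` is an already-constructed ClusterSettingsManager and `"main"` is a cluster name from your datastore configuration. Wrapping index configuration changes in `in_index_maintenance_mode` disables index auto-creation (except for `.kibana*` indices) while the block runs, and re-enables auto-creation for rollover indices only after the block succeeds:

  # Disable auto-creation on one named cluster, apply index changes, then
  # re-enable rollover-index auto-creation (only if no exception was raised).
  settings_manager.in_index_maintenance_mode("main") do
    apply_index_configuration_changes # hypothetical method that updates index configuration
  end

  # The same, applied to every configured cluster at once:
  settings_manager.in_index_maintenance_mode(:all_clusters) do
    apply_index_configuration_changes
  end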
data/lib/elastic_graph/admin/cluster_configurator/script_configurator.rb ADDED
@@ -0,0 +1,54 @@
+ # Copyright 2024 Block, Inc.
+ #
+ # Use of this source code is governed by an MIT-style
+ # license that can be found in the LICENSE file or at
+ # https://opensource.org/licenses/MIT.
+ #
+ # frozen_string_literal: true
+
+ require "elastic_graph/admin/cluster_configurator/action_reporter"
+ require "elastic_graph/error"
+
+ module ElasticGraph
+   class Admin
+     class ClusterConfigurator
+       class ScriptConfigurator
+         def initialize(datastore_client:, script_context:, script_id:, script:, output:)
+           @datastore_client = datastore_client
+           @script_context = script_context
+           @script_id = script_id
+           @script = script
+           @action_reporter = ActionReporter.new(output)
+         end
+
+         def validate
+           case existing_datastore_script
+           when :not_found, @script
+             []
+           else
+             [
+               "#{@script_context} script #{@script_id} already exists in the datastore but has different contents. " \
+                 "\n\nScript in the datastore:\n#{::YAML.dump(existing_datastore_script)}" \
+                 "\n\nDesired script:\n#{::YAML.dump(@script)}"
+             ]
+           end
+         end
+
+         def configure!
+           if existing_datastore_script == :not_found
+             @datastore_client.put_script(id: @script_id, body: {script: @script}, context: @script_context)
+             @action_reporter.report_action "Stored #{@script_context} script: #{@script_id}"
+           end
+         end
+
+         private
+
+         def existing_datastore_script
+           @existing_datastore_script ||= @datastore_client
+             .get_script(id: @script_id)
+             &.fetch("script") || :not_found
+         end
+       end
+     end
+   end
+ end
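A minimal sketch (not part of the package) of the two-step protocol this class exposes: `validate` returns an array of error strings (empty when it is safe to proceed), and `configure!` stores the script only when it is not already present in the datastore. The client, script id, and script body below are hypothetical placeholders:

  configurator = ElasticGraph::Admin::ClusterConfigurator::ScriptConfigurator.new(
    datastore_client: datastore_client,  # hypothetical datastore client
    script_context: "update",
    script_id: "update_widget_abc123",   # script ids embed a hash of the script contents
    script: {"lang" => "painless", "source" => "ctx._source.counter += 1"},
    output: $stdout
  )

  errors = configurator.validate
  raise errors.join("\n") unless errors.empty?
  configurator.configure! # stores and reports the script, if it was not already stored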
data/lib/elastic_graph/admin/cluster_configurator.rb ADDED
@@ -0,0 +1,104 @@
+ # Copyright 2024 Block, Inc.
+ #
+ # Use of this source code is governed by an MIT-style
+ # license that can be found in the LICENSE file or at
+ # https://opensource.org/licenses/MIT.
+ #
+ # frozen_string_literal: true
+
+ require "elastic_graph/admin/cluster_configurator/script_configurator"
+ require "elastic_graph/admin/index_definition_configurator"
+ require "elastic_graph/error"
+ require "stringio"
+
+ module ElasticGraph
+   class Admin
+     # Facade responsible for overall cluster configuration. Delegates to other classes as
+     # necessary to configure different aspects of the cluster (such as index configuration,
+     # cluster settings, etc).
+     class ClusterConfigurator
+       def initialize(
+         datastore_clients_by_name:,
+         index_defs:,
+         index_configurations_by_name:,
+         index_template_configurations_by_name:,
+         scripts:,
+         cluster_settings_manager:,
+         clock:
+       )
+         @datastore_clients_by_name = datastore_clients_by_name
+         @index_defs = index_defs
+         @index_configurations_by_name = index_configurations_by_name.merge(index_template_configurations_by_name)
+         @scripts_by_id = scripts
+         @cluster_settings_manager = cluster_settings_manager
+         @clock = clock
+       end
+
+       # Attempts to configure all aspects of the datastore cluster. Known/expected failure
+       # cases are pre-validated so that an error can be raised before applying any changes to
+       # any indices, so that we hopefully don't wind up in a "partially configured" state.
+       def configure_cluster(output)
+         # Note: we do not want to cache `index_configurators_for` here in a variable, because it's important
+         # for our tests that different instances are used for `validate` vs `configure!`. That's the case because
+         # each `index_configurator` memoizes some datastore responses (e.g. when it fetches the settings or
+         # mappings for an index...). In our tests, we use different datastore clients that connect to the same
+         # datastore server, and that means that when we reuse the same `index_configurator`, the datastore
+         # index winds up being mutated (via another client) in between `validate` and `configure!` breaking assumptions
+         # of the datastore response memoization. By using different index configurators for the two steps it
+         # avoids some odd bugs.
+         script_configurators = script_configurators_for(output)
+
+         errors = script_configurators.flat_map(&:validate) + index_definition_configurators_for(output).flat_map(&:validate)
+
+         if errors.any?
+           error_descriptions = errors.map.with_index do |error, index|
+             "#{index + 1}): #{error}"
+           end.join("\n#{"=" * 80}\n\n")
+
+           raise ClusterOperationError, "Got #{errors.size} validation error(s):\n\n#{error_descriptions}"
+         end
+
+         script_configurators.each(&:configure!)
+
+         @cluster_settings_manager.in_index_maintenance_mode(:all_clusters) do
+           index_definition_configurators_for(output).each(&:configure!)
+         end
+       end
+
+       def accessible_index_definitions
+         @accessible_index_definitions ||= @index_defs.reject { |i| i.all_accessible_cluster_names.empty? }
+       end
+
+       private
+
+       def script_configurators_for(output)
+         # It's a bit tricky to know which datastore cluster a script is needed in (the script metadata
+         # doesn't store that), but storing a script in a cluster that doesn't need it causes no harm. The
+         # id of each script contains the hash of its contents so there's no possibility of different clusters
+         # needing a script with the same `id` to have different contents. So here we create a script configurator
+         # for each datastore client.
+         @datastore_clients_by_name.values.flat_map do |datastore_client|
+           @scripts_by_id.map do |id, payload|
+             ScriptConfigurator.new(
+               datastore_client: datastore_client,
+               script_context: payload.fetch("context"),
+               script_id: id,
+               script: payload.fetch("script"),
+               output: output
+             )
+           end
+         end
+       end
+
+       def index_definition_configurators_for(output)
+         @index_defs.flat_map do |index_def|
+           env_agnostic_config = @index_configurations_by_name.fetch(index_def.name)
+
+           index_def.all_accessible_cluster_names.map do |cluster_name|
+             IndexDefinitionConfigurator.new(@datastore_clients_by_name.fetch(cluster_name), index_def, env_agnostic_config, output, @clock)
+           end
+         end
+       end
+     end
+   end
+ end
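A minimal usage sketch (not part of the package), assuming `cluster_configurator` is an already-wired ClusterConfigurator instance (in practice it is built from your ElasticGraph configuration rather than constructed by hand). `configure_cluster` validates scripts and index definitions first and raises `ClusterOperationError` before applying anything if validation errors are found; otherwise it stores scripts and then applies index changes inside maintenance mode:

  require "stringio"

  output = StringIO.new
  cluster_configurator.configure_cluster(output)
  puts output.string # one report (message plus divider) per action that was applied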
data/lib/elastic_graph/admin/datastore_client_dry_run_decorator.rb ADDED
@@ -0,0 +1,76 @@
+ # Copyright 2024 Block, Inc.
+ #
+ # Use of this source code is governed by an MIT-style
+ # license that can be found in the LICENSE file or at
+ # https://opensource.org/licenses/MIT.
+ #
+ # frozen_string_literal: true
+
+ require "forwardable"
+
+ module ElasticGraph
+   class Admin
+     # Decorator that wraps a datastore client in order to implement dry run behavior.
+     # All write operations are implemented as no-ops, while read operations are passed through
+     # to the wrapped datastore client.
+     #
+     # We prefer this over having to check a `dry_run` flag in many places because that's
+     # easy to forget. One mistake and a dry run isn't truly a dry run!
+     #
+     # In contrast, this gives us a strong guarantee that dry run mode truly avoids mutating
+     # any datastore state. This decorator specifically picks and chooses which operations it
+     # allows.
+     #
+     # - Read operations are forwarded to the wrapped datastore client.
+     # - Write operations are implemented as no-ops.
+     #
+     # If/when the calling code evolves to call a new method on this, it'll trigger
+     # `NoMethodError`, giving us a good chance to evaluate how this decorator should
+     # support a particular API. This is also why this doesn't use Ruby's `delegate` library,
+     # because we don't want methods automatically delegated; we want to opt-in to only the read-only methods.
+     class DatastoreClientDryRunDecorator
+       extend Forwardable
+
+       def initialize(wrapped_client)
+         @wrapped_client = wrapped_client
+       end
+
+       # Cluster APIs
+       def_delegators :@wrapped_client, :get_flat_cluster_settings, :get_cluster_health
+
+       def put_persistent_cluster_settings(*) = nil
+
+       # Script APIs
+       def_delegators :@wrapped_client, :get_script
+
+       def put_script(*) = nil
+
+       def delete_script(*) = nil
+
+       # Index Template APIs
+       def_delegators :@wrapped_client, :get_index_template
+
+       def delete_index_template(*) = nil
+
+       def put_index_template(*) = nil
+
+       # Index APIs
+       def_delegators :@wrapped_client, :get_index, :list_indices_matching
+
+       def delete_indices(*) = nil
+
+       def create_index(*) = nil
+
+       def put_index_mapping(*) = nil
+
+       def put_index_settings(*) = nil
+
+       # Document APIs
+       def_delegators :@wrapped_client, :get, :search, :msearch
+
+       def delete_all_documents(*) = nil
+
+       def bulk(*) = nil
+     end
+   end
+ end
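A minimal sketch (not part of the package) of the decorator's behavior: read methods are forwarded to the wrapped client, while write methods silently return `nil`, so the rest of the admin code can run a full configuration pass without mutating any cluster state. `real_client` stands in for an elasticgraph-elasticsearch or elasticgraph-opensearch client:

  dry_run_client = ElasticGraph::Admin::DatastoreClientDryRunDecorator.new(real_client)

  dry_run_client.get_script(id: "some_script_id")         # read: forwarded to real_client
  dry_run_client.create_index(index: "widgets", body: {}) # write: no-op, returns nil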
data/lib/elastic_graph/admin/index_definition_configurator/for_index.rb ADDED
@@ -0,0 +1,194 @@
+ # Copyright 2024 Block, Inc.
+ #
+ # Use of this source code is governed by an MIT-style
+ # license that can be found in the LICENSE file or at
+ # https://opensource.org/licenses/MIT.
+ #
+ # frozen_string_literal: true
+
+ require "elastic_graph/admin/cluster_configurator/action_reporter"
+ require "elastic_graph/datastore_core/index_config_normalizer"
+ require "elastic_graph/indexer/hash_differ"
+ require "elastic_graph/support/hash_util"
+
+ module ElasticGraph
+   class Admin
+     module IndexDefinitionConfigurator
+       # Responsible for managing an index's configuration, including both mappings and settings.
+       class ForIndex
+         # @dynamic index
+
+         attr_reader :index
+
+         def initialize(datastore_client, index, env_agnostic_index_config, output)
+           @datastore_client = datastore_client
+           @index = index
+           @env_agnostic_index_config = env_agnostic_index_config
+           @reporter = ClusterConfigurator::ActionReporter.new(output)
+         end
+
+         # Attempts to idempotently update the index configuration to the desired configuration
+         # exposed by the `IndexDefinition` object. Based on the configuration of the passed index
+         # and the state of the index in the datastore, does one of the following:
+         #
+         # - If the index did not already exist: creates the index with the desired mappings and settings.
+         # - If the desired mapping has fewer fields than what is in the index: raises an exception,
+         #   because the datastore provides no way to remove fields from a mapping and it would be confusing
+         #   for this method to silently ignore the issue.
+         # - If the settings have desired changes: updates the settings, restoring any setting that
+         #   no longer has a desired value to its default.
+         # - If the mapping has desired changes: updates the mappings.
+         #
+         # Note that any of the writes to the index may fail. There are many things that cannot
+         # be changed on an existing index (such as static settings, field mapping types, etc). We do not attempt
+         # to validate those things ahead of time and instead rely on the datastore to fail if an invalid operation
+         # is attempted.
+         def configure!
+           return create_new_index unless index_exists?
+
+           # Update settings before mappings, to front-load the API call that is more likely to fail.
+           # Our `validate` method guards against mapping changes that are known to be disallowed by
+           # the datastore, but it is much harder to validate that for settings, because there are so
+           # many settings, and there is not clear documentation that outlines all settings, which can
+           # be updated on existing indices, etc.
+           #
+           # If we get a failure, we'd rather it happen before any changes are applied to the index, instead
+           # of applying the mappings and then failing on the settings.
+           update_settings if settings_updates.any?
+
+           update_mapping if has_mapping_updates?
+         end
+
+         def validate
+           if index_exists? && mapping_type_changes.any?
+             [cannot_modify_mapping_field_type_error]
+           else
+             []
+           end
+         end
+
+         private
+
+         def create_new_index
+           @datastore_client.create_index(index: @index.name, body: desired_config)
+           report_action "Created index: `#{@index.name}`"
+         end
+
+         def update_mapping
+           @datastore_client.put_index_mapping(index: @index.name, body: desired_mapping)
+           action_description = "Updated mappings for index `#{@index.name}`:\n#{mapping_diff}"
+
+           if mapping_removals.any?
+             action_description += "\n\nNote: the extra fields listed here will not actually get removed. " \
+               "Mapping removals are unsupported (but ElasticGraph will leave them alone and they'll cause no problems)."
+           end
+
+           report_action action_description
+         end
+
+         def update_settings
+           @datastore_client.put_index_settings(index: @index.name, body: settings_updates)
+           report_action "Updated settings for index `#{@index.name}`:\n#{settings_diff}"
+         end
+
+         def cannot_modify_mapping_field_type_error
+           "The datastore does not support modifying the type of a field from an existing index definition. " \
+             "You are attempting to update type of fields (#{mapping_type_changes.inspect}) from the #{@index.name} index definition."
+         end
+
+         def index_exists?
+           !current_config.empty?
+         end
+
+         def mapping_removals
+           @mapping_removals ||= mapping_fields_from(current_mapping) - mapping_fields_from(desired_mapping)
+         end
+
+         def mapping_type_changes
+           @mapping_type_changes ||= begin
+             flattened_current = Support::HashUtil.flatten_and_stringify_keys(current_mapping)
+             flattened_desired = Support::HashUtil.flatten_and_stringify_keys(desired_mapping)
+
+             flattened_current.keys.select do |key|
+               key.end_with?(".type") && flattened_desired.key?(key) && flattened_desired[key] != flattened_current[key]
+             end
+           end
+         end
+
+         def has_mapping_updates?
+           current_mapping != desired_mapping
+         end
+
+         def settings_updates
+           @settings_updates ||= begin
+             # Updating a setting to null will cause the datastore to restore the default value of the setting.
+             restore_to_defaults = (current_settings.keys - desired_settings.keys).to_h { |key| [key, nil] }
+             desired_settings.select { |key, value| current_settings[key] != value }.merge(restore_to_defaults)
+           end
+         end
+
+         def mapping_fields_from(mapping_hash, prefix = "")
+           (mapping_hash["properties"] || []).flat_map do |key, params|
+             field = prefix + key
+             if params.key?("properties")
+               [field] + mapping_fields_from(params, "#{field}.")
+             else
+               [field]
+             end
+           end
+         end
+
+         def desired_mapping
+           desired_config.fetch("mappings")
+         end
+
+         def desired_settings
+           @desired_settings ||= desired_config.fetch("settings")
+         end
+
+         def desired_config
+           @desired_config ||= begin
+             # _meta is place where we can record state on the index mapping in the datastore.
+             # We want to maintain `_meta.ElasticGraph.sources` as an append-only set of all sources that have ever
+             # been configured to flow into an index, so that we can remember whether or not an index which currently
+             # has no `sourced_from` from fields ever did. This is necessary for our automatic filtering of multi-source
+             # indexes.
+             previously_recorded_sources = current_mapping.dig("_meta", "ElasticGraph", "sources") || []
+             sources = previously_recorded_sources.union(@index.current_sources.to_a).sort
+
+             DatastoreCore::IndexConfigNormalizer.normalize(Support::HashUtil.deep_merge(@env_agnostic_index_config, {
+               "mappings" => {"_meta" => {"ElasticGraph" => {"sources" => sources}}},
+               "settings" => @index.flattened_env_setting_overrides
+             }))
+           end
+         end
+
+         def current_mapping
+           current_config["mappings"] || {}
+         end
+
+         def current_settings
+           @current_settings ||= current_config["settings"]
+         end
+
+         def current_config
+           @current_config ||= DatastoreCore::IndexConfigNormalizer.normalize(
+             @datastore_client.get_index(@index.name)
+           )
+         end
+
+         def mapping_diff
+           @mapping_diff ||= Indexer::HashDiffer.diff(current_mapping, desired_mapping) || "(no diff)"
+         end
+
+         def settings_diff
+           @settings_diff ||= Indexer::HashDiffer.diff(current_settings, desired_settings) || "(no diff)"
+         end
+
+         def report_action(message)
+           @reporter.report_action(message)
+         end
+       end
+     end
+   end
+ end
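A minimal usage sketch (not part of the package), assuming `datastore_client`, `index_def` (an index definition object) and `env_agnostic_config` (the dumped configuration for that index) come from your ElasticGraph setup. `validate` surfaces changes the datastore would reject (such as changing an existing field's mapping type), and `configure!` then either creates the index or applies settings and mapping updates:

  configurator = ElasticGraph::Admin::IndexDefinitionConfigurator::ForIndex.new(
    datastore_client, index_def, env_agnostic_config, $stdout
  )

  errors = configurator.validate
  if errors.empty?
    configurator.configure!
  else
    warn errors.join("\n")
  end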