schema-tools 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,211 @@
+ module SchemaTools
+   module Migrate
+     class Rollback
+       def initialize(alias_name, current_index, catchup1_index, new_index, client, logger)
+         @alias_name = alias_name
+         @current_index = current_index
+         @catchup1_index = catchup1_index
+         @new_index = new_index
+         @client = client
+         @logger = logger
+       end
+
+       def attempt_rollback(original_error)
+         @logger.log "=" * 60
+         @logger.log "🔄 ATTEMPTING ROLLBACK DUE TO STEP 3 FAILURE"
+         @logger.log "=" * 60
+         @logger.log "Original error: #{original_error.message}"
+         @logger.log ""
+         @logger.log "Rolling back to original state..."
+         @logger.log "This will preserve any data written during migration and restore the alias to the original index."
+         @logger.log ""
+
+         begin
+           # Step 1: Stop writes by making alias read-only
+           @logger.log "🔄 ROLLBACK STEP 1: Stopping writes to prevent data loss..."
+           stop_writes
+
+           # Step 2: Reindex catchup changes back to original index
+           @logger.log "🔄 ROLLBACK STEP 2: Reindexing catchup changes back to original index..."
+           reindex_catchup_to_original
+
+           # Step 3: Restore alias to original state
+           @logger.log "🔄 ROLLBACK STEP 3: Restoring alias to original index..."
+           restore_alias_to_original
+
+           # Step 4: Clean up created indexes
+           @logger.log "🔄 ROLLBACK STEP 4: Cleaning up created indexes..."
+           cleanup_indexes
+
+           @logger.log "=" * 60
+           @logger.log "✅ ROLLBACK COMPLETED SUCCESSFULLY"
+           @logger.log "=" * 60
+           @logger.log "The alias '#{@alias_name}' has been restored to point to the original index '#{@current_index}'"
+           @logger.log "All data written during migration has been preserved in the original index."
+           @logger.log "All created indexes have been cleaned up."
+           @logger.log "You can now:"
+           @logger.log " 1. Fix the issue that caused the reindex to fail"
+           @logger.log " 2. Re-run the migration: rake 'schema:migrate[#{@alias_name}]'"
+           @logger.log " 3. Check the migration log: #{@logger.instance_variable_get(:@migration_log_index)}"
+           @logger.log ""
+
+         rescue => rollback_error
+           @logger.log "=" * 60
+           @logger.log "❌ ROLLBACK FAILED"
+           @logger.log "=" * 60
+           @logger.log "Rollback error: #{rollback_error.message}"
+           @logger.log ""
+           log_rollback_instructions(original_error, rollback_error)
+         end
+       end
+
+       private
+
+       def stop_writes
+         # Configure alias to read-only: read from both original and catchup, write to neither
+         actions = []
+
+         # Remove write access from catchup-1 index
+         actions << {
+           remove: {
+             index: @catchup1_index,
+             alias: @alias_name
+           }
+         }
+
+         # Add read-only access to original index
+         actions << {
+           add: {
+             index: @current_index,
+             alias: @alias_name,
+             is_write_index: false
+           }
+         }
+
+         # Add read-only access to catchup-1 index (if it exists)
+         if @client.index_exists?(@catchup1_index)
+           actions << {
+             add: {
+               index: @catchup1_index,
+               alias: @alias_name,
+               is_write_index: false
+             }
+           }
+         end
+
+         @client.update_aliases(actions)
+         @logger.log "✓ Writes stopped - alias is now read-only"
+       end
+
+       def reindex_catchup_to_original
+         # Check if catchup-1 index has any documents
+         doc_count = @client.get_index_doc_count(@catchup1_index)
+
+         if doc_count > 0
+           @logger.log "📊 Found #{doc_count} documents in catchup-1 index - reindexing to original..."
+
+           # Reindex from catchup-1 to original index
+           response = @client.reindex(@catchup1_index, @current_index, nil)
+           @logger.log "Reindex task started - task_id: #{response['task']}"
+
+           # Wait for reindex to complete
+           @client.wait_for_task(response['task'])
+           @logger.log "✓ Catchup data successfully reindexed to original index"
+         else
+           @logger.log "✓ No documents in catchup-1 index - skipping reindex"
+         end
+       end
+
+       def restore_alias_to_original
+         # Remove all aliases and restore to original state only
+         actions = []
+
+         # Remove alias from catchup-1 index (if it exists)
+         if @client.index_exists?(@catchup1_index)
+           actions << {
+             remove: {
+               index: @catchup1_index,
+               alias: @alias_name
+             }
+           }
+         end
+
+         # Add alias back to original index only
+         actions << {
+           add: {
+             index: @current_index,
+             alias: @alias_name,
+             is_write_index: true
+           }
+         }
+
+         @client.update_aliases(actions)
+         @logger.log "✓ Alias restored to original index: #{@current_index}"
+       end
+
+       def cleanup_indexes
+         # Clean up catchup-1 index
+         if @client.index_exists?(@catchup1_index)
+           @client.delete_index(@catchup1_index)
+           @logger.log "✓ Deleted catchup-1 index: #{@catchup1_index}"
+         else
+           @logger.log "⚠️ Catchup-1 index does not exist: #{@catchup1_index}"
+         end
+
+         # Clean up new index if it was created
+         if @client.index_exists?(@new_index)
+           @client.delete_index(@new_index)
+           @logger.log "✓ Deleted new index: #{@new_index}"
+         else
+           @logger.log "⚠️ New index does not exist: #{@new_index}"
+         end
+       end
+
+       def log_rollback_instructions(original_error, rollback_error = nil)
+         @logger.log "=" * 60
+         @logger.log "❌ MANUAL ROLLBACK REQUIRED"
+         @logger.log "=" * 60
+         @logger.log "The automatic rollback failed. You need to manually restore the system."
+         @logger.log ""
+         @logger.log "Current state:"
+         @logger.log " - Alias: #{@alias_name}"
+         @logger.log " - Original index: #{@current_index}"
+         @logger.log " - Catchup-1 index: #{@catchup1_index}"
+         @logger.log " - New index: #{@new_index}"
+         @logger.log ""
+         @logger.log "Manual rollback steps:"
+         @logger.log "1. Stop writes by making alias read-only:"
+         @logger.log " curl -X POST 'http://localhost:9200/_aliases' -H 'Content-Type: application/json' -d '{"
+         @logger.log " \"actions\": ["
+         @logger.log " { \"remove\": { \"index\": \"#{@catchup1_index}\", \"alias\": \"#{@alias_name}\" } },"
+         @logger.log " { \"add\": { \"index\": \"#{@current_index}\", \"alias\": \"#{@alias_name}\", \"is_write_index\": false } }"
+         @logger.log " ]"
+         @logger.log " }'"
+         @logger.log ""
+         @logger.log "2. Reindex catchup data to original (if needed):"
+         @logger.log " curl -X POST 'http://localhost:9200/_reindex' -H 'Content-Type: application/json' -d '{"
+         @logger.log " \"source\": { \"index\": \"#{@catchup1_index}\" },"
+         @logger.log " \"dest\": { \"index\": \"#{@current_index}\" }"
+         @logger.log " }'"
+         @logger.log ""
+         @logger.log "3. Restore alias to original index:"
+         @logger.log " curl -X POST 'http://localhost:9200/_aliases' -H 'Content-Type: application/json' -d '{"
+         @logger.log " \"actions\": ["
+         @logger.log " { \"remove\": { \"index\": \"#{@catchup1_index}\", \"alias\": \"#{@alias_name}\" } },"
+         @logger.log " { \"add\": { \"index\": \"#{@current_index}\", \"alias\": \"#{@alias_name}\", \"is_write_index\": true } }"
+         @logger.log " ]"
+         @logger.log " }'"
+         @logger.log ""
+         @logger.log "4. Clean up created indexes:"
+         @logger.log " curl -X DELETE 'http://localhost:9200/#{@catchup1_index}'"
+         @logger.log " curl -X DELETE 'http://localhost:9200/#{@new_index}'"
+         @logger.log ""
+         @logger.log "Original error: #{original_error.message}"
+         if rollback_error
+           @logger.log "Rollback error: #{rollback_error.message}"
+         end
+         @logger.log ""
+       end
+     end
+   end
+ end
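
A minimal sketch of how Rollback might be wired into a migration, assuming a client and logger that respond to the methods called above (index_exists?, update_aliases, reindex, wait_for_task, get_index_doc_count, delete_index, and log). The index names and the failing step are hypothetical; the real migration entry point is not part of this diff.

    # Hypothetical wiring; names are illustrative only.
    rollback = SchemaTools::Migrate::Rollback.new(
      "products",                  # alias_name
      "products-20240101000000",   # current_index (original)
      "products-catchup-1",        # catchup1_index
      "products-20240601000000",   # new_index
      client,                      # must respond to index_exists?, update_aliases, reindex, ...
      logger                       # must respond to #log
    )

    begin
      perform_step_three!          # assumed migration step that can raise
    rescue => e
      rollback.attempt_rollback(e) # restores the alias and deletes the created indexes
    end
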
@@ -0,0 +1,165 @@
+ require 'json'
+ require 'fileutils'
+ require 'time'
+ require_relative 'config'
+ require_relative 'settings_filter'
+
+ module SchemaTools
+   def self.new_alias(client:)
+     puts "Warning: This tool only supports migrating aliases."
+     puts "Create an alias for this index by running:"
+     puts "rake schema:alias"
+     puts "\nDownload this index schema anyway? Y/n"
+
+     choice = STDIN.gets&.chomp&.downcase
+     if choice.nil? || choice.empty? || choice == 'y' || choice == 'yes'
+       # User wants to proceed with creating a new alias
+       puts "\nEnter a new alias name:"
+       alias_name = STDIN.gets&.chomp
+       if alias_name.nil? || alias_name.empty?
+         puts "No alias name provided. Exiting."
+         exit 1
+       end
+
+       if client.alias_exists?(alias_name)
+         puts "Alias '#{alias_name}' already exists."
+         exit 1
+       end
+
+       timestamp = Time.now.strftime("%Y%m%d%H%M%S")
+       index_name = "#{alias_name}-#{timestamp}"
+
+       puts "Creating index '#{index_name}' with alias '#{alias_name}'..."
+
+       sample_settings = {
+         "number_of_shards" => 1,
+         "number_of_replicas" => 0,
+         "analysis" => {
+           "analyzer" => {
+             "default" => {
+               "type" => "standard"
+             }
+           }
+         }
+       }
+
+       sample_mappings = {
+         "properties" => {
+           "id" => {
+             "type" => "keyword"
+           },
+           "created_at" => {
+             "type" => "date"
+           },
+           "updated_at" => {
+             "type" => "date"
+           }
+         }
+       }
+
+       client.create_index(index_name, sample_settings, sample_mappings)
+       client.create_alias(alias_name, index_name)
+
+       puts "✓ Created index '#{index_name}' with alias '#{alias_name}'"
+
+       schema_path = File.join(Config.schemas_path, alias_name)
+       FileUtils.mkdir_p(schema_path)
+
+       settings_file = File.join(schema_path, 'settings.json')
+       mappings_file = File.join(schema_path, 'mappings.json')
+
+       File.write(settings_file, JSON.pretty_generate(sample_settings))
+       File.write(mappings_file, JSON.pretty_generate(sample_mappings))
+
+       puts "✓ Sample schema created at #{schema_path}"
+       puts " - settings.json"
+       puts " - mappings.json"
+     else
+       puts "Exiting. Run 'rake schema:alias' to create an alias for an existing index."
+     end
+   end
+
+   def self.create_alias_for_index(client:)
+     aliases = client.list_aliases
+     indices = client.list_indices
+
+     unaliased_indices = indices.reject { |index| aliases.values.flatten.include?(index) || index.start_with?('.') || client.index_closed?(index) }
+
+     puts "\nIndexes not part of any aliases:"
+     if unaliased_indices.empty?
+       puts " (none)"
+       puts "\nNo unaliased indices available to create aliases for."
+       return
+     end
+
+     unaliased_indices.each_with_index do |index_name, index|
+       puts " #{index + 1}. #{index_name}"
+     end
+
+     puts "\nPlease choose an index to create an alias for:"
+     puts "Enter the number (1-#{unaliased_indices.length}):"
+
+     choice = STDIN.gets&.chomp
+     if choice.nil?
+       puts "No input provided. Exiting."
+       exit 1
+     end
+
+     choice_num = choice.to_i
+     if choice_num < 1 || choice_num > unaliased_indices.length
+       puts "Invalid choice. Please enter a number between 1 and #{unaliased_indices.length}."
+       exit 1
+     end
+
+     selected_index = unaliased_indices[choice_num - 1]
+
+     puts "\nType the name of a new alias to create for this index:"
+     new_alias_name = STDIN.gets&.chomp
+     if new_alias_name.nil? || new_alias_name.empty?
+       puts "No alias name provided. Exiting."
+       exit 1
+     end
+
+     if client.alias_exists?(new_alias_name)
+       puts "Alias '#{new_alias_name}' already exists."
+       exit 1
+     end
+
+     puts "Creating alias '#{new_alias_name}' for index '#{selected_index}'..."
+     client.create_alias(new_alias_name, selected_index)
+
+     puts "✓ Created alias '#{new_alias_name}' -> '#{selected_index}'"
+
+     # Download the schema for the newly aliased index
+     download_schema(new_alias_name, selected_index, client)
+   end
+
+   private
+
+   def self.download_schema(folder_name, index_name, client)
+     settings = client.get_index_settings(index_name)
+     mappings = client.get_index_mappings(index_name)
+
+     if settings.nil? || mappings.nil?
+       puts "Failed to retrieve settings or mappings for #{index_name}"
+       exit 1
+     end
+
+     # Filter out internal settings
+     filtered_settings = SettingsFilter.filter_internal_settings(settings)
+
+     schema_path = File.join(Config.schemas_path, folder_name)
+     FileUtils.mkdir_p(schema_path)
+
+     settings_file = File.join(schema_path, 'settings.json')
+     mappings_file = File.join(schema_path, 'mappings.json')
+
+     File.write(settings_file, JSON.pretty_generate(filtered_settings))
+     File.write(mappings_file, JSON.pretty_generate(mappings))
+
+     puts "✓ Schema downloaded to #{schema_path}"
+     puts " - settings.json"
+     puts " - mappings.json"
+   end
+
+ end
@@ -0,0 +1,21 @@
+ module SchemaTools
+   def self.painless_scripts_delete(script_name:, client:)
+     raise "script_name parameter is required" unless script_name && !script_name.strip.empty?
+
+     # Remove .painless extension if provided
+     script_name = script_name.gsub(/\.painless$/, '')
+
+     puts "Deleting painless script '#{script_name}' from cluster..."
+
+     begin
+       client.delete_script(script_name)
+       puts "Successfully deleted painless script '#{script_name}' from cluster"
+     rescue => e
+       if e.message.include?('404') || e.message.include?('not found')
+         puts "Script '#{script_name}' not found in cluster"
+       else
+         raise e
+       end
+     end
+   end
+ end
@@ -0,0 +1,26 @@
+ require 'fileutils'
+
+ module SchemaTools
+   def self.painless_scripts_download(client:)
+     painless_scripts_path = Config.painless_scripts_path
+
+     puts "Downloading all painless scripts from cluster..."
+
+     scripts = client.get_stored_scripts
+
+     if scripts.empty?
+       puts "No painless scripts found in cluster."
+       return
+     end
+
+     FileUtils.mkdir_p(painless_scripts_path)
+
+     scripts.each do |script_name, script_content|
+       script_file_path = File.join(painless_scripts_path, "#{script_name}.painless")
+       File.write(script_file_path, script_content)
+       puts "Downloaded script: #{script_name}"
+     end
+
+     puts "Successfully downloaded #{scripts.length} painless script(s) to #{painless_scripts_path}"
+   end
+ end
@@ -0,0 +1,31 @@
+ require 'fileutils'
+
+ module SchemaTools
+   def self.painless_scripts_upload(client:)
+     painless_scripts_path = Config.painless_scripts_path
+
+     unless Dir.exist?(painless_scripts_path)
+       puts "Painless scripts directory #{painless_scripts_path} does not exist."
+       return
+     end
+
+     puts "Uploading all painless scripts from #{painless_scripts_path} to cluster..."
+
+     script_files = Dir.glob(File.join(painless_scripts_path, '*.painless'))
+
+     if script_files.empty?
+       puts "No painless script files found in #{painless_scripts_path}"
+       return
+     end
+
+     script_files.each do |script_file_path|
+       script_name = File.basename(script_file_path, '.painless')
+       script_content = File.read(script_file_path)
+
+       client.put_script(script_name, script_content)
+       puts "Uploaded script: #{script_name}"
+     end
+
+     puts "Successfully uploaded #{script_files.length} painless script(s) to cluster"
+   end
+ end
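
Taken together, the three painless-script helpers above form a simple sync loop between Config.painless_scripts_path on disk and the cluster's stored scripts. A sketch of calling them directly, assuming a client wrapper that exposes get_stored_scripts, put_script, and delete_script as used above; the script name is illustrative, and the ".painless" suffix is stripped for you by the delete helper.

    # Pull every stored script into local .painless files, push local edits back,
    # then remove one script by name.
    SchemaTools.painless_scripts_download(client: client)
    SchemaTools.painless_scripts_upload(client: client)
    SchemaTools.painless_scripts_delete(script_name: "reindex.painless", client: client)
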
@@ -0,0 +1,15 @@
+ require 'rake'
+ require 'rake/tasklib'
+
+ module SchemaTools
+   module RakeTasks
+     def self.load_tasks
+       Dir.glob(File.join(File.dirname(__FILE__), '..', 'tasks', '*.rake')).each do |rake_file|
+         load rake_file
+       end
+     end
+   end
+ end
+
+ # Auto-load tasks when this file is required
+ SchemaTools::RakeTasks.load_tasks
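
Because this file calls SchemaTools::RakeTasks.load_tasks at require time, a consuming project only needs to require it from its Rakefile. A sketch, with the caveat that the require path is an assumption since the gem's entry file name is not shown in this diff:

    # Rakefile of a consuming project (hypothetical require path)
    require 'schema_tools/rake_tasks'

    # Every *.rake file shipped under the gem's tasks/ directory is now loaded,
    # e.g. `rake schema:alias` or `rake 'schema:migrate[products]'`.
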
@@ -0,0 +1,53 @@
+ require 'json'
+ require 'fileutils'
+
+ module SchemaTools
+   class SchemaFiles
+     def self.get_settings(alias_name)
+       settings_path = File.join(Config.schemas_path, alias_name, 'settings.json')
+       return nil unless File.exist?(settings_path)
+
+       JSON.parse(File.read(settings_path))
+     end
+
+     def self.get_mappings(alias_name)
+       mappings_path = File.join(Config.schemas_path, alias_name, 'mappings.json')
+       return nil unless File.exist?(mappings_path)
+
+       JSON.parse(File.read(mappings_path))
+     end
+
+     def self.get_reindex_script(alias_name)
+       script_path = File.join(Config.schemas_path, alias_name, 'reindex.painless')
+
+       File.exist?(script_path) ? File.read(script_path) : nil
+     end
+
+     def self.discover_all_schemas
+       return [] unless Dir.exist?(Config.schemas_path)
+
+       schemas = []
+
+       Dir.glob(File.join(Config.schemas_path, '*'))
+          .select { |d| File.directory?(d) }
+          .each do |schema_dir|
+         alias_name = File.basename(schema_dir)
+
+         if has_schema_files?(alias_name)
+           schemas << alias_name
+         end
+       end
+
+       schemas
+     end
+
+     private
+
+     def self.has_schema_files?(alias_name)
+       settings_path = File.join(Config.schemas_path, alias_name, 'settings.json')
+       mappings_path = File.join(Config.schemas_path, alias_name, 'mappings.json')
+
+       File.exist?(settings_path) && File.exist?(mappings_path)
+     end
+   end
+ end
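
SchemaFiles treats a directory under Config.schemas_path as a schema only when both settings.json and mappings.json are present; reindex.painless is optional. A short sketch of reading the schemas back, assuming a schemas directory laid out as written by download_schema above:

    SchemaTools::SchemaFiles.discover_all_schemas.each do |alias_name|
      settings = SchemaTools::SchemaFiles.get_settings(alias_name)        # parsed settings.json
      mappings = SchemaTools::SchemaFiles.get_mappings(alias_name)        # parsed mappings.json
      script   = SchemaTools::SchemaFiles.get_reindex_script(alias_name)  # nil if no reindex.painless
      field_count = mappings["properties"]&.keys&.length || 0
      puts "#{alias_name}: #{field_count} mapped fields, reindex script? #{!script.nil?}"
    end
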
@@ -0,0 +1,64 @@
+ module SchemaTools
+   def self.seed(client:)
+     # List available indices (connection already validated during client initialization)
+     puts "Connecting to #{Config.connection_url}..."
+     indices = client.list_indices
+
+     if indices.empty?
+       puts "No indices found in the cluster."
+       puts "Please create an index first."
+       exit 0
+     end
+
+     puts "Available indices:"
+     indices.each_with_index do |index_name, index|
+       puts "#{index + 1}. #{index_name}"
+     end
+
+     puts "\nPlease select an index by number (1-#{indices.length}):"
+     selection_input = STDIN.gets&.chomp
+     if selection_input.nil?
+       puts "No input provided. Exiting."
+       exit 1
+     end
+     selection = selection_input.to_i
+
+     if selection < 1 || selection > indices.length
+       puts "Invalid selection. Please run the task again and select a valid number."
+       exit 1
+     end
+
+     selected_index = indices[selection - 1]
+     puts "Selected index: #{selected_index}"
+
+     # Fetch the mappings for the selected index
+     puts "Fetching mappings for #{selected_index}..."
+     mappings = client.get_index_mappings(selected_index)
+
+     if mappings.nil?
+       puts "Failed to fetch mappings for #{selected_index}"
+       exit 1
+     end
+
+     puts "Mappings fetched successfully."
+
+     # Prompt user for number of documents to seed
+     puts "\nHow many documents would you like to seed?"
+     num_docs_input = STDIN.gets&.chomp
+     if num_docs_input.nil?
+       puts "No input provided. Exiting."
+       exit 1
+     end
+
+     num_docs = num_docs_input.to_i
+     if num_docs <= 0
+       puts "Invalid number of documents. Please enter a positive integer."
+       exit 1
+     end
+
+     puts "Seeding #{num_docs} documents from #{selected_index}..."
+
+     # Call the seeding function
+     Seed.seed_data(num_docs, mappings, client, selected_index)
+   end
+ end
@@ -0,0 +1,64 @@
+ require 'json'
+ require_relative 'diff'
+
+ module SchemaTools
+   class SettingsDiff
+     def initialize(local_schema, remote_schema)
+       @local_schema = local_schema
+       @remote_schema = remote_schema
+     end
+
+     def generate_minimal_changes
+       return {} unless @local_schema.is_a?(Hash)
+
+       # Normalize local schema to always have "index" wrapper
+       local_index = normalize_local_schema(@local_schema)
+       return {} if local_index.nil?
+
+       remote_index = @remote_schema.is_a?(Hash) && @remote_schema.key?("index") ? @remote_schema["index"] : {}
+
+       # Normalize both sides to ensure consistent comparison
+       normalized_remote = Diff.normalize_values(remote_index)
+       normalized_local = Diff.normalize_values(local_index)
+
+       changes = find_changes(normalized_remote, normalized_local)
+       changes.empty? ? {} : { "index" => changes }
+     end
+
+     private
+
+     def normalize_local_schema(local_schema)
+       # If local schema already has "index" wrapper, use it only if it's a valid hash
+       if local_schema.key?("index")
+         return local_schema["index"] if local_schema["index"].is_a?(Hash)
+         # If index exists but is not a hash, return nil to indicate invalid schema
+         return nil
+       end
+
+       # If local schema doesn't have "index" wrapper, treat the entire schema as index settings
+       # This handles cases like { "number_of_shards": 1 } which is equivalent to { "index": { "number_of_shards": 1 } }
+       return local_schema
+     end
+
+     def find_changes(remote, local)
+       changes = {}
+
+       return changes unless local.is_a?(Hash) && remote.is_a?(Hash)
+
+       local.each do |key, value|
+         if !remote.key?(key)
+           changes[key] = value
+         elsif value != remote[key]
+           if value.is_a?(Hash) && remote[key].is_a?(Hash)
+             nested_changes = find_changes(remote[key], value)
+             changes[key] = nested_changes unless nested_changes.empty?
+           else
+             changes[key] = value
+           end
+         end
+       end
+
+       changes
+     end
+   end
+ end
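
The effect of generate_minimal_changes is easiest to see on a concrete pair of settings hashes: only keys that are missing or different on the remote side survive, re-wrapped under "index". A small worked example, assuming Diff.normalize_values (not shown in this diff) leaves these plain string values unchanged:

    local  = { "number_of_replicas" => "2", "refresh_interval" => "30s" }
    remote = { "index" => { "number_of_replicas" => "2", "number_of_shards" => "1" } }

    SchemaTools::SettingsDiff.new(local, remote).generate_minimal_changes
    # => { "index" => { "refresh_interval" => "30s" } }
    # number_of_replicas matches the remote value and drops out;
    # number_of_shards exists only on the remote side, and remote-only keys are never emitted.
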
@@ -0,0 +1,27 @@
+ require 'json'
+
+ module SchemaTools
+   class SettingsFilter
+     # Remove read-only OpenSearch/Elasticsearch internal fields
+     def self.filter_internal_settings(settings)
+       return settings unless settings.is_a?(Hash)
+
+       filtered_settings = JSON.parse(JSON.generate(settings))
+
+       internal_fields = [
+         'creation_date',
+         'provided_name',
+         'uuid',
+         'version'
+       ]
+
+       if filtered_settings['index']
+         internal_fields.each do |field|
+           filtered_settings['index'].delete(field)
+         end
+       end
+
+       filtered_settings
+     end
+   end
+ end
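
A small sketch of the filter's behaviour on a settings payload of the shape a cluster returns (the values here are illustrative): the read-only bookkeeping keys are removed from the "index" block while everything else is left intact, and the JSON round-trip deep copy keeps the caller's hash unmodified.

    raw = {
      "index" => {
        "creation_date"      => "1700000000000",
        "uuid"               => "aBcDeFgH123",
        "provided_name"      => "products-20240101000000",
        "version"            => { "created" => "136327827" },
        "number_of_shards"   => "1",
        "number_of_replicas" => "0"
      }
    }

    SchemaTools::SettingsFilter.filter_internal_settings(raw)
    # => { "index" => { "number_of_shards" => "1", "number_of_replicas" => "0" } }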