forest_admin_datasource_rpc 1.22.1 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/forest_admin_datasource_rpc.gemspec +0 -3
- data/lib/forest_admin_datasource_rpc/Utils/rpc_client.rb +5 -39
- data/lib/forest_admin_datasource_rpc/Utils/schema_polling_client.rb +76 -108
- data/lib/forest_admin_datasource_rpc/collection.rb +3 -2
- data/lib/forest_admin_datasource_rpc/datasource.rb +13 -46
- data/lib/forest_admin_datasource_rpc/version.rb +1 -1
- data/lib/forest_admin_datasource_rpc.rb +97 -45
- metadata +2 -46
- data/lib/forest_admin_datasource_rpc/Utils/schema_polling_pool.rb +0 -286
- data/lib/forest_admin_datasource_rpc/reconciliate_rpc.rb +0 -71
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f0b6bdcda842a6d936560f74f955186c51f36d4be1dbd332c51d0ecdea710cf5
+  data.tar.gz: faabc27ad8e0b3b9fe5f719da93375224c8e4f42bec99dff71c9a132c58119fc
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9fe4da7443c4080e0097ad823e750a4cc5fab2e992a1890cc22568ed055cc0b7f78702ca4523bcc0ecb56715355d02c89915d4bffbfd80e9e861ee128633cbe2
+  data.tar.gz: ff37fcf51cf77f521b284dc4f21358241a1c5cd5859117cf614b507a1f4cb0836f547cf0220222052b0cc05a5f898debeb1ab3bf01aa182767fca3a3ff769543
data/forest_admin_datasource_rpc.gemspec
CHANGED

@@ -33,12 +33,9 @@ admin work on any Ruby application."
   spec.require_paths = ["lib"]

   spec.add_dependency "base64"
-  spec.add_dependency "benchmark"
   spec.add_dependency "bigdecimal"
-  spec.add_dependency "cgi"
   spec.add_dependency "csv"
   spec.add_dependency "faraday", "~> 2.7"
-  spec.add_dependency "logger"
   spec.add_dependency "mutex_m"
   spec.add_dependency "ostruct"
   spec.add_dependency "zeitwerk", "~> 2.3"
data/lib/forest_admin_datasource_rpc/Utils/rpc_client.rb
CHANGED

@@ -32,8 +32,6 @@ module ForestAdminDatasourceRpc

       HTTP_NOT_MODIFIED = 304
       NotModified = Class.new
-      DEFAULT_TIMEOUT = 30 # seconds
-      DEFAULT_OPEN_TIMEOUT = 10 # seconds

       def initialize(api_url, auth_secret)
         @api_url = api_url
@@ -60,31 +58,13 @@ module ForestAdminDatasourceRpc
       def make_request(endpoint, caller: nil, method: :get, payload: nil, symbolize_keys: false, if_none_match: nil)
         log_request_start(method, endpoint, if_none_match)

-        client =
-        headers = build_request_headers(caller, if_none_match)
-
-        response = client.send(method, endpoint, payload, headers)
-        log_request_complete(response, endpoint)
-        response
-      rescue Faraday::ConnectionFailed => e
-        handle_connection_failed(endpoint, e)
-      rescue Faraday::TimeoutError => e
-        handle_timeout_error(endpoint, e)
-      end
-      # rubocop:enable Metrics/ParameterLists
-
-      def build_faraday_client(symbolize_keys)
-        Faraday.new(url: @api_url) do |faraday|
+        client = Faraday.new(url: @api_url) do |faraday|
           faraday.request :json
           faraday.response :json, parser_options: { symbolize_names: symbolize_keys }
           faraday.adapter Faraday.default_adapter
           faraday.ssl.verify = !ForestAdminAgent::Facades::Container.cache(:debug)
-          faraday.options.timeout = DEFAULT_TIMEOUT
-          faraday.options.open_timeout = DEFAULT_OPEN_TIMEOUT
         end
-      end

-      def build_request_headers(caller, if_none_match)
         timestamp = Time.now.utc.iso8601(3)
         signature = generate_signature(timestamp)

@@ -96,26 +76,12 @@ module ForestAdminDatasourceRpc

         headers['forest_caller'] = caller.to_json if caller
         headers['If-None-Match'] = %("#{if_none_match}") if if_none_match
-        headers
-      end

-      def handle_connection_failed(endpoint, error)
-        ForestAdminAgent::Facades::Container.logger&.log(
-          'Error',
-          "[RPC Client] Connection failed to #{@api_url}#{endpoint}: #{error.message}"
-        )
-        raise ForestAdminDatasourceToolkit::Exceptions::ForestException,
-              "RPC connection failed: Unable to connect to #{@api_url}. Please check if the RPC server is running."
-      end
-
-      def handle_timeout_error(endpoint, error)
-        ForestAdminAgent::Facades::Container.logger&.log(
-          'Error',
-          "[RPC Client] Request timeout to #{@api_url}#{endpoint}: #{error.message}"
-        )
-        raise ForestAdminDatasourceToolkit::Exceptions::ForestException,
-              "RPC request timeout: The RPC server at #{@api_url} did not respond in time."
+        response = client.send(method, endpoint, payload, headers)
+        log_request_complete(response, endpoint)
+        response
       end
+      # rubocop:enable Metrics/ParameterLists

       def generate_signature(timestamp)
         OpenSSL::HMAC.hexdigest('SHA256', @auth_secret, timestamp)
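For readers unfamiliar with the signing visible above: `make_request` stamps each call with an ISO-8601 timestamp and an HMAC-SHA256 of that timestamp (`generate_signature`), plus optional `forest_caller` and `If-None-Match` headers. The sketch below only illustrates that pattern in isolation; the header names carrying the timestamp and signature are assumptions, since the diff does not show them.

```ruby
require 'json'
require 'openssl'
require 'time'

# Illustrative sketch of timestamp + HMAC-SHA256 request signing.
# 'X-Timestamp' and 'X-Signature' are hypothetical header names; only
# 'forest_caller' and 'If-None-Match' appear in the diff above.
def signed_headers(auth_secret, caller_context: nil, if_none_match: nil)
  timestamp = Time.now.utc.iso8601(3)
  signature = OpenSSL::HMAC.hexdigest('SHA256', auth_secret, timestamp)

  headers = {
    'X-Timestamp' => timestamp, # hypothetical
    'X-Signature' => signature  # hypothetical
  }
  headers['forest_caller'] = caller_context.to_json if caller_context
  headers['If-None-Match'] = %("#{if_none_match}") if if_none_match
  headers
end

puts signed_headers('my-secret', if_none_match: 'abc123')
```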
data/lib/forest_admin_datasource_rpc/Utils/schema_polling_client.rb
CHANGED

@@ -1,51 +1,54 @@
 require 'openssl'
 require 'json'
 require 'time'
-require 'digest'

 module ForestAdminDatasourceRpc
   module Utils
     class SchemaPollingClient
-      attr_reader :closed
+      attr_reader :closed

-      DEFAULT_POLLING_INTERVAL = 600
-      MIN_POLLING_INTERVAL = 1
-      MAX_POLLING_INTERVAL = 3600
+      DEFAULT_POLLING_INTERVAL = 600 # seconds (10 minutes)
+      MIN_POLLING_INTERVAL = 1 # seconds (minimum safe interval)
+      MAX_POLLING_INTERVAL = 3600 # seconds (1 hour max)

-      def initialize(uri, auth_secret,
-                     introspection_etag: nil, &on_schema_change)
+      def initialize(uri, auth_secret, options = {}, &on_schema_change)
         @uri = uri
         @auth_secret = auth_secret
-        @polling_interval = polling_interval
+        @polling_interval = options[:polling_interval] || DEFAULT_POLLING_INTERVAL
         @on_schema_change = on_schema_change
         @closed = false
-        @introspection_schema = introspection_schema
-        @introspection_etag = introspection_etag
-        @current_schema = nil
         @cached_etag = nil
+        @polling_thread = nil
+        @mutex = Mutex.new
         @connection_attempts = 0
-        @initial_sync_completed = false
-        @client_id = uri

+        # Validate polling interval
         validate_polling_interval!

+        # RPC client for schema fetching with ETag support
         @rpc_client = RpcClient.new(@uri, @auth_secret)
       end

-      def start
-        return
-
-        ForestAdminAgent::Facades::Container.logger&.log('Info', "Getting schema from RPC agent on #{@uri}.")
-        fetch_initial_schema_sync
+      def start
+        return if @closed

-
-
+        @mutex.synchronize do
+          return if @polling_thread&.alive?
+
+          @polling_thread = Thread.new do
+            polling_loop
+          rescue StandardError => e
+            ForestAdminAgent::Facades::Container.logger&.log(
+              'Error',
+              "[Schema Polling] Unexpected error in polling loop: #{e.class} - #{e.message}"
+            )
+          end
+        end

         ForestAdminAgent::Facades::Container.logger&.log(
           'Info',
-          "[Schema Polling]
+          "[Schema Polling] Polling started (interval: #{@polling_interval}s)"
         )
-        true
       end

       def stop
@@ -54,11 +57,46 @@ module ForestAdminDatasourceRpc
         @closed = true
         ForestAdminAgent::Facades::Container.logger&.log('Debug', '[Schema Polling] Stopping polling')

-
+        @mutex.synchronize do
+          if @polling_thread&.alive?
+            @polling_thread.kill
+            @polling_thread = nil
+          end
+        end

         ForestAdminAgent::Facades::Container.logger&.log('Debug', '[Schema Polling] Polling stopped')
       end

+      private
+
+      def polling_loop
+        ForestAdminAgent::Facades::Container.logger&.log(
+          'Debug',
+          "[Schema Polling] Starting polling loop (interval: #{@polling_interval}s)"
+        )
+
+        loop do
+          break if @closed
+
+          begin
+            check_schema
+          rescue StandardError => e
+            handle_error(e)
+          end
+
+          # Sleep with interrupt check (check every second for early termination)
+          ForestAdminAgent::Facades::Container.logger&.log(
+            'Debug',
+            "[Schema Polling] Waiting #{@polling_interval}s before next check (current ETag: #{@cached_etag || "none"})"
+          )
+          remaining = @polling_interval
+          while remaining.positive? && !@closed
+            sleep([remaining, 1].min)
+            remaining -= 1
+          end
+        end
+      end
+
       def check_schema
         @connection_attempts += 1
         log_checking_schema
@@ -69,73 +107,15 @@ module ForestAdminDatasourceRpc
         log_connection_error(e)
       rescue ForestAdminAgent::Http::Exceptions::AuthenticationOpenIdClient => e
         log_authentication_error(e)
-      rescue ForestAdminDatasourceToolkit::Exceptions::ForestException => e
-        log_rpc_error(e)
       rescue StandardError => e
         log_unexpected_error(e)
       end

-
-
-
-
-
-        Digest::SHA1.hexdigest(JSON.generate(schema))
-      end
-
-      def fetch_initial_schema_sync
-        # If we have an introspection schema, send its ETag to avoid re-downloading unchanged schema
-        introspection_etag = @introspection_etag || (@introspection_schema && compute_etag(@introspection_schema))
-        result = @rpc_client.fetch_schema('/forest/rpc-schema', if_none_match: introspection_etag)
-
-        if result == RpcClient::NotModified
-          # Schema unchanged from introspection - use introspection
-          @current_schema = @introspection_schema
-          @cached_etag = introspection_etag
-          @initial_sync_completed = true
-          ForestAdminAgent::Facades::Container.logger&.log(
-            'Info',
-            "[Schema Polling] RPC schema unchanged (HTTP 304), using introspection (ETag: #{@cached_etag})"
-          )
-        else
-          # New schema from RPC
-          @current_schema = result.body
-          @cached_etag = result.etag || compute_etag(@current_schema)
-          @initial_sync_completed = true
-          ForestAdminAgent::Facades::Container.logger&.log(
-            'Debug',
-            "[Schema Polling] Initial schema fetched successfully (ETag: #{@cached_etag})"
-          )
-        end
-
-        @introspection_schema = nil
-      rescue Faraday::ConnectionFailed, Faraday::TimeoutError,
-             ForestAdminAgent::Http::Exceptions::AuthenticationOpenIdClient,
-             ForestAdminDatasourceToolkit::Exceptions::ForestException, StandardError => e
-        handle_initial_fetch_error(e)
-      end
-
-      def handle_initial_fetch_error(error)
-        if @introspection_schema
-          # Fallback to introspection schema - don't crash
-          @current_schema = @introspection_schema
-          @cached_etag = @introspection_etag || compute_etag(@current_schema)
-          @introspection_schema = nil
-          @introspection_etag = nil
-          @initial_sync_completed = true
-          ForestAdminAgent::Facades::Container.logger&.log(
-            'Warn',
-            "RPC agent at #{@uri} is unreachable (#{error.class}: #{error.message}), " \
-            "using provided introspection schema (ETag: #{@cached_etag})"
-          )
-        else
-          # No introspection - re-raise to crash
-          ForestAdminAgent::Facades::Container.logger&.log(
-            'Error',
-            "Failed to get schema from RPC agent at #{@uri}: #{error.class} - #{error.message}"
-          )
-          raise error
-        end
+      def handle_error(error)
+        ForestAdminAgent::Facades::Container.logger&.log(
+          'Error',
+          "[Schema Polling] Error during schema check: #{error.class} - #{error.message}"
+        )
       end

       def trigger_schema_change_callback(schema)
@@ -182,27 +162,22 @@ module ForestAdminDatasourceRpc
       end

       def handle_schema_changed(result)
-
-        new_etag
-
-        if @initial_sync_completed
-          handle_schema_update(new_schema, new_etag)
-        else
-          @cached_etag = new_etag
-          @current_schema = new_schema
-          @initial_sync_completed = true
-          ForestAdminAgent::Facades::Container.logger&.log(
-            'Info',
-            "[Schema Polling] Initial sync completed successfully (ETag: #{new_etag})"
-          )
-        end
+        new_etag = result.etag
+        @cached_etag.nil? ? handle_initial_schema(new_etag) : handle_schema_update(result.body, new_etag)
         @connection_attempts = 0
       end

+      def handle_initial_schema(etag)
+        @cached_etag = etag
+        ForestAdminAgent::Facades::Container.logger&.log(
+          'Debug',
+          "[Schema Polling] Initial schema loaded successfully (ETag: #{etag})"
+        )
+      end
+
       def handle_schema_update(schema, etag)
         old_etag = @cached_etag
         @cached_etag = etag
-        @current_schema = schema
         msg = "[Schema Polling] Schema changed detected (old ETag: #{old_etag}, new ETag: #{etag}), " \
               'triggering reload callback'
         ForestAdminAgent::Facades::Container.logger&.log('Info', msg)
@@ -216,13 +191,6 @@ module ForestAdminDatasourceRpc
         )
       end

-      def log_rpc_error(error)
-        ForestAdminAgent::Facades::Container.logger&.log(
-          'Warn',
-          "[Schema Polling] RPC error: #{error.message}"
-        )
-      end
-
       def log_authentication_error(error)
         ForestAdminAgent::Facades::Container.logger&.log(
           'Error',
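The rewritten client replaces the external polling pool with one thread per client that sleeps in one-second slices, so a `stop` call (which flips `@closed` and kills the thread) takes effect almost immediately instead of waiting out a full interval. Below is a minimal, framework-free sketch of that interruptible-loop pattern; it is a simplification, not the gem's class.

```ruby
# Minimal sketch of an interruptible polling thread: the check runs every
# `interval` seconds, but the sleep is sliced so `stop` is honored quickly.
class TinyPoller
  def initialize(interval, &check)
    @interval = interval
    @check = check
    @closed = false
    @mutex = Mutex.new
    @thread = nil
  end

  def start
    @mutex.synchronize do
      return if @thread&.alive?

      @thread = Thread.new do
        until @closed
          begin
            @check.call
          rescue StandardError => e
            warn "poll failed: #{e.message}"
          end
          remaining = @interval
          while remaining.positive? && !@closed
            sleep([remaining, 1].min) # 1s slices keep shutdown responsive
            remaining -= 1
          end
        end
      end
    end
  end

  def stop
    @closed = true
    @mutex.synchronize do
      @thread&.kill
      @thread = nil
    end
  end
end

poller = TinyPoller.new(3) { puts "checking schema at #{Time.now}" }
poller.start
sleep 4
poller.stop
```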
data/lib/forest_admin_datasource_rpc/collection.rb
CHANGED

@@ -6,9 +6,10 @@ module ForestAdminDatasourceRpc
     include ForestAdminDatasourceRpc::Utils
     include ForestAdminDatasourceCustomizer::Decorators::Action

-    def initialize(datasource, name, schema)
+    def initialize(datasource, name, options, schema)
       super(datasource, name)
-      @
+      @options = options
+      @client = RpcClient.new(@options[:uri], @options[:auth_secret] || ForestAdminAgent::Facades::Container.cache(:auth_secret))
       @rpc_collection_uri = "/forest/rpc/#{name}"
       @base_params = { collection_name: name }

data/lib/forest_admin_datasource_rpc/datasource.rb
CHANGED

@@ -2,8 +2,6 @@ module ForestAdminDatasourceRpc
   class Datasource < ForestAdminDatasourceToolkit::Datasource
     include ForestAdminDatasourceRpc::Utils

-    attr_reader :shared_rpc_client, :rpc_relations
-
     def initialize(options, introspection, schema_polling_client = nil)
       super()

@@ -13,15 +11,11 @@ module ForestAdminDatasourceRpc
         "collections and #{introspection[:charts].length} charts."
       )

-      @shared_rpc_client = RpcClient.new(
-        options[:uri],
-        options[:auth_secret] || ForestAdminAgent::Facades::Container.cache(:auth_secret)
-      )
-
       introspection[:collections].each do |schema|
-        add_collection(Collection.new(self, schema[:name], schema))
+        add_collection(Collection.new(self, schema[:name], options, schema))
       end

+      @options = options
       @charts = introspection[:charts]
       @rpc_relations = introspection[:rpc_relations]
       @schema_polling_client = schema_polling_client
@@ -31,12 +25,10 @@ module ForestAdminDatasourceRpc
       @live_query_connections = native_query_connections.to_h { |conn| [conn[:name], conn[:name]] }

       @schema = { charts: @charts }
-
-      # Register shutdown hook to cleanup schema polling gracefully
-      register_shutdown_hook if @schema_polling_client
     end

     def render_chart(caller, name)
+      client = RpcClient.new(@options[:uri], @options[:auth_secret] || ForestAdminAgent::Facades::Container.cache(:auth_secret))
       url = 'forest/rpc-datasource-chart'

       ForestAdminAgent::Facades::Container.logger.log(
@@ -44,10 +36,11 @@ module ForestAdminDatasourceRpc
         "Forwarding datasource chart '#{name}' call to the Rpc agent on #{url}."
       )

-
+      client.call_rpc(url, caller: caller, method: :post, payload: { chart: name })
     end

     def execute_native_query(connection_name, query, binds)
+      client = RpcClient.new(@options[:uri], @options[:auth_secret] || ForestAdminAgent::Facades::Container.cache(:auth_secret))
       url = 'forest/rpc-native-query'

       ForestAdminAgent::Facades::Container.logger.log(
@@ -55,11 +48,8 @@ module ForestAdminDatasourceRpc
         "Forwarding native query for connection '#{connection_name}' to the Rpc agent on #{url}."
       )

-      result =
-
-        method: :post,
-        payload: { connection_name: connection_name, query: query, binds: binds }
-      )
+      result = client.call_rpc(url, method: :post,
+                               payload: { connection_name: connection_name, query: query, binds: binds })
       ForestAdminDatasourceToolkit::Utils::HashHelper.convert_keys(result.to_a)
     end

@@ -69,38 +59,15 @@ module ForestAdminDatasourceRpc
       @cleaned_up = true

       if @schema_polling_client
-
+        ForestAdminAgent::Facades::Container.logger&.log('Info', '[RPCDatasource] Stopping schema polling...')
         @schema_polling_client.stop
-
+        ForestAdminAgent::Facades::Container.logger&.log('Info', '[RPCDatasource] Schema polling stopped')
       end
     rescue StandardError => e
-
-
-
-
-
-    def register_shutdown_hook
-      # Register at_exit hook for graceful shutdown
-      # This ensures schema polling is stopped when the application exits
-      at_exit do
-        cleanup
-      end
-    end
-
-    def log_info(message)
-      return unless defined?(ForestAdminAgent::Facades::Container)
-
-      ForestAdminAgent::Facades::Container.logger&.log('Info', message)
-    rescue StandardError
-      # Silently ignore logging errors during shutdown
-    end
-
-    def log_error(message)
-      return unless defined?(ForestAdminAgent::Facades::Container)
-
-      ForestAdminAgent::Facades::Container.logger&.log('Error', message)
-    rescue StandardError
-      # Silently ignore logging errors during shutdown
+      ForestAdminAgent::Facades::Container.logger&.log(
+        'Error',
+        "[RPCDatasource] Error during cleanup: #{e.class} - #{e.message}"
+      )
     end
   end
 end
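In 2.0.0 the shared `@shared_rpc_client` is gone: `render_chart` and `execute_native_query` each build a short-lived `RpcClient` from `@options`, and every `Collection` now receives the same options hash. The stand-in sketch below only contrasts the two shapes; `FakeClient` is a placeholder, not the gem's `RpcClient`.

```ruby
# Stand-in illustration of the design change: a client memoized once at
# construction time (pre-2.0.0) versus one created per call (2.0.0).
class FakeClient
  def initialize(uri)
    @uri = uri
  end

  def call(path)
    "#{@uri}#{path}"
  end
end

class SharedClientStyle
  def initialize(uri)
    @client = FakeClient.new(uri) # one client reused for every request
  end

  def render_chart(_name)
    @client.call('/forest/rpc-datasource-chart')
  end
end

class PerCallClientStyle
  def initialize(options)
    @options = options
  end

  def render_chart(_name)
    client = FakeClient.new(@options[:uri]) # fresh client per request
    client.call('/forest/rpc-datasource-chart')
  end
end

puts SharedClientStyle.new('http://rpc.local').render_chart('sales')
puts PerCallClientStyle.new({ uri: 'http://rpc.local' }).render_chart('sales')
```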
data/lib/forest_admin_datasource_rpc.rb
CHANGED

@@ -8,64 +8,116 @@ loader.setup
 module ForestAdminDatasourceRpc
   class Error < StandardError; end

-
-
-
-
+  # Build a RPC datasource with schema polling enabled.
+  #
+  # @param options [Hash] Configuration options
+  # @option options [String] :uri The URI of the RPC agent
+  # @option options [String] :auth_secret The authentication secret (optional, will use cache if not provided)
+  # @option options [Integer] :schema_polling_interval Polling interval in seconds (optional)
+  #   - Default: 600 seconds (10 minutes)
+  #   - Can be overridden with ENV['SCHEMA_POLLING_INTERVAL']
+  #   - Valid range: 1-3600 seconds
+  #   - Priority: options[:schema_polling_interval] > ENV['SCHEMA_POLLING_INTERVAL'] > default
+  #   - Example: SCHEMA_POLLING_INTERVAL=30 for development (30 seconds)
+  #
+  # @return [ForestAdminDatasourceRpc::Datasource] The configured datasource with schema polling
   def self.build(options)
     uri = options[:uri]
     auth_secret = options[:auth_secret] || ForestAdminAgent::Facades::Container.cache(:auth_secret)
-
-    provided_introspection_etag = options[:introspection_etag]
+    ForestAdminAgent::Facades::Container.logger.log('Info', "Getting schema from RPC agent on #{uri}.")

-
-      ENV['SCHEMA_POLLING_INTERVAL_SEC']&.to_i ||
-      600
+    schema = nil

-
-
+    begin
+      rpc_client = Utils::RpcClient.new(uri, auth_secret)
+      response = rpc_client.fetch_schema('/forest/rpc-schema')
+      schema = response.body
+    rescue Faraday::ConnectionFailed => e
+      ForestAdminAgent::Facades::Container.logger.log(
+        'Error',
+        "Connection failed to RPC agent at #{uri}: #{e.message}\n#{e.backtrace.join("\n")}"
+      )
+    rescue Faraday::TimeoutError => e
+      ForestAdminAgent::Facades::Container.logger.log(
+        'Error',
+        "Request timeout to RPC agent at #{uri}: #{e.message}"
+      )
+    rescue ForestAdminAgent::Http::Exceptions::AuthenticationOpenIdClient => e
+      ForestAdminAgent::Facades::Container.logger.log(
+        'Error',
+        "Authentication failed with RPC agent at #{uri}: #{e.message}"
+      )
+    rescue StandardError => e
+      ForestAdminAgent::Facades::Container.logger.log(
+        'Error',
+        "Failed to get schema from RPC agent at #{uri}: #{e.class} - #{e.message}\n#{e.backtrace.join("\n")}"
+      )
+    end
+
+    if schema.nil?
+      # return empty datasource for not breaking stack
+      ForestAdminDatasourceToolkit::Datasource.new
+    else
+      # Create schema polling client with configurable polling interval
+      # Priority: options[:schema_polling_interval] > ENV['SCHEMA_POLLING_INTERVAL'] > default (600)
+      polling_interval = if options[:schema_polling_interval]
+                           options[:schema_polling_interval]
+                         elsif ENV['SCHEMA_POLLING_INTERVAL']
+                           ENV['SCHEMA_POLLING_INTERVAL'].to_i
+                         else
+                           600 # 10 minutes by default
+                         end

-
-
-
-
-
-
-      ) do
-        Thread.new do
+      polling_options = {
+        polling_interval: polling_interval
+      }
+
+      schema_polling = Utils::SchemaPollingClient.new(uri, auth_secret, polling_options) do
+        # Callback when schema change is detected
         logger = ForestAdminAgent::Facades::Container.logger
-          logger.log('Info', '[RPCDatasource] Schema change detected, reloading agent
-
-          ForestAdminAgent::Builder::AgentFactory.instance.reload!
-          logger.log('Info', '[RPCDatasource] Agent reload completed successfully')
-        rescue StandardError => e
-          logger.log('Error', "[RPCDatasource] Agent reload failed: #{e.class} - #{e.message}")
-        end
+        logger.log('Info', '[RPCDatasource] Schema change detected, reloading agent...')
+        ForestAdminAgent::Builder::AgentFactory.instance.reload!
       end
-
+      schema_polling.start

-
-      # - Without introspection: crashes if RPC is unreachable
-      # - With introspection: falls back to introspection if RPC is unreachable
-      schema_polling.start?
+      datasource = ForestAdminDatasourceRpc::Datasource.new(options, schema, schema_polling)

-
-
-      raise ForestAdminDatasourceToolkit::Exceptions::ForestException,
-            'Fatal: Unable to build RPC datasource - no introspection schema was provided and schema fetch failed'
-    end
+      # Setup cleanup hooks for proper schema polling client shutdown
+      setup_cleanup_hooks(datasource)

-
-
+      datasource
+    end
   end

-  def self.
-
-
+  def self.setup_cleanup_hooks(datasource)
+    # Register cleanup handler for graceful shutdown
+    at_exit do
+      datasource.cleanup
+    rescue StandardError => e
+      # Silently ignore errors during exit cleanup to prevent test pollution
+      warn "[RPCDatasource] Error during at_exit cleanup: #{e.message}" if $VERBOSE
+    end

-    #
-
-
+    # Handle SIGINT (Ctrl+C)
+    Signal.trap('INT') do
+      begin
+        ForestAdminAgent::Facades::Container.logger&.log('Info', '[RPCDatasource] Received SIGINT, cleaning up...')
+      rescue StandardError
+        # Logger might not be available
+      end
+      datasource.cleanup
+      exit(0)
+    end

-
+    # Handle SIGTERM (default kill signal)
+    Signal.trap('TERM') do
+      begin
+        ForestAdminAgent::Facades::Container.logger&.log('Info', '[RPCDatasource] Received SIGTERM, cleaning up...')
+      rescue StandardError
+        # Logger might not be available
+      end
+      datasource.cleanup
+      exit(0)
+    end
+  end
 end
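The new `build` resolves the polling interval with the priority spelled out in the doc comment above (explicit option, then `ENV['SCHEMA_POLLING_INTERVAL']`, then 600 seconds), and `SchemaPollingClient` validates it against its 1-3600 second constants. A small standalone sketch of that resolution order; the clamp at the end mirrors the MIN/MAX constants, though whether the gem clamps or raises on out-of-range values is not shown in the diff.

```ruby
# Standalone sketch of the interval-resolution priority described above:
# explicit option > SCHEMA_POLLING_INTERVAL env var > 600-second default.
DEFAULT_POLLING_INTERVAL = 600
MIN_POLLING_INTERVAL = 1
MAX_POLLING_INTERVAL = 3600

def resolve_polling_interval(options, env = ENV)
  raw =
    if options[:schema_polling_interval]
      options[:schema_polling_interval]
    elsif env['SCHEMA_POLLING_INTERVAL']
      env['SCHEMA_POLLING_INTERVAL'].to_i
    else
      DEFAULT_POLLING_INTERVAL
    end
  raw.clamp(MIN_POLLING_INTERVAL, MAX_POLLING_INTERVAL) # range handling assumed
end

puts resolve_polling_interval({})                                        # => 600
puts resolve_polling_interval({}, { 'SCHEMA_POLLING_INTERVAL' => '30' }) # => 30
puts resolve_polling_interval({ schema_polling_interval: 10_000 })       # => 3600 in this sketch
```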
metadata
CHANGED

@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: forest_admin_datasource_rpc
 version: !ruby/object:Gem::Version
-  version: 1.22.1
+  version: 2.0.0
 platform: ruby
 authors:
 - Matthieu
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: exe
 cert_chain: []
-date:
+date: 2025-12-11 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: base64
@@ -25,20 +25,6 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
-- !ruby/object:Gem::Dependency
-  name: benchmark
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
 - !ruby/object:Gem::Dependency
   name: bigdecimal
   requirement: !ruby/object:Gem::Requirement
@@ -53,20 +39,6 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
-- !ruby/object:Gem::Dependency
-  name: cgi
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
 - !ruby/object:Gem::Dependency
   name: csv
   requirement: !ruby/object:Gem::Requirement
@@ -95,20 +67,6 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: '2.7'
-- !ruby/object:Gem::Dependency
-  name: logger
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
 - !ruby/object:Gem::Dependency
   name: mutex_m
   requirement: !ruby/object:Gem::Requirement
@@ -168,10 +126,8 @@ files:
 - lib/forest_admin_datasource_rpc.rb
 - lib/forest_admin_datasource_rpc/Utils/rpc_client.rb
 - lib/forest_admin_datasource_rpc/Utils/schema_polling_client.rb
-- lib/forest_admin_datasource_rpc/Utils/schema_polling_pool.rb
 - lib/forest_admin_datasource_rpc/collection.rb
 - lib/forest_admin_datasource_rpc/datasource.rb
-- lib/forest_admin_datasource_rpc/reconciliate_rpc.rb
 - lib/forest_admin_datasource_rpc/version.rb
 homepage: https://www.forestadmin.com
 licenses:
data/lib/forest_admin_datasource_rpc/Utils/schema_polling_pool.rb
DELETED

@@ -1,286 +0,0 @@
-require 'singleton'
-
-module ForestAdminDatasourceRpc
-  module Utils
-    # Thread pool manager for RPC schema polling.
-    # Uses a single scheduler thread that dispatches polling tasks to a bounded
-    # pool of worker threads, preventing thread exhaustion when many RPC slaves
-    # are configured.
-    #
-    # Design principles:
-    # - Minimal mutex hold times to avoid blocking HTTP request threads
-    # - Workers yield control frequently to prevent GIL starvation
-    # - Non-blocking queue operations where possible
-    class SchemaPollingPool
-      include Singleton
-
-      DEFAULT_MAX_THREADS = 5
-      MIN_THREADS = 1
-      MAX_THREADS = 50
-      SCHEDULER_INTERVAL = 1
-      INITIAL_STAGGER_WINDOW = 30
-
-      attr_reader :max_threads, :configured
-
-      def initialize
-        @mutex = Mutex.new
-        @clients = {}
-        @work_queue = Queue.new
-        @workers = []
-        @running = false
-        @max_threads = DEFAULT_MAX_THREADS
-        @shutdown_requested = false
-        @configured = false
-        @scheduler_thread = nil
-      end
-
-      # Configure the pool before starting. Must be called before any clients register.
-      # @param max_threads [Integer] Maximum number of worker threads (1-20)
-      def configure(max_threads:)
-        @mutex.synchronize do
-          raise 'Cannot configure pool while running' if @running
-
-          validated_max = max_threads.to_i.clamp(MIN_THREADS, MAX_THREADS)
-          @max_threads = validated_max
-          @configured = true
-
-          log('Info', "[SchemaPollingPool] Configured with max_threads: #{@max_threads}")
-        end
-      end
-
-      def register?(client_id, client)
-        should_start = false
-
-        @mutex.synchronize do
-          if @clients.key?(client_id)
-            log('Warn', "[SchemaPollingPool] Client #{client_id} already registered, skipping")
-            return false
-          end
-
-          @clients[client_id] = {
-            client: client,
-            last_poll_at: nil,
-            next_poll_at: calculate_initial_poll_time
-          }
-
-          log('Info', "[SchemaPollingPool] Registered client: #{client_id} (#{@clients.size} total clients)")
-
-          should_start = !@running
-        end
-
-        start_pool if should_start
-
-        true
-      end
-
-      def unregister?(client_id)
-        should_stop = false
-
-        @mutex.synchronize do
-          unless @clients.key?(client_id)
-            log('Debug', "[SchemaPollingPool] Client #{client_id} not found for unregister")
-            return false
-          end
-
-          @clients.delete(client_id)
-          log('Info', "[SchemaPollingPool] Unregistered client: #{client_id} (#{@clients.size} remaining)")
-
-          should_stop = @clients.empty? && @running
-        end
-
-        stop_pool if should_stop
-
-        true
-      end
-
-      def client_count
-        @mutex.synchronize { @clients.size }
-      end
-
-      def running?
-        @mutex.synchronize { @running }
-      end
-
-      def shutdown!
-        @mutex.synchronize do
-          return unless @running
-
-          @shutdown_requested = true
-        end
-
-        stop_pool
-
-        @mutex.synchronize do
-          @clients.clear
-          @shutdown_requested = false
-        end
-      end
-
-      def reset!
-        shutdown!
-        @mutex.synchronize do
-          @max_threads = DEFAULT_MAX_THREADS
-          @configured = false
-        end
-      end
-
-      private
-
-      def start_pool
-        @mutex.synchronize do
-          return if @running
-
-          @running = true
-          @shutdown_requested = false
-
-          thread_count = @clients.size.clamp(MIN_THREADS, @max_threads)
-
-          log('Info',
-              "[SchemaPollingPool] Starting pool with #{thread_count} worker threads for #{@clients.size} clients")
-
-          thread_count.times do |i|
-            @workers << Thread.new { worker_loop(i) }
-          end
-
-          @scheduler_thread = Thread.new { scheduler_loop }
-        end
-      end
-
-      def stop_pool
-        workers_to_join = nil
-        scheduler_to_join = nil
-
-        @mutex.synchronize do
-          return unless @running
-
-          log('Info', '[SchemaPollingPool] Stopping pool...')
-
-          @running = false
-
-          @workers.size.times { @work_queue << nil }
-
-          workers_to_join = @workers.dup
-          scheduler_to_join = @scheduler_thread
-
-          @workers.clear
-          @scheduler_thread = nil
-        end
-
-        workers_to_join&.each { |w| w.join(2) }
-        scheduler_to_join&.join(2)
-
-        @work_queue.clear
-
-        log('Info', '[SchemaPollingPool] Pool stopped')
-      end
-
-      def worker_loop(worker_id)
-        log('Debug', "[SchemaPollingPool] Worker #{worker_id} started")
-
-        loop do
-          task = fetch_next_task
-          break if task.nil?
-
-          process_task(task, worker_id)
-          Thread.pass
-        end
-
-        log('Debug', "[SchemaPollingPool] Worker #{worker_id} stopped")
-      end
-
-      def fetch_next_task
-        @work_queue.pop(true)
-      rescue ThreadError
-        Thread.pass
-        sleep(0.1)
-        retry if @running
-        nil
-      end
-
-      def process_task(task, worker_id)
-        client_id = task[:client_id]
-        execute_poll(client_id)
-      rescue StandardError => e
-        log('Error',
-            "[SchemaPollingPool] Worker #{worker_id} error polling #{client_id}: #{e.class} - #{e.message}")
-      end
-
-      def scheduler_loop
-        log('Debug', '[SchemaPollingPool] Scheduler started')
-
-        while @running
-          sleep_with_check(SCHEDULER_INTERVAL)
-
-          next unless @running
-
-          schedule_due_polls
-        end
-
-        log('Debug', '[SchemaPollingPool] Scheduler stopped')
-      end
-
-      def sleep_with_check(duration)
-        remaining = duration
-        while remaining.positive? && @running
-          sleep_time = [remaining, 1.0].min
-          sleep(sleep_time)
-          remaining -= sleep_time
-          Thread.pass
-        end
-      end
-
-      def schedule_due_polls
-        now = Time.now
-        polls_to_schedule = []
-
-        @mutex.synchronize do
-          @clients.each do |client_id, state|
-            next if state[:next_poll_at].nil?
-            next if now < state[:next_poll_at]
-
-            polls_to_schedule << client_id
-
-            interval = state[:client].instance_variable_get(:@polling_interval) || 600
-            state[:next_poll_at] = now + interval
-          end
-        end
-
-        polls_to_schedule.each do |client_id|
-          @work_queue << { client_id: client_id, scheduled_at: now }
-        end
-      end
-
-      def execute_poll(client_id)
-        client = nil
-        @mutex.synchronize do
-          state = @clients[client_id]
-          client = state[:client] if state
-        end
-
-        return unless client
-        return if client.closed
-
-        log('Debug', "[SchemaPollingPool] Polling client: #{client_id}")
-
-        client.check_schema
-
-        @mutex.synchronize do
-          @clients[client_id][:last_poll_at] = Time.now if @clients[client_id]
-        end
-      end
-
-      def calculate_initial_poll_time
-        # Stagger initial polls to avoid thundering herd
-        Time.now + rand(0.0..INITIAL_STAGGER_WINDOW.to_f)
-      end
-
-      def log(level, message)
-        return unless defined?(ForestAdminAgent::Facades::Container)
-
-        ForestAdminAgent::Facades::Container.logger&.log(level, message)
-      rescue StandardError
-        # Ignore logging errors to prevent cascading failures
-      end
-    end
-  end
-end
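For context on what was dropped: the removed pool pushed due polls onto a shared Queue consumed by a bounded set of worker threads, using nil as a per-worker shutdown sentinel. A compact, self-contained sketch of that dispatch pattern (a simplification of the removed class, not a replacement for it):

```ruby
# Queue-plus-bounded-workers pattern: a shared Queue of tasks, N worker
# threads, and nil pushed once per worker as a shutdown sentinel.
queue = Queue.new
workers = 3.times.map do |i|
  Thread.new do
    loop do
      task = queue.pop
      break if task.nil? # sentinel: stop this worker

      puts "worker #{i} polling #{task[:client_id]}"
    end
  end
end

%w[rpc-a rpc-b rpc-c rpc-d].each { |id| queue << { client_id: id } }
workers.size.times { queue << nil } # one sentinel per worker
workers.each(&:join)
```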
data/lib/forest_admin_datasource_rpc/reconciliate_rpc.rb
DELETED

@@ -1,71 +0,0 @@
-module ForestAdminDatasourceRpc
-  class ReconciliateRpc < ForestAdminDatasourceCustomizer::Plugins::Plugin
-    def run(datasource_customizer, _collection_customizer = nil, options = {})
-      datasource_customizer.composite_datasource.datasources.each do |datasource|
-        real_datasource = get_datasource(datasource)
-        next unless real_datasource.is_a?(ForestAdminDatasourceRpc::Datasource)
-
-        # Disable search for non-searchable collections
-        real_datasource.collections.each_value do |collection|
-          unless collection.schema[:searchable]
-            cz = datasource_customizer.get_collection(get_collection_name(options[:rename], collection.name))
-            cz.disable_search
-          end
-        end
-
-        # Add relations from rpc_relations
-        (real_datasource.rpc_relations || {}).each do |collection_name, relations|
-          collection_name = get_collection_name(options[:rename], collection_name)
-          cz = datasource_customizer.get_collection(collection_name)
-
-          relations.each do |relation_name, relation_definition|
-            add_relation(cz, options[:rename], relation_name.to_s, relation_definition)
-          end
-        end
-      end
-    end
-
-    private
-
-    def get_datasource(datasource)
-      # can be publication -> rename deco or a custom one
-      while datasource.is_a?(ForestAdminDatasourceToolkit::Decorators::DatasourceDecorator)
-        datasource = datasource.child_datasource
-      end
-
-      datasource
-    end
-
-    def get_collection_name(renames, collection_name)
-      name = collection_name
-
-      if renames.is_a?(Proc)
-        name = renames.call(collection_name)
-      elsif renames.is_a?(Hash) && renames.key?(collection_name.to_s)
-        name = renames[collection_name.to_s]
-      end
-
-      name
-    end
-
-    def add_relation(collection_customizer, renames, relation_name, relation_definition)
-      relation = relation_definition.transform_keys(&:to_sym)
-      foreign_collection = get_collection_name(renames, relation[:foreign_collection])
-      options = relation.except(:type, :foreign_collection, :through_collection)
-
-      case relation[:type]
-      when 'ManyToMany'
-        through_collection = get_collection_name(renames, relation[:through_collection])
-        collection_customizer.add_many_to_many_relation(relation_name, foreign_collection, through_collection, options)
-      when 'OneToMany'
-        collection_customizer.add_one_to_many_relation(relation_name, foreign_collection, options)
-      when 'OneToOne'
-        collection_customizer.add_one_to_one_relation(relation_name, foreign_collection, options)
-      when 'ManyToOne'
-        collection_customizer.add_many_to_one_relation(relation_name, foreign_collection, options)
-      else
-        raise ForestAdminDatasourceToolkit::Exceptions::ForestException, "Unsupported relation type: #{relation[:type]}"
-      end
-    end
-  end
-end
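The removed plugin resolved renamed collection names by accepting either a Proc or a Hash in options[:rename] before wiring cross-datasource relations. A tiny standalone illustration of that lookup (the sample data is invented):

```ruby
# Proc-or-Hash rename lookup, mirroring the removed get_collection_name.
def resolve_name(renames, collection_name)
  if renames.is_a?(Proc)
    renames.call(collection_name)
  elsif renames.is_a?(Hash) && renames.key?(collection_name.to_s)
    renames[collection_name.to_s]
  else
    collection_name
  end
end

puts resolve_name(->(n) { "rpc_#{n}" }, 'users')       # => "rpc_users"
puts resolve_name({ 'users' => 'customers' }, 'users') # => "customers"
puts resolve_name(nil, 'users')                        # => "users"
```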