liteguard 0.2.20260314
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE +158 -0
- data/README.md +66 -0
- data/lib/liteguard/client.rb +1282 -0
- data/lib/liteguard/evaluation.rb +104 -0
- data/lib/liteguard/scope.rb +159 -0
- data/lib/liteguard/types.rb +134 -0
- data/lib/liteguard.rb +19 -0
- metadata +85 -0
|
@@ -0,0 +1,1282 @@
|
|
|
1
|
+
require "json"
|
|
2
|
+
require "net/http"
|
|
3
|
+
require "objspace"
|
|
4
|
+
require "uri"
|
|
5
|
+
require "monitor"
|
|
6
|
+
|
|
7
|
+
module Liteguard
|
|
8
|
+
# Core Liteguard SDK client.
|
|
9
|
+
#
|
|
10
|
+
# This class is the primary Ruby SDK entrypoint.
|
|
11
|
+
class Client
|
|
12
|
+
DEFAULT_BACKEND_URL = "https://api.liteguard.io"
|
|
13
|
+
DEFAULT_REFRESH_RATE = 30
|
|
14
|
+
DEFAULT_FLUSH_RATE = 10
|
|
15
|
+
DEFAULT_FLUSH_SIZE = 500
|
|
16
|
+
DEFAULT_HTTP_TIMEOUT = 4
|
|
17
|
+
DEFAULT_FLUSH_BUFFER_MULTIPLIER = 4
|
|
18
|
+
PUBLIC_BUNDLE_KEY = "".freeze
|
|
19
|
+
|
|
20
|
+
GuardBundle = Struct.new(
|
|
21
|
+
:key,
|
|
22
|
+
:guards,
|
|
23
|
+
:ready,
|
|
24
|
+
:etag,
|
|
25
|
+
:protected_context,
|
|
26
|
+
:refresh_rate_seconds,
|
|
27
|
+
keyword_init: true
|
|
28
|
+
)
|
|
29
|
+
|
|
30
|
+
# Create a client instance.
|
|
31
|
+
#
|
|
32
|
+
# The client remains idle until {#start} is called.
|
|
33
|
+
#
|
|
34
|
+
# @param project_client_key_id [String] project client key identifier from
|
|
35
|
+
# the Liteguard control plane
|
|
36
|
+
# @param opts [Hash] initialization options
|
|
37
|
+
# @option opts [String, nil] :environment environment slug to send with API
|
|
38
|
+
# requests
|
|
39
|
+
# @option opts [Boolean] :fallback result returned when a bundle is not yet
|
|
40
|
+
# ready
|
|
41
|
+
# @option opts [Integer] :refresh_rate_seconds minimum refresh interval for
|
|
42
|
+
# guard bundles
|
|
43
|
+
# @option opts [Integer] :flush_rate_seconds telemetry flush interval
|
|
44
|
+
# @option opts [Integer] :flush_size number of signals buffered before an
|
|
45
|
+
# eager flush
|
|
46
|
+
# @option opts [Integer] :http_timeout_seconds connect and read timeout for
|
|
47
|
+
# API calls
|
|
48
|
+
# @option opts [Integer] :flush_buffer_multiplier multiplier used to cap the
|
|
49
|
+
# in-memory signal queue size
|
|
50
|
+
# @option opts [String] :backend_url base Liteguard API URL
|
|
51
|
+
# @option opts [Boolean] :quiet suppress warning output when `true`
|
|
52
|
+
# @option opts [Boolean] :disable_measurement disable telemetry measurements
|
|
53
|
+
# @return [void]
|
|
54
|
+
def initialize(project_client_key_id, opts = {})
|
|
55
|
+
@project_client_key_id = project_client_key_id
|
|
56
|
+
@environment = opts.fetch(:environment, "").to_s
|
|
57
|
+
@fallback = opts.fetch(:fallback, false)
|
|
58
|
+
@refresh_rate = normalize_positive_option(opts[:refresh_rate_seconds], DEFAULT_REFRESH_RATE)
|
|
59
|
+
@flush_rate = normalize_positive_option(opts[:flush_rate_seconds], DEFAULT_FLUSH_RATE)
|
|
60
|
+
@flush_size = normalize_positive_option(opts[:flush_size], DEFAULT_FLUSH_SIZE)
|
|
61
|
+
@http_timeout_seconds = normalize_positive_option(opts[:http_timeout_seconds], DEFAULT_HTTP_TIMEOUT)
|
|
62
|
+
@flush_buffer_multiplier = normalize_positive_option(
|
|
63
|
+
opts[:flush_buffer_multiplier],
|
|
64
|
+
DEFAULT_FLUSH_BUFFER_MULTIPLIER
|
|
65
|
+
)
|
|
66
|
+
@backend_url = opts.fetch(:backend_url, DEFAULT_BACKEND_URL).to_s.chomp("/")
|
|
67
|
+
@backend_url = DEFAULT_BACKEND_URL if @backend_url.empty?
|
|
68
|
+
@quiet = opts.fetch(:quiet, true)
|
|
69
|
+
@disable_measurement = opts.fetch(:disable_measurement, false)
|
|
70
|
+
|
|
71
|
+
@monitor = Monitor.new
|
|
72
|
+
@refresh_cond = @monitor.new_cond
|
|
73
|
+
@flush_cond = @monitor.new_cond
|
|
74
|
+
@stopped = false
|
|
75
|
+
@refresh_wakeup_requested = false
|
|
76
|
+
@flush_requested = false
|
|
77
|
+
@async_flush_scheduled = false
|
|
78
|
+
@current_refresh_rate = @refresh_rate
|
|
79
|
+
|
|
80
|
+
@bundles = { PUBLIC_BUNDLE_KEY => create_empty_bundle(PUBLIC_BUNDLE_KEY, nil) }
|
|
81
|
+
@default_scope = Scope.new(self, {}, PUBLIC_BUNDLE_KEY, nil)
|
|
82
|
+
@active_scope_key = "liteguard_active_scope_#{object_id}"
|
|
83
|
+
|
|
84
|
+
@signal_buffer = []
|
|
85
|
+
@dropped_signals_pending = 0
|
|
86
|
+
@reported_unadopted_guards = {}
|
|
87
|
+
@pending_unadopted_guards = {}
|
|
88
|
+
@rate_limit_state = {}
|
|
89
|
+
end
|
|
90
|
+
|
|
91
|
+
# Perform the initial bundle fetch and start background worker threads.
|
|
92
|
+
#
|
|
93
|
+
# @return [void]
|
|
94
|
+
def start
|
|
95
|
+
fetch_guards_for_bundle(PUBLIC_BUNDLE_KEY)
|
|
96
|
+
@refresh_thread = Thread.new { refresh_loop }
|
|
97
|
+
@flush_thread = Thread.new { flush_loop }
|
|
98
|
+
@refresh_thread.abort_on_exception = false
|
|
99
|
+
@flush_thread.abort_on_exception = false
|
|
100
|
+
end
|
|
101
|
+
|
|
102
|
+
# Stop background workers and flush remaining telemetry.
|
|
103
|
+
#
|
|
104
|
+
# @return [void]
|
|
105
|
+
def shutdown
|
|
106
|
+
@monitor.synchronize do
|
|
107
|
+
@stopped = true
|
|
108
|
+
@flush_requested = true
|
|
109
|
+
@refresh_cond.broadcast
|
|
110
|
+
@flush_cond.broadcast
|
|
111
|
+
end
|
|
112
|
+
shutdown_timeout = @http_timeout_seconds
|
|
113
|
+
@refresh_thread&.join(shutdown_timeout)
|
|
114
|
+
@flush_thread&.join(shutdown_timeout)
|
|
115
|
+
flush_signals
|
|
116
|
+
end
|
|
117
|
+
|
|
118
|
+
# Run a block in a correlated execution scope.
|
|
119
|
+
#
|
|
120
|
+
# Signals emitted inside the block share an execution identifier and parent
|
|
121
|
+
# relationships so that related checks and executions can be stitched
|
|
122
|
+
# together server-side.
|
|
123
|
+
#
|
|
124
|
+
# @yield Runs inside a correlated execution scope
|
|
125
|
+
# @return [Object] the block return value
|
|
126
|
+
def with_execution
|
|
127
|
+
existing = Thread.current[:liteguard_execution_state]
|
|
128
|
+
return yield if existing
|
|
129
|
+
|
|
130
|
+
Thread.current[:liteguard_execution_state] = {
|
|
131
|
+
execution_id: next_signal_id,
|
|
132
|
+
sequence_number: 0,
|
|
133
|
+
last_signal_id: nil,
|
|
134
|
+
}
|
|
135
|
+
begin
|
|
136
|
+
yield
|
|
137
|
+
ensure
|
|
138
|
+
Thread.current[:liteguard_execution_state] = nil
|
|
139
|
+
end
|
|
140
|
+
end
|
|
141
|
+
|
|
142
|
+
# ---------------------------------------------------------------------
|
|
143
|
+
# Scope API
|
|
144
|
+
# ---------------------------------------------------------------------
|
|
145
|
+
|
|
146
|
+
# Create an immutable request scope for explicit evaluation.
|
|
147
|
+
#
|
|
148
|
+
# @param properties [Hash] request-scoped properties to attach
|
|
149
|
+
# @return [Scope] a new immutable scope
|
|
150
|
+
def create_scope(properties = {})
|
|
151
|
+
Scope.new(self, normalize_properties(properties), PUBLIC_BUNDLE_KEY, nil)
|
|
152
|
+
end
|
|
153
|
+
|
|
154
|
+
# Return the active scope for the current thread.
|
|
155
|
+
#
|
|
156
|
+
# @return [Scope] the active scope, or the default scope when none is bound
|
|
157
|
+
def active_scope
|
|
158
|
+
scope = Thread.current[@active_scope_key]
|
|
159
|
+
return scope if scope.is_a?(Scope) && scope.belongs_to?(self)
|
|
160
|
+
|
|
161
|
+
@default_scope
|
|
162
|
+
end
|
|
163
|
+
|
|
164
|
+
# Bind a scope for the duration of a block.
|
|
165
|
+
#
|
|
166
|
+
# @param scope [Scope, nil] scope to bind, or `nil` to reuse the current
|
|
167
|
+
# scope
|
|
168
|
+
# @yield Runs with the resolved scope bound as active
|
|
169
|
+
# @return [Object] the block return value
|
|
170
|
+
# @raise [ArgumentError] if no block is given or the scope belongs to a
|
|
171
|
+
# different client
|
|
172
|
+
def with_scope(scope)
|
|
173
|
+
raise ArgumentError, "with_scope requires a block" unless block_given?
|
|
174
|
+
|
|
175
|
+
resolved = resolve_scope(scope)
|
|
176
|
+
previous = Thread.current[@active_scope_key]
|
|
177
|
+
Thread.current[@active_scope_key] = resolved
|
|
178
|
+
begin
|
|
179
|
+
yield
|
|
180
|
+
ensure
|
|
181
|
+
if previous.nil?
|
|
182
|
+
Thread.current[@active_scope_key] = nil
|
|
183
|
+
else
|
|
184
|
+
Thread.current[@active_scope_key] = previous
|
|
185
|
+
end
|
|
186
|
+
end
|
|
187
|
+
end
|
|
188
|
+
|
|
189
|
+
# Merge properties over the current scope for the duration of a block.
|
|
190
|
+
#
|
|
191
|
+
# @param properties [Hash] properties to merge
|
|
192
|
+
# @yield Runs with a derived scope bound as active
|
|
193
|
+
# @return [Object] the block return value
|
|
194
|
+
# @raise [ArgumentError] if no block is given
|
|
195
|
+
def with_properties(properties)
|
|
196
|
+
raise ArgumentError, "with_properties requires a block" unless block_given?
|
|
197
|
+
|
|
198
|
+
with_scope(active_scope.with_properties(properties)) { yield }
|
|
199
|
+
end
|
|
200
|
+
|
|
201
|
+
# Bind a protected context for the duration of a block.
|
|
202
|
+
#
|
|
203
|
+
# @param protected_context [ProtectedContext, Hash] signed protected context
|
|
204
|
+
# @yield Runs with a derived protected-context scope bound as active
|
|
205
|
+
# @return [Object] the block return value
|
|
206
|
+
# @raise [ArgumentError] if no block is given
|
|
207
|
+
def with_protected_context(protected_context)
|
|
208
|
+
raise ArgumentError, "with_protected_context requires a block" unless block_given?
|
|
209
|
+
|
|
210
|
+
with_scope(active_scope.bind_protected_context(protected_context)) { yield }
|
|
211
|
+
end
|
|
212
|
+
|
|
213
|
+
# Replace the active scope with one that includes merged properties.
|
|
214
|
+
#
|
|
215
|
+
# @param properties [Hash] properties to merge into the active scope
|
|
216
|
+
# @return [Scope] the derived active scope
|
|
217
|
+
def add_properties(properties)
|
|
218
|
+
replace_current_scope(active_scope.with_properties(properties))
|
|
219
|
+
end
|
|
220
|
+
|
|
221
|
+
# Replace the active scope with one that omits the named properties.
|
|
222
|
+
#
|
|
223
|
+
# @param names [Array<String, Symbol>] property names to remove
|
|
224
|
+
# @return [Scope] the derived active scope
|
|
225
|
+
def clear_properties(names)
|
|
226
|
+
replace_current_scope(active_scope.clear_properties(Array(names).map(&:to_s)))
|
|
227
|
+
end
|
|
228
|
+
|
|
229
|
+
# Replace the active scope with an empty property scope.
|
|
230
|
+
#
|
|
231
|
+
# @return [Scope] the derived active scope
|
|
232
|
+
def reset_properties
|
|
233
|
+
replace_current_scope(active_scope.reset_properties)
|
|
234
|
+
end
|
|
235
|
+
|
|
236
|
+
# Replace the active scope with a protected-context-derived scope.
|
|
237
|
+
#
|
|
238
|
+
# @param protected_context [ProtectedContext, Hash] signed protected context
|
|
239
|
+
# @return [Scope] the derived active scope
|
|
240
|
+
def bind_protected_context(protected_context)
|
|
241
|
+
replace_current_scope(active_scope.bind_protected_context(protected_context))
|
|
242
|
+
end
|
|
243
|
+
|
|
244
|
+
# Replace the active scope with one using the public bundle.
|
|
245
|
+
#
|
|
246
|
+
# @return [Scope] the derived active scope
|
|
247
|
+
def clear_protected_context
|
|
248
|
+
replace_current_scope(active_scope.clear_protected_context)
|
|
249
|
+
end
|
|
250
|
+
|
|
251
|
+
# Return the current thread's evaluation properties.
|
|
252
|
+
#
|
|
253
|
+
# @return [Hash] active scope properties
|
|
254
|
+
def context
|
|
255
|
+
active_scope.properties
|
|
256
|
+
end
|
|
257
|
+
|
|
258
|
+
# Replace the current thread's active scope.
|
|
259
|
+
#
|
|
260
|
+
# Mutation helpers are intentionally thread-local so request-scoped data
|
|
261
|
+
# cannot leak through the process-wide default scope.
|
|
262
|
+
#
|
|
263
|
+
# @param scope [Scope] scope to install
|
|
264
|
+
# @return [Scope] the resolved scope
|
|
265
|
+
def replace_current_scope(scope)
|
|
266
|
+
resolved = resolve_scope(scope)
|
|
267
|
+
Thread.current[@active_scope_key] = resolved
|
|
268
|
+
resolved
|
|
269
|
+
end
|
|
270
|
+
|
|
271
|
+
# Resolve a scope argument and verify that it belongs to this client.
|
|
272
|
+
#
|
|
273
|
+
# @param scope [Scope, nil] candidate scope
|
|
274
|
+
# @return [Scope] resolved scope
|
|
275
|
+
# @raise [ArgumentError] if the scope belongs to a different client
|
|
276
|
+
def resolve_scope(scope)
|
|
277
|
+
resolved = scope || active_scope
|
|
278
|
+
raise ArgumentError, "[liteguard] scope belongs to a different client" unless resolved.belongs_to?(self)
|
|
279
|
+
|
|
280
|
+
resolved
|
|
281
|
+
end
|
|
282
|
+
|
|
283
|
+
# Derive a scope bound to the bundle for the given protected context.
|
|
284
|
+
#
|
|
285
|
+
# @param scope [Scope] base scope
|
|
286
|
+
# @param protected_context [ProtectedContext, Hash] signed protected context
|
|
287
|
+
# @return [Scope] a derived scope
|
|
288
|
+
def bind_protected_context_to_scope(scope, protected_context)
|
|
289
|
+
resolve_scope(scope)
|
|
290
|
+
normalized = normalize_protected_context(protected_context)
|
|
291
|
+
bundle_key = ensure_bundle_for_protected_context(normalized)
|
|
292
|
+
Scope.new(self, scope.properties, bundle_key, normalized)
|
|
293
|
+
end
|
|
294
|
+
|
|
295
|
+
# Ensure the public bundle is available before returning.
|
|
296
|
+
#
|
|
297
|
+
# @return [void]
|
|
298
|
+
def ensure_public_bundle_ready
|
|
299
|
+
bundle = @monitor.synchronize { @bundles[PUBLIC_BUNDLE_KEY] }
|
|
300
|
+
return if bundle&.ready
|
|
301
|
+
|
|
302
|
+
fetch_guards_for_bundle(PUBLIC_BUNDLE_KEY)
|
|
303
|
+
end
|
|
304
|
+
|
|
305
|
+
# ---------------------------------------------------------------------
|
|
306
|
+
# Core API
|
|
307
|
+
# ---------------------------------------------------------------------
|
|
308
|
+
|
|
309
|
+
# Evaluate a guard in the active scope and emit telemetry.
|
|
310
|
+
#
|
|
311
|
+
# @param name [String] guard name to evaluate
|
|
312
|
+
# @param options [Hash, nil] optional per-call overrides
|
|
313
|
+
# @return [Boolean] `true` when the guard resolves open
|
|
314
|
+
def is_open(name, options = nil, **legacy_options)
|
|
315
|
+
options = normalize_is_open_options(options, legacy_options)
|
|
316
|
+
evaluate_guard_in_scope(active_scope, name.to_s, options, emit_signal: true)[:result]
|
|
317
|
+
end
|
|
318
|
+
|
|
319
|
+
# Evaluate a guard in the provided scope and emit telemetry.
|
|
320
|
+
#
|
|
321
|
+
# @param scope [Scope] scope to evaluate against
|
|
322
|
+
# @param name [String] guard name to evaluate
|
|
323
|
+
# @param options [Hash, nil] optional per-call overrides
|
|
324
|
+
# @return [Boolean] `true` when the guard resolves open
|
|
325
|
+
def is_open_in_scope(scope, name, options = nil, **legacy_options)
|
|
326
|
+
options = normalize_is_open_options(options, legacy_options)
|
|
327
|
+
evaluate_guard_in_scope(scope, name.to_s, options, emit_signal: true)[:result]
|
|
328
|
+
end
|
|
329
|
+
|
|
330
|
+
# Evaluate a guard in the active scope without emitting telemetry.
|
|
331
|
+
#
|
|
332
|
+
# @param name [String] guard name to evaluate
|
|
333
|
+
# @param options [Hash, nil] optional per-call overrides
|
|
334
|
+
# @return [Boolean] `true` when the guard resolves open
|
|
335
|
+
def peek_is_open(name, options = nil, **legacy_options)
|
|
336
|
+
options = normalize_is_open_options(options, legacy_options)
|
|
337
|
+
evaluate_guard_in_scope(active_scope, name.to_s, options, emit_signal: false)[:result]
|
|
338
|
+
end
|
|
339
|
+
|
|
340
|
+
# Evaluate a guard in the provided scope without emitting telemetry.
|
|
341
|
+
#
|
|
342
|
+
# @param scope [Scope] scope to evaluate against
|
|
343
|
+
# @param name [String] guard name to evaluate
|
|
344
|
+
# @param options [Hash, nil] optional per-call overrides
|
|
345
|
+
# @return [Boolean] `true` when the guard resolves open
|
|
346
|
+
def peek_is_open_in_scope(scope, name, options = nil, **legacy_options)
|
|
347
|
+
options = normalize_is_open_options(options, legacy_options)
|
|
348
|
+
evaluate_guard_in_scope(scope, name.to_s, options, emit_signal: false)[:result]
|
|
349
|
+
end
|
|
350
|
+
|
|
351
|
+
# Evaluate a guard and execute the block only when it resolves open.
|
|
352
|
+
#
|
|
353
|
+
# @param name [String] guard name to evaluate
|
|
354
|
+
# @param options [Hash, nil] optional per-call overrides
|
|
355
|
+
# @yield Runs only when the guard resolves open
|
|
356
|
+
# @return [Object, nil] the block return value, or `nil` when the guard is
|
|
357
|
+
# closed
|
|
358
|
+
# @raise [ArgumentError] if no block is given
|
|
359
|
+
def execute_if_open(name, options = nil, **legacy_options)
|
|
360
|
+
raise ArgumentError, "execute_if_open requires a block" unless block_given?
|
|
361
|
+
|
|
362
|
+
with_execution do
|
|
363
|
+
normalized_options = normalize_is_open_options(options, legacy_options)
|
|
364
|
+
evaluation = evaluate_guard_in_scope(active_scope, name.to_s, normalized_options, emit_signal: true)
|
|
365
|
+
return nil unless evaluation[:result]
|
|
366
|
+
return yield if evaluation[:signal].nil?
|
|
367
|
+
|
|
368
|
+
measurement_enabled = measurement_enabled?(evaluation[:guard], normalized_options)
|
|
369
|
+
started_at = Process.clock_gettime(Process::CLOCK_MONOTONIC, :nanosecond)
|
|
370
|
+
begin
|
|
371
|
+
value = yield
|
|
372
|
+
buffer_signal(
|
|
373
|
+
name.to_s,
|
|
374
|
+
true,
|
|
375
|
+
evaluation[:props],
|
|
376
|
+
kind: "guard_execution",
|
|
377
|
+
measurement: measurement_enabled ? capture_guard_execution_measurement(started_at, true) : nil,
|
|
378
|
+
parent_signal_id_override: evaluation[:signal].signal_id
|
|
379
|
+
)
|
|
380
|
+
value
|
|
381
|
+
rescue Exception => e # rubocop:disable Lint/RescueException
|
|
382
|
+
buffer_signal(
|
|
383
|
+
name.to_s,
|
|
384
|
+
true,
|
|
385
|
+
evaluation[:props],
|
|
386
|
+
kind: "guard_execution",
|
|
387
|
+
measurement: measurement_enabled ? capture_guard_execution_measurement(started_at, false, e) : nil,
|
|
388
|
+
parent_signal_id_override: evaluation[:signal].signal_id
|
|
389
|
+
)
|
|
390
|
+
raise
|
|
391
|
+
end
|
|
392
|
+
end
|
|
393
|
+
end
|
|
394
|
+
|
|
395
|
+
# Scope-aware wrapper for {#execute_if_open}.
|
|
396
|
+
#
|
|
397
|
+
# @param scope [Scope] scope to evaluate against
|
|
398
|
+
# @param name [String] guard name to evaluate
|
|
399
|
+
# @param options [Hash, nil] optional per-call overrides
|
|
400
|
+
# @yield Runs only when the guard resolves open
|
|
401
|
+
# @return [Object, nil] the block return value, or `nil` when the guard is
|
|
402
|
+
# closed
|
|
403
|
+
def execute_if_open_in_scope(scope, name, options = nil, **legacy_options, &block)
|
|
404
|
+
with_scope(scope) { execute_if_open(name, options, **legacy_options, &block) }
|
|
405
|
+
end
|
|
406
|
+
|
|
407
|
+
# Evaluate a guard within a resolved scope and optionally emit telemetry.
|
|
408
|
+
#
|
|
409
|
+
# @param scope [Scope] scope to evaluate against
|
|
410
|
+
# @param name [String] guard name to evaluate
|
|
411
|
+
# @param options [Hash] normalized evaluation options
|
|
412
|
+
# @param emit_signal [Boolean] whether to buffer a `guard_check` signal
|
|
413
|
+
# @return [Hash] evaluation result metadata including `:result`, `:guard`,
|
|
414
|
+
# `:props`, and `:signal`
|
|
415
|
+
def evaluate_guard_in_scope(scope, name, options, emit_signal:)
|
|
416
|
+
resolved_scope = resolve_scope(scope)
|
|
417
|
+
bundle = bundle_for_scope(resolved_scope)
|
|
418
|
+
effective_fallback = options[:fallback].nil? ? @fallback : options[:fallback]
|
|
419
|
+
return { result: effective_fallback, guard: nil, props: nil, signal: nil } unless bundle.ready
|
|
420
|
+
|
|
421
|
+
guard = bundle.guards[name]
|
|
422
|
+
if guard.nil?
|
|
423
|
+
record_unadopted_guard(name)
|
|
424
|
+
return { result: true, guard: nil, props: nil, signal: nil }
|
|
425
|
+
end
|
|
426
|
+
unless guard.adopted
|
|
427
|
+
record_unadopted_guard(name)
|
|
428
|
+
return { result: true, guard: guard, props: nil, signal: nil }
|
|
429
|
+
end
|
|
430
|
+
|
|
431
|
+
props = resolved_scope.properties
|
|
432
|
+
if options[:properties]
|
|
433
|
+
props = props.merge(options[:properties])
|
|
434
|
+
end
|
|
435
|
+
|
|
436
|
+
result = Evaluation.evaluate_guard(guard, props)
|
|
437
|
+
if result && guard.rate_limit_per_minute.to_i > 0
|
|
438
|
+
result = if emit_signal
|
|
439
|
+
check_rate_limit(name, guard.rate_limit_per_minute, guard.rate_limit_properties, props)
|
|
440
|
+
else
|
|
441
|
+
would_pass_rate_limit(name, guard.rate_limit_per_minute, guard.rate_limit_properties, props)
|
|
442
|
+
end
|
|
443
|
+
end
|
|
444
|
+
|
|
445
|
+
signal = nil
|
|
446
|
+
if emit_signal
|
|
447
|
+
signal = buffer_signal(
|
|
448
|
+
name,
|
|
449
|
+
result,
|
|
450
|
+
props,
|
|
451
|
+
kind: "guard_check",
|
|
452
|
+
measurement: measurement_enabled?(guard, options) ? capture_guard_check_measurement : nil
|
|
453
|
+
)
|
|
454
|
+
end
|
|
455
|
+
|
|
456
|
+
{ result: result, guard: guard, props: props, signal: signal }
|
|
457
|
+
end
|
|
458
|
+
|
|
459
|
+
# ---------------------------------------------------------------------
|
|
460
|
+
# Signals
|
|
461
|
+
# ---------------------------------------------------------------------
|
|
462
|
+
|
|
463
|
+
# Flush all buffered telemetry and unadopted guard reports.
|
|
464
|
+
#
|
|
465
|
+
# @return [void]
|
|
466
|
+
def flush_signals
|
|
467
|
+
batch, unadopted_guard_names = @monitor.synchronize do
|
|
468
|
+
buffered = @signal_buffer.dup
|
|
469
|
+
@signal_buffer.clear
|
|
470
|
+
names = @pending_unadopted_guards.keys.sort
|
|
471
|
+
@pending_unadopted_guards.clear
|
|
472
|
+
[buffered, names]
|
|
473
|
+
end
|
|
474
|
+
return if batch.empty? && unadopted_guard_names.empty?
|
|
475
|
+
|
|
476
|
+
flush_signal_batch(batch) unless batch.empty?
|
|
477
|
+
flush_unadopted_guards(unadopted_guard_names) unless unadopted_guard_names.empty?
|
|
478
|
+
end
|
|
479
|
+
|
|
480
|
+
# Upload a batch of buffered signals.
|
|
481
|
+
#
|
|
482
|
+
# Failed uploads are returned to the in-memory queue subject to buffer
|
|
483
|
+
# limits.
|
|
484
|
+
#
|
|
485
|
+
# @param batch [Array<Signal>] signal batch to upload
|
|
486
|
+
# @return [void]
|
|
487
|
+
def flush_signal_batch(batch)
|
|
488
|
+
payload = JSON.generate(
|
|
489
|
+
projectClientKeyId: @project_client_key_id,
|
|
490
|
+
environment: @environment,
|
|
491
|
+
signals: batch.map do |signal|
|
|
492
|
+
{
|
|
493
|
+
guardName: signal.guard_name,
|
|
494
|
+
result: signal.result,
|
|
495
|
+
properties: signal.properties,
|
|
496
|
+
timestampMs: signal.timestamp_ms,
|
|
497
|
+
signalId: signal.signal_id,
|
|
498
|
+
executionId: signal.execution_id,
|
|
499
|
+
sequenceNumber: signal.sequence_number,
|
|
500
|
+
callsiteId: signal.callsite_id,
|
|
501
|
+
kind: signal.kind,
|
|
502
|
+
droppedSignalsSinceLast: signal.dropped_signals_since_last,
|
|
503
|
+
**(signal.parent_signal_id ? { parentSignalId: signal.parent_signal_id } : {}),
|
|
504
|
+
**(signal.measurement ? { measurement: signal_measurement_payload(signal.measurement) } : {})
|
|
505
|
+
}
|
|
506
|
+
end
|
|
507
|
+
)
|
|
508
|
+
post_json("/api/v1/signals", payload)
|
|
509
|
+
rescue => e
|
|
510
|
+
log "[liteguard] signal flush failed: #{e}"
|
|
511
|
+
@monitor.synchronize do
|
|
512
|
+
@signal_buffer.unshift(*batch)
|
|
513
|
+
max_buf = max_buffer_size
|
|
514
|
+
if @signal_buffer.size > max_buf
|
|
515
|
+
@dropped_signals_pending += @signal_buffer.size - max_buf
|
|
516
|
+
@signal_buffer.pop(@signal_buffer.size - max_buf)
|
|
517
|
+
end
|
|
518
|
+
end
|
|
519
|
+
end
|
|
520
|
+
|
|
521
|
+
# Upload unadopted guard names discovered during evaluation.
|
|
522
|
+
#
|
|
523
|
+
# @param unadopted_guard_names [Array<String>] guard names to report
|
|
524
|
+
# @return [void]
|
|
525
|
+
def flush_unadopted_guards(unadopted_guard_names)
|
|
526
|
+
payload = JSON.generate(
|
|
527
|
+
projectClientKeyId: @project_client_key_id,
|
|
528
|
+
environment: @environment,
|
|
529
|
+
guardNames: unadopted_guard_names
|
|
530
|
+
)
|
|
531
|
+
post_json("/api/v1/unadopted-guards", payload)
|
|
532
|
+
rescue => e
|
|
533
|
+
log "[liteguard] unadopted guard flush failed: #{e}"
|
|
534
|
+
@monitor.synchronize do
|
|
535
|
+
unadopted_guard_names.each { |name| @pending_unadopted_guards[name] = true }
|
|
536
|
+
end
|
|
537
|
+
end
|
|
538
|
+
|
|
539
|
+
# Buffer a signal for asynchronous upload.
|
|
540
|
+
#
|
|
541
|
+
# @param guard_name [String] guard name associated with the signal
|
|
542
|
+
# @param result [Boolean] guard result associated with the signal
|
|
543
|
+
# @param props [Hash] evaluation properties snapshot
|
|
544
|
+
# @param kind [String] signal kind such as `guard_check` or
|
|
545
|
+
# `guard_execution`
|
|
546
|
+
# @param measurement [SignalPerformance, nil] optional measurement payload
|
|
547
|
+
# @param parent_signal_id_override [String, nil] explicit parent signal ID
|
|
548
|
+
# @return [Signal] buffered signal instance
|
|
549
|
+
def buffer_signal(guard_name, result, props, kind:, measurement: nil, parent_signal_id_override: nil)
|
|
550
|
+
metadata = next_signal_metadata(parent_signal_id_override)
|
|
551
|
+
signal = Signal.new(
|
|
552
|
+
guard_name: guard_name,
|
|
553
|
+
result: result,
|
|
554
|
+
properties: props.dup,
|
|
555
|
+
timestamp_ms: (Time.now.to_f * 1000).to_i,
|
|
556
|
+
trace: nil,
|
|
557
|
+
signal_id: metadata[:signal_id],
|
|
558
|
+
execution_id: metadata[:execution_id],
|
|
559
|
+
parent_signal_id: metadata[:parent_signal_id],
|
|
560
|
+
sequence_number: metadata[:sequence_number],
|
|
561
|
+
callsite_id: capture_callsite_id,
|
|
562
|
+
kind: kind,
|
|
563
|
+
dropped_signals_since_last: take_dropped_signals,
|
|
564
|
+
measurement: measurement
|
|
565
|
+
)
|
|
566
|
+
should_flush = false
|
|
567
|
+
@monitor.synchronize do
|
|
568
|
+
if @signal_buffer.size >= max_buffer_size
|
|
569
|
+
@signal_buffer.shift
|
|
570
|
+
@dropped_signals_pending += 1
|
|
571
|
+
end
|
|
572
|
+
@signal_buffer << signal
|
|
573
|
+
should_flush = @signal_buffer.size >= @flush_size
|
|
574
|
+
end
|
|
575
|
+
schedule_async_flush if should_flush
|
|
576
|
+
signal
|
|
577
|
+
end
|
|
578
|
+
|
|
579
|
+
# Request a background flush without doing network I/O on the caller path.
|
|
580
|
+
#
|
|
581
|
+
# @return [void]
|
|
582
|
+
def schedule_async_flush
|
|
583
|
+
spawn_worker = false
|
|
584
|
+
|
|
585
|
+
@monitor.synchronize do
|
|
586
|
+
if @flush_thread&.alive?
|
|
587
|
+
@flush_requested = true
|
|
588
|
+
@flush_cond.broadcast
|
|
589
|
+
elsif !@async_flush_scheduled
|
|
590
|
+
@async_flush_scheduled = true
|
|
591
|
+
spawn_worker = true
|
|
592
|
+
end
|
|
593
|
+
end
|
|
594
|
+
|
|
595
|
+
return unless spawn_worker
|
|
596
|
+
|
|
597
|
+
worker = Thread.new do
|
|
598
|
+
begin
|
|
599
|
+
flush_signals
|
|
600
|
+
ensure
|
|
601
|
+
reschedule = false
|
|
602
|
+
@monitor.synchronize do
|
|
603
|
+
@async_flush_scheduled = false
|
|
604
|
+
reschedule = !@stopped && @signal_buffer.size >= @flush_size
|
|
605
|
+
end
|
|
606
|
+
schedule_async_flush if reschedule
|
|
607
|
+
end
|
|
608
|
+
end
|
|
609
|
+
worker.abort_on_exception = false
|
|
610
|
+
worker.report_on_exception = false if worker.respond_to?(:report_on_exception=)
|
|
611
|
+
end
|
|
612
|
+
|
|
613
|
+
# Build correlation metadata for the next signal.
|
|
614
|
+
#
|
|
615
|
+
# @param parent_signal_id_override [String, nil] explicit parent signal ID
|
|
616
|
+
# @return [Hash] signal correlation metadata
|
|
617
|
+
def next_signal_metadata(parent_signal_id_override = nil)
|
|
618
|
+
signal_id = next_signal_id
|
|
619
|
+
state = Thread.current[:liteguard_execution_state]
|
|
620
|
+
unless state
|
|
621
|
+
return {
|
|
622
|
+
signal_id: signal_id,
|
|
623
|
+
execution_id: next_signal_id,
|
|
624
|
+
parent_signal_id: nil,
|
|
625
|
+
sequence_number: 1
|
|
626
|
+
}
|
|
627
|
+
end
|
|
628
|
+
|
|
629
|
+
state[:sequence_number] += 1
|
|
630
|
+
parent_signal_id = parent_signal_id_override || state[:last_signal_id]
|
|
631
|
+
state[:last_signal_id] = signal_id
|
|
632
|
+
{
|
|
633
|
+
signal_id: signal_id,
|
|
634
|
+
execution_id: state[:execution_id],
|
|
635
|
+
parent_signal_id: parent_signal_id,
|
|
636
|
+
sequence_number: state[:sequence_number]
|
|
637
|
+
}
|
|
638
|
+
end
|
|
639
|
+
|
|
640
|
+
# Generate a unique signal identifier.
|
|
641
|
+
#
|
|
642
|
+
# @return [String] unique signal ID
|
|
643
|
+
def next_signal_id
|
|
644
|
+
@signal_counter ||= 0
|
|
645
|
+
@signal_counter += 1
|
|
646
|
+
"#{Process.clock_gettime(Process::CLOCK_REALTIME, :nanosecond).to_s(16)}-#{@signal_counter.to_s(16)}"
|
|
647
|
+
end
|
|
648
|
+
|
|
649
|
+
# Consume and reset the dropped-signal counter.
|
|
650
|
+
#
|
|
651
|
+
# @return [Integer] number of dropped signals since the last emitted signal
|
|
652
|
+
def take_dropped_signals
|
|
653
|
+
@monitor.synchronize do
|
|
654
|
+
dropped = @dropped_signals_pending
|
|
655
|
+
@dropped_signals_pending = 0
|
|
656
|
+
dropped
|
|
657
|
+
end
|
|
658
|
+
end
|
|
659
|
+
|
|
660
|
+
# Return the maximum in-memory signal buffer size.
|
|
661
|
+
#
|
|
662
|
+
# @return [Integer] maximum signal count retained before dropping oldest
|
|
663
|
+
def max_buffer_size
|
|
664
|
+
@flush_size * @flush_buffer_multiplier
|
|
665
|
+
end
|
|
666
|
+
|
|
667
|
+
# ---------------------------------------------------------------------
|
|
668
|
+
# Guard refresh
|
|
669
|
+
# ---------------------------------------------------------------------
|
|
670
|
+
|
|
671
|
+
# Create a placeholder bundle entry before the first fetch completes.
|
|
672
|
+
#
|
|
673
|
+
# @param bundle_key [String] cache key for the bundle
|
|
674
|
+
# @param protected_context [ProtectedContext, nil] protected context backing
|
|
675
|
+
# the bundle
|
|
676
|
+
# @return [GuardBundle] empty bundle record
|
|
677
|
+
def create_empty_bundle(bundle_key, protected_context)
|
|
678
|
+
GuardBundle.new(
|
|
679
|
+
key: bundle_key,
|
|
680
|
+
guards: {},
|
|
681
|
+
ready: false,
|
|
682
|
+
etag: "",
|
|
683
|
+
protected_context: protected_context ? copy_protected_context(protected_context) : nil,
|
|
684
|
+
refresh_rate_seconds: @refresh_rate
|
|
685
|
+
)
|
|
686
|
+
end
|
|
687
|
+
|
|
688
|
+
# Look up the bundle associated with a scope.
|
|
689
|
+
#
|
|
690
|
+
# @param scope [Scope] scope whose bundle should be resolved
|
|
691
|
+
# @return [GuardBundle] resolved bundle, or the public bundle fallback
|
|
692
|
+
def bundle_for_scope(scope)
|
|
693
|
+
@monitor.synchronize do
|
|
694
|
+
@bundles[scope.bundle_key] || @bundles[PUBLIC_BUNDLE_KEY]
|
|
695
|
+
end
|
|
696
|
+
end
|
|
697
|
+
|
|
698
|
+
# Ensure a bundle exists for the given protected context and fetch it if
|
|
699
|
+
# needed.
|
|
700
|
+
#
|
|
701
|
+
# @param protected_context [ProtectedContext] normalized protected context
|
|
702
|
+
# @return [String] cache key for the bundle
|
|
703
|
+
def ensure_bundle_for_protected_context(protected_context)
|
|
704
|
+
bundle_key = protected_context_cache_key(protected_context)
|
|
705
|
+
ready = @monitor.synchronize do
|
|
706
|
+
@bundles[bundle_key] ||= create_empty_bundle(bundle_key, protected_context)
|
|
707
|
+
@bundles[bundle_key].ready
|
|
708
|
+
end
|
|
709
|
+
return bundle_key if ready
|
|
710
|
+
|
|
711
|
+
fetch_guards_for_bundle(bundle_key)
|
|
712
|
+
bundle_key
|
|
713
|
+
end
|
|
714
|
+
|
|
715
|
+
# Build a stable cache key for a protected context.
|
|
716
|
+
#
|
|
717
|
+
# @param protected_context [ProtectedContext, nil] protected context to hash
|
|
718
|
+
# @return [String] bundle cache key
|
|
719
|
+
def protected_context_cache_key(protected_context)
|
|
720
|
+
return PUBLIC_BUNDLE_KEY if protected_context.nil?
|
|
721
|
+
|
|
722
|
+
keys = protected_context.properties.keys.sort
|
|
723
|
+
parts = [protected_context.signature, ""]
|
|
724
|
+
keys.each do |key|
|
|
725
|
+
parts << "#{key}=#{protected_context.properties[key]}"
|
|
726
|
+
end
|
|
727
|
+
parts.join("\x00")
|
|
728
|
+
end
|
|
729
|
+
|
|
730
|
+
    # Wake the refresh loop so it can recompute its next wait interval.
    #
    # The flag and the signal are set together under the monitor; the refresh
    # loop waits on @refresh_cond inside the same monitor, so it cannot miss a
    # wakeup between checking the flag and going back to sleep.
    #
    # @return [void]
    def request_refresh_reschedule
      @monitor.synchronize do
        # Mark this wakeup as intentional: refresh_loop clears the flag and
        # skips one fetch cycle so it re-waits with the new interval.
        @refresh_wakeup_requested = true
        @refresh_cond.signal
      end
    end
|
|
739
|
+
|
|
740
|
+
    # Fetch guard data for a bundle from the Liteguard backend.
    #
    # Sends a conditional POST (If-None-Match with the cached etag). A 304
    # response leaves the cached bundle untouched; a 200 replaces the bundle
    # wholesale and may update the client's effective refresh interval. Any
    # exception (network, JSON) is caught and logged — this method never raises.
    #
    # @param bundle_key [String] bundle cache key to refresh
    # @return [void]
    def fetch_guards_for_bundle(bundle_key)
      # Grab (or lazily create) the bundle under the monitor, then work on a
      # defensive copy of its protected context outside the lock.
      bundle = @monitor.synchronize { @bundles[bundle_key] ||= create_empty_bundle(bundle_key, nil) }
      protected_context = bundle.protected_context ? copy_protected_context(bundle.protected_context) : nil

      payload = {
        projectClientKeyId: @project_client_key_id,
        environment: @environment
      }
      if protected_context
        payload[:protectedContext] = {
          properties: protected_context.properties,
          signature: protected_context.signature
        }
      end

      uri = URI("#{@backend_url}/api/v1/guards")
      req = Net::HTTP::Post.new(uri)
      req["Authorization"] = "Bearer #{@project_client_key_id}"
      req["Content-Type"] = "application/json"
      # Conditional request: let the server short-circuit with 304 when the
      # guard set has not changed since the cached etag.
      req["If-None-Match"] = bundle.etag unless bundle.etag.to_s.empty?
      req.body = JSON.generate(payload)

      response = Net::HTTP.start(
        uri.host,
        uri.port,
        use_ssl: uri.scheme == "https",
        open_timeout: @http_timeout_seconds,
        read_timeout: @http_timeout_seconds
      ) { |http| http.request(req) }

      # 304: cached bundle is still current — nothing to do.
      return if response.code == "304"
      return log("[liteguard] guard fetch returned #{response.code}") unless response.code == "200"

      body = JSON.parse(response.body)
      server_refresh_rate = body["refreshRateSeconds"].to_i
      # The server can lengthen the refresh interval but never shorten it
      # below the client-configured rate (max of the two).
      effective_refresh_rate = if server_refresh_rate.positive?
        [@refresh_rate, server_refresh_rate].max
      else
        @refresh_rate
      end
      guards = (body["guards"] || []).map { |raw_guard| parse_guard(raw_guard) }
      refresh_rate_changed = false
      @monitor.synchronize do
        previous_refresh_rate = @current_refresh_rate
        # Replace the bundle atomically rather than mutating it in place.
        @bundles[bundle_key] = GuardBundle.new(
          key: bundle_key,
          guards: guards.each_with_object({}) { |guard, acc| acc[guard.name] = guard },
          ready: true,
          etag: body["etag"] || "",
          protected_context: protected_context,
          refresh_rate_seconds: effective_refresh_rate
        )
        recompute_refresh_interval_locked
        refresh_rate_changed = @current_refresh_rate != previous_refresh_rate
      end
      # Wake the refresh loop outside the lock so it re-waits with the new
      # interval; signaling while holding the monitor would still be safe but
      # this keeps the critical section minimal.
      request_refresh_reschedule if refresh_rate_changed
    rescue => e
      log "[liteguard] guard fetch error: #{e}"
    end
|
|
803
|
+
|
|
804
|
+
# Recompute the shortest refresh interval across all known bundles.
|
|
805
|
+
#
|
|
806
|
+
# @return [void]
|
|
807
|
+
def recompute_refresh_interval_locked
|
|
808
|
+
next_refresh_rate = @bundles.values.map(&:refresh_rate_seconds).select(&:positive?).min
|
|
809
|
+
@current_refresh_rate = next_refresh_rate || @refresh_rate
|
|
810
|
+
end
|
|
811
|
+
|
|
812
|
+
    # Background loop that periodically refreshes guard bundles.
    #
    # The loop sleeps on @refresh_cond while holding the monitor so that
    # request_refresh_reschedule can wake it without a missed-signal race.
    # The monitor is explicitly released around the network fetches so guard
    # evaluations are not blocked for the duration of the HTTP calls.
    #
    # @return [void]
    def refresh_loop
      @monitor.synchronize do
        until @stopped
          # Timed wait: wakes either on the interval elapsing or on an
          # explicit signal (stop or reschedule).
          @refresh_cond.wait(@current_refresh_rate)
          break if @stopped
          if @refresh_wakeup_requested
            # Reschedule wakeup: skip this fetch cycle and re-wait so the
            # recomputed @current_refresh_rate takes effect immediately.
            @refresh_wakeup_requested = false
            next
          end
          # Snapshot the keys under the lock; the bundle set may change while
          # we are fetching.
          bundle_keys = @bundles.keys.sort
          # Drop the monitor for the slow HTTP work; fetch_guards_for_bundle
          # re-acquires it briefly for its own updates.
          @monitor.mon_exit
          begin
            bundle_keys.each { |bundle_key| fetch_guards_for_bundle(bundle_key) }
          ensure
            # Must pair with mon_exit even if a fetch raises, so the
            # enclosing synchronize can release cleanly.
            @monitor.mon_enter
          end
        end
      end
    end
|
|
834
|
+
|
|
835
|
+
    # Background loop that periodically flushes buffered telemetry.
    #
    # Mirrors refresh_loop: sleeps on @flush_cond under the monitor, and
    # releases the monitor around the actual flush so signal recording is not
    # blocked on network I/O.
    #
    # @return [void]
    def flush_loop
      @monitor.synchronize do
        until @stopped
          # Wakes on the flush interval, an explicit flush request, or stop.
          @flush_cond.wait(@flush_rate)
          break if @stopped
          # Clear the request flag before flushing; a request arriving during
          # the flush will be picked up on the next wait cycle.
          @flush_requested = false
          @monitor.mon_exit
          begin
            flush_signals
          ensure
            # Re-acquire unconditionally so the monitor state stays balanced
            # even when flush_signals raises.
            @monitor.mon_enter
          end
        end
      end
    end
|
|
853
|
+
|
|
854
|
+
# ---------------------------------------------------------------------
|
|
855
|
+
# Helpers
|
|
856
|
+
# ---------------------------------------------------------------------
|
|
857
|
+
|
|
858
|
+
# POST a JSON payload to the Liteguard backend.
|
|
859
|
+
#
|
|
860
|
+
# @param path [String] request path relative to `backend_url`
|
|
861
|
+
# @param payload [String] pre-encoded JSON payload
|
|
862
|
+
# @return [void]
|
|
863
|
+
# @raise [RuntimeError] when the response is not successful
|
|
864
|
+
def post_json(path, payload)
|
|
865
|
+
uri = URI("#{@backend_url}#{path}")
|
|
866
|
+
req = Net::HTTP::Post.new(uri)
|
|
867
|
+
req["Authorization"] = "Bearer #{@project_client_key_id}"
|
|
868
|
+
req["Content-Type"] = "application/json"
|
|
869
|
+
req["X-Liteguard-Environment"] = @environment unless @environment.empty?
|
|
870
|
+
req.body = payload
|
|
871
|
+
|
|
872
|
+
response = Net::HTTP.start(
|
|
873
|
+
uri.host,
|
|
874
|
+
uri.port,
|
|
875
|
+
use_ssl: uri.scheme == "https",
|
|
876
|
+
open_timeout: @http_timeout_seconds,
|
|
877
|
+
read_timeout: @http_timeout_seconds
|
|
878
|
+
) { |http| http.request(req) }
|
|
879
|
+
raise "request returned #{response.code}" unless response.is_a?(Net::HTTPSuccess)
|
|
880
|
+
end
|
|
881
|
+
|
|
882
|
+
# Parse a guard payload returned by the backend.
|
|
883
|
+
#
|
|
884
|
+
# @param raw [Hash] decoded guard payload
|
|
885
|
+
# @return [Guard] parsed guard record
|
|
886
|
+
def parse_guard(raw)
|
|
887
|
+
Guard.new(
|
|
888
|
+
name: raw["name"],
|
|
889
|
+
rules: (raw["rules"] || []).map { |rule| parse_rule(rule) },
|
|
890
|
+
default_value: !!raw["defaultValue"],
|
|
891
|
+
adopted: !!raw["adopted"],
|
|
892
|
+
rate_limit_per_minute: (raw["rateLimitPerMinute"] || 0).to_i,
|
|
893
|
+
rate_limit_properties: Array(raw["rateLimitProperties"]),
|
|
894
|
+
disable_measurement: raw.key?("disableMeasurement") ? raw["disableMeasurement"] : nil
|
|
895
|
+
)
|
|
896
|
+
end
|
|
897
|
+
|
|
898
|
+
# Parse a rule payload returned by the backend.
|
|
899
|
+
#
|
|
900
|
+
# @param raw [Hash] decoded rule payload
|
|
901
|
+
# @return [Rule] parsed rule record
|
|
902
|
+
def parse_rule(raw)
|
|
903
|
+
Rule.new(
|
|
904
|
+
property_name: raw["propertyName"],
|
|
905
|
+
operator: raw["operator"].downcase.to_sym,
|
|
906
|
+
values: raw["values"] || [],
|
|
907
|
+
result: !!raw["result"],
|
|
908
|
+
enabled: raw.fetch("enabled", true)
|
|
909
|
+
)
|
|
910
|
+
end
|
|
911
|
+
|
|
912
|
+
    # Consume a rate-limit slot for an open guard evaluation.
    #
    # Uses a fixed 60-second window per bucket: the first evaluation records
    # the window start, and the counter resets once a full window has elapsed.
    #
    # @param name [String] guard name
    # @param limit_per_minute [Integer] allowed evaluations per minute
    # @param rate_limit_properties [Array<String>] properties contributing to
    #   the bucket key
    # @param props [Hash] evaluation properties
    # @return [Boolean] `true` when the evaluation is within the limit
    def check_rate_limit(name, limit_per_minute, rate_limit_properties, props)
      # Monotonic clock: window math must be immune to wall-clock jumps.
      now = Process.clock_gettime(Process::CLOCK_MONOTONIC)
      key = rate_limit_bucket_key(name, rate_limit_properties, props)
      @monitor.synchronize do
        entry = @rate_limit_state[key] || { window_start: now, count: 0 }
        # Start a fresh window when the previous one has fully elapsed.
        entry = { window_start: now, count: 0 } if now - entry[:window_start] >= 60.0
        if entry[:count] >= limit_per_minute
          # Persist the (possibly reset) entry even on rejection so window
          # bookkeeping stays current.
          @rate_limit_state[key] = entry
          # Non-local return from inside the synchronize block; the monitor
          # is released on the way out.
          return false
        end
        # Consume one slot; entries are replaced rather than mutated.
        @rate_limit_state[key] = { window_start: entry[:window_start], count: entry[:count] + 1 }
      end
      true
    end
|
|
934
|
+
|
|
935
|
+
# Check whether an evaluation would pass rate limiting without consuming a
|
|
936
|
+
# slot.
|
|
937
|
+
#
|
|
938
|
+
# @param name [String] guard name
|
|
939
|
+
# @param limit_per_minute [Integer] allowed evaluations per minute
|
|
940
|
+
# @param rate_limit_properties [Array<String>] properties contributing to
|
|
941
|
+
# the bucket key
|
|
942
|
+
# @param props [Hash] evaluation properties
|
|
943
|
+
# @return [Boolean] `true` when the evaluation would pass the limit
|
|
944
|
+
def would_pass_rate_limit(name, limit_per_minute, rate_limit_properties, props)
|
|
945
|
+
now = Process.clock_gettime(Process::CLOCK_MONOTONIC)
|
|
946
|
+
key = rate_limit_bucket_key(name, rate_limit_properties, props)
|
|
947
|
+
@monitor.synchronize do
|
|
948
|
+
entry = @rate_limit_state[key]
|
|
949
|
+
return true if entry.nil?
|
|
950
|
+
return true if now - entry[:window_start] >= 60.0
|
|
951
|
+
|
|
952
|
+
entry[:count] < limit_per_minute
|
|
953
|
+
end
|
|
954
|
+
end
|
|
955
|
+
|
|
956
|
+
# Build the cache key used for rate-limit buckets.
|
|
957
|
+
#
|
|
958
|
+
# @param name [String] guard name
|
|
959
|
+
# @param rate_limit_properties [Array<String>, nil] bucket property names
|
|
960
|
+
# @param props [Hash] evaluation properties
|
|
961
|
+
# @return [String] rate-limit bucket key
|
|
962
|
+
def rate_limit_bucket_key(name, rate_limit_properties, props)
|
|
963
|
+
return name if rate_limit_properties.nil? || rate_limit_properties.empty?
|
|
964
|
+
|
|
965
|
+
parts = rate_limit_properties.map { |property| "#{property}=#{props[property] || ''}" }
|
|
966
|
+
"#{name}\x00#{parts.join("\x00")}"
|
|
967
|
+
end
|
|
968
|
+
|
|
969
|
+
# Normalize a positive integer option, falling back to a default when the
|
|
970
|
+
# provided value is blank, zero, or negative.
|
|
971
|
+
#
|
|
972
|
+
# @param value [Object] raw option value
|
|
973
|
+
# @param default [Integer] default to use when normalization fails
|
|
974
|
+
# @return [Integer] normalized positive integer
|
|
975
|
+
def normalize_positive_option(value, default)
|
|
976
|
+
normalized = value.nil? ? default : value.to_i
|
|
977
|
+
normalized.positive? ? normalized : default
|
|
978
|
+
end
|
|
979
|
+
|
|
980
|
+
# Normalize guard-evaluation options into a canonical hash.
|
|
981
|
+
#
|
|
982
|
+
# @param options [Hash, nil] primary options hash
|
|
983
|
+
# @param legacy_options [Hash] keyword arguments merged on top
|
|
984
|
+
# @return [Hash] normalized option hash
|
|
985
|
+
# @raise [ArgumentError] when options are invalid or contain unknown keys
|
|
986
|
+
def normalize_is_open_options(options, legacy_options = {})
|
|
987
|
+
raw = if options.nil?
|
|
988
|
+
legacy_options
|
|
989
|
+
elsif options.is_a?(Hash)
|
|
990
|
+
options.merge(legacy_options)
|
|
991
|
+
else
|
|
992
|
+
raise ArgumentError, "is_open options must be a Hash"
|
|
993
|
+
end
|
|
994
|
+
|
|
995
|
+
unknown_keys = raw.keys.map(&:to_sym) - CHECK_OPTION_KEYS
|
|
996
|
+
raise ArgumentError, "unknown is_open option(s): #{unknown_keys.join(', ')}" unless unknown_keys.empty?
|
|
997
|
+
|
|
998
|
+
properties = raw[:properties] || raw["properties"]
|
|
999
|
+
{
|
|
1000
|
+
properties: properties ? normalize_properties(properties) : nil,
|
|
1001
|
+
fallback: raw.key?(:fallback) ? raw[:fallback] : raw["fallback"],
|
|
1002
|
+
disable_measurement: raw.key?(:disable_measurement) ? raw[:disable_measurement] : raw["disable_measurement"] || false
|
|
1003
|
+
}
|
|
1004
|
+
end
|
|
1005
|
+
|
|
1006
|
+
# Normalize property keys to strings.
|
|
1007
|
+
#
|
|
1008
|
+
# @param properties [Hash, nil] raw property hash
|
|
1009
|
+
# @return [Hash] normalized property hash
|
|
1010
|
+
def normalize_properties(properties)
|
|
1011
|
+
return {} if properties.nil?
|
|
1012
|
+
|
|
1013
|
+
properties.each_with_object({}) { |(key, value), acc| acc[key.to_s] = value }
|
|
1014
|
+
end
|
|
1015
|
+
|
|
1016
|
+
# Normalize a protected-context payload into a `ProtectedContext` object.
|
|
1017
|
+
#
|
|
1018
|
+
# @param protected_context [ProtectedContext, Hash] raw protected context
|
|
1019
|
+
# @return [ProtectedContext] normalized protected context
|
|
1020
|
+
def normalize_protected_context(protected_context)
|
|
1021
|
+
raw = if protected_context.is_a?(ProtectedContext)
|
|
1022
|
+
{ properties: protected_context.properties, signature: protected_context.signature }
|
|
1023
|
+
else
|
|
1024
|
+
protected_context
|
|
1025
|
+
end
|
|
1026
|
+
ProtectedContext.new(
|
|
1027
|
+
properties: normalize_properties(raw[:properties] || raw["properties"] || {}),
|
|
1028
|
+
signature: (raw[:signature] || raw["signature"]).to_s
|
|
1029
|
+
)
|
|
1030
|
+
end
|
|
1031
|
+
|
|
1032
|
+
# Return a defensive copy of a protected context.
|
|
1033
|
+
#
|
|
1034
|
+
# @param protected_context [ProtectedContext, nil] protected context to copy
|
|
1035
|
+
# @return [ProtectedContext, nil] copied protected context or `nil`
|
|
1036
|
+
def copy_protected_context(protected_context)
|
|
1037
|
+
return nil if protected_context.nil?
|
|
1038
|
+
|
|
1039
|
+
ProtectedContext.new(
|
|
1040
|
+
properties: protected_context.properties.dup,
|
|
1041
|
+
signature: protected_context.signature.dup
|
|
1042
|
+
)
|
|
1043
|
+
end
|
|
1044
|
+
|
|
1045
|
+
# Determine whether measurement capture is enabled for an evaluation.
|
|
1046
|
+
#
|
|
1047
|
+
# @param guard [Guard, nil] guard being evaluated
|
|
1048
|
+
# @param options [Hash] normalized evaluation options
|
|
1049
|
+
# @return [Boolean] `true` when measurements should be captured
|
|
1050
|
+
def measurement_enabled?(guard, options)
|
|
1051
|
+
return false if @disable_measurement
|
|
1052
|
+
return false if options[:disable_measurement]
|
|
1053
|
+
return false if guard&.disable_measurement
|
|
1054
|
+
|
|
1055
|
+
true
|
|
1056
|
+
end
|
|
1057
|
+
|
|
1058
|
+
# Capture process metrics for a guard-check signal.
|
|
1059
|
+
#
|
|
1060
|
+
# @return [SignalPerformance] measurement payload for a guard check
|
|
1061
|
+
def capture_guard_check_measurement
|
|
1062
|
+
gc_stats = GC.stat
|
|
1063
|
+
SignalPerformance.new(
|
|
1064
|
+
guard_check: GuardCheckPerformance.new(
|
|
1065
|
+
rss_bytes: nil,
|
|
1066
|
+
heap_used_bytes: gc_slot_bytes(gc_stats, :heap_live_slots),
|
|
1067
|
+
heap_total_bytes: gc_slot_bytes(gc_stats, :heap_available_slots),
|
|
1068
|
+
cpu_time_ns: Process.clock_gettime(Process::CLOCK_PROCESS_CPUTIME_ID, :nanosecond),
|
|
1069
|
+
gc_count: gc_stat_value(gc_stats, :count),
|
|
1070
|
+
thread_count: Thread.list.length
|
|
1071
|
+
),
|
|
1072
|
+
guard_execution: nil
|
|
1073
|
+
)
|
|
1074
|
+
end
|
|
1075
|
+
|
|
1076
|
+
# Capture process metrics for a guard-execution signal.
|
|
1077
|
+
#
|
|
1078
|
+
# @param started_at [Integer] monotonic start time in nanoseconds
|
|
1079
|
+
# @param completed [Boolean] whether the guarded block completed normally
|
|
1080
|
+
# @param error [Exception, nil] exception raised by the guarded block
|
|
1081
|
+
# @return [SignalPerformance] measurement payload for a guard execution
|
|
1082
|
+
def capture_guard_execution_measurement(started_at, completed, error = nil)
|
|
1083
|
+
gc_stats = GC.stat
|
|
1084
|
+
SignalPerformance.new(
|
|
1085
|
+
guard_check: nil,
|
|
1086
|
+
guard_execution: GuardExecutionPerformance.new(
|
|
1087
|
+
duration_ns: Process.clock_gettime(Process::CLOCK_MONOTONIC, :nanosecond) - started_at,
|
|
1088
|
+
rss_end_bytes: nil,
|
|
1089
|
+
heap_used_end_bytes: gc_slot_bytes(gc_stats, :heap_live_slots),
|
|
1090
|
+
heap_total_end_bytes: gc_slot_bytes(gc_stats, :heap_available_slots),
|
|
1091
|
+
cpu_time_end_ns: Process.clock_gettime(Process::CLOCK_PROCESS_CPUTIME_ID, :nanosecond),
|
|
1092
|
+
gc_count_end: gc_stat_value(gc_stats, :count),
|
|
1093
|
+
thread_count_end: Thread.list.length,
|
|
1094
|
+
completed: completed,
|
|
1095
|
+
error_class: error&.class&.name
|
|
1096
|
+
)
|
|
1097
|
+
)
|
|
1098
|
+
end
|
|
1099
|
+
|
|
1100
|
+
# Safely extract an integer GC stat from the current runtime.
|
|
1101
|
+
#
|
|
1102
|
+
# @param stats [Hash] GC statistics hash
|
|
1103
|
+
# @param key [Symbol] desired stat key
|
|
1104
|
+
# @return [Integer, nil] integer stat value or `nil`
|
|
1105
|
+
def gc_stat_value(stats, key)
|
|
1106
|
+
value = stats[key]
|
|
1107
|
+
value.is_a?(Numeric) ? value.to_i : nil
|
|
1108
|
+
end
|
|
1109
|
+
|
|
1110
|
+
# Convert a GC slot count into bytes using the current Ruby slot size.
|
|
1111
|
+
#
|
|
1112
|
+
# @param stats [Hash] GC statistics hash
|
|
1113
|
+
# @param key [Symbol] desired slot-count stat key
|
|
1114
|
+
# @return [Integer, nil] byte count or `nil` when unavailable
|
|
1115
|
+
def gc_slot_bytes(stats, key)
|
|
1116
|
+
slots = gc_stat_value(stats, key)
|
|
1117
|
+
slot_size = ruby_internal_constant(:RVALUE_SIZE)
|
|
1118
|
+
return nil if slots.nil? || slot_size.nil?
|
|
1119
|
+
|
|
1120
|
+
slots * slot_size
|
|
1121
|
+
end
|
|
1122
|
+
|
|
1123
|
+
# Safely read a Ruby VM internal constant.
|
|
1124
|
+
#
|
|
1125
|
+
# @param key [Symbol] desired constant name
|
|
1126
|
+
# @return [Integer, nil] integer constant or `nil`
|
|
1127
|
+
def ruby_internal_constant(key)
|
|
1128
|
+
return nil unless defined?(GC::INTERNAL_CONSTANTS)
|
|
1129
|
+
|
|
1130
|
+
value = GC::INTERNAL_CONSTANTS[key]
|
|
1131
|
+
value.is_a?(Numeric) ? value.to_i : nil
|
|
1132
|
+
end
|
|
1133
|
+
|
|
1134
|
+
# Convert an internal measurement struct into the API payload shape.
|
|
1135
|
+
#
|
|
1136
|
+
# @param measurement [SignalPerformance] measurement to serialize
|
|
1137
|
+
# @return [Hash] JSON-ready measurement payload
|
|
1138
|
+
def signal_measurement_payload(measurement)
|
|
1139
|
+
payload = {}
|
|
1140
|
+
if measurement.guard_check
|
|
1141
|
+
payload[:guardCheck] = {
|
|
1142
|
+
rssBytes: measurement.guard_check.rss_bytes,
|
|
1143
|
+
heapUsedBytes: measurement.guard_check.heap_used_bytes,
|
|
1144
|
+
heapTotalBytes: measurement.guard_check.heap_total_bytes,
|
|
1145
|
+
cpuTimeNs: measurement.guard_check.cpu_time_ns,
|
|
1146
|
+
gcCount: measurement.guard_check.gc_count,
|
|
1147
|
+
threadCount: measurement.guard_check.thread_count,
|
|
1148
|
+
}
|
|
1149
|
+
end
|
|
1150
|
+
if measurement.guard_execution
|
|
1151
|
+
payload[:guardExecution] = {
|
|
1152
|
+
durationNs: measurement.guard_execution.duration_ns,
|
|
1153
|
+
rssEndBytes: measurement.guard_execution.rss_end_bytes,
|
|
1154
|
+
heapUsedEndBytes: measurement.guard_execution.heap_used_end_bytes,
|
|
1155
|
+
heapTotalEndBytes: measurement.guard_execution.heap_total_end_bytes,
|
|
1156
|
+
cpuTimeEndNs: measurement.guard_execution.cpu_time_end_ns,
|
|
1157
|
+
gcCountEnd: measurement.guard_execution.gc_count_end,
|
|
1158
|
+
threadCountEnd: measurement.guard_execution.thread_count_end,
|
|
1159
|
+
completed: measurement.guard_execution.completed,
|
|
1160
|
+
errorClass: measurement.guard_execution.error_class,
|
|
1161
|
+
}
|
|
1162
|
+
end
|
|
1163
|
+
payload
|
|
1164
|
+
end
|
|
1165
|
+
|
|
1166
|
+
# Track an unadopted guard so it can be reported once.
|
|
1167
|
+
#
|
|
1168
|
+
# @param name [String] guard name to record
|
|
1169
|
+
# @return [void]
|
|
1170
|
+
def record_unadopted_guard(name)
|
|
1171
|
+
@monitor.synchronize do
|
|
1172
|
+
return if @reported_unadopted_guards[name]
|
|
1173
|
+
|
|
1174
|
+
@reported_unadopted_guards[name] = true
|
|
1175
|
+
@pending_unadopted_guards[name] = true
|
|
1176
|
+
end
|
|
1177
|
+
end
|
|
1178
|
+
|
|
1179
|
+
# Capture a stable callsite identifier outside of Liteguard internals.
|
|
1180
|
+
#
|
|
1181
|
+
# @return [String] `path:line` identifier, or `unknown`
|
|
1182
|
+
def capture_callsite_id
|
|
1183
|
+
frame = caller_locations.find do |location|
|
|
1184
|
+
path = location.absolute_path.to_s.tr("\\", "/")
|
|
1185
|
+
!path.end_with?("/sdk/ruby/lib/liteguard/client.rb") &&
|
|
1186
|
+
!path.end_with?("/sdk/ruby/lib/liteguard/scope.rb") &&
|
|
1187
|
+
!path.end_with?("/sdk/ruby/lib/liteguard.rb")
|
|
1188
|
+
end
|
|
1189
|
+
return "unknown" unless frame
|
|
1190
|
+
|
|
1191
|
+
"#{frame.absolute_path || frame.path}:#{frame.lineno}"
|
|
1192
|
+
end
|
|
1193
|
+
|
|
1194
|
+
# Emit a warning message unless quiet mode is enabled.
|
|
1195
|
+
#
|
|
1196
|
+
# @param message [String] log message
|
|
1197
|
+
# @return [void]
|
|
1198
|
+
def log(message)
|
|
1199
|
+
warn message unless @quiet
|
|
1200
|
+
end
|
|
1201
|
+
|
|
1202
|
+
# ---------------------------------------------------------------------
|
|
1203
|
+
# Test helpers
|
|
1204
|
+
# ---------------------------------------------------------------------
|
|
1205
|
+
|
|
1206
|
+
# Replace the public bundle with test data.
|
|
1207
|
+
#
|
|
1208
|
+
# @param guards [Array<Guard>] guards to install in the public bundle
|
|
1209
|
+
# @return [void]
|
|
1210
|
+
def set_guards(guards)
|
|
1211
|
+
@monitor.synchronize do
|
|
1212
|
+
@bundles[PUBLIC_BUNDLE_KEY] = GuardBundle.new(
|
|
1213
|
+
key: PUBLIC_BUNDLE_KEY,
|
|
1214
|
+
guards: guards.each_with_object({}) { |guard, acc| acc[guard.name] = guard },
|
|
1215
|
+
ready: true,
|
|
1216
|
+
etag: "",
|
|
1217
|
+
protected_context: nil,
|
|
1218
|
+
refresh_rate_seconds: @refresh_rate
|
|
1219
|
+
)
|
|
1220
|
+
recompute_refresh_interval_locked
|
|
1221
|
+
end
|
|
1222
|
+
end
|
|
1223
|
+
|
|
1224
|
+
# Install a protected bundle for testing.
|
|
1225
|
+
#
|
|
1226
|
+
# @param protected_context [ProtectedContext, Hash] protected context key
|
|
1227
|
+
# @param guards [Array<Guard>] guards to install for that bundle
|
|
1228
|
+
# @return [void]
|
|
1229
|
+
def set_protected_guards(protected_context, guards)
|
|
1230
|
+
normalized = normalize_protected_context(protected_context)
|
|
1231
|
+
bundle_key = protected_context_cache_key(normalized)
|
|
1232
|
+
@monitor.synchronize do
|
|
1233
|
+
@bundles[bundle_key] = GuardBundle.new(
|
|
1234
|
+
key: bundle_key,
|
|
1235
|
+
guards: guards.each_with_object({}) { |guard, acc| acc[guard.name] = guard },
|
|
1236
|
+
ready: true,
|
|
1237
|
+
etag: "",
|
|
1238
|
+
protected_context: normalized,
|
|
1239
|
+
refresh_rate_seconds: @refresh_rate
|
|
1240
|
+
)
|
|
1241
|
+
recompute_refresh_interval_locked
|
|
1242
|
+
end
|
|
1243
|
+
end
|
|
1244
|
+
|
|
1245
|
+
# Clear rate-limit state for one guard or for all guards.
|
|
1246
|
+
#
|
|
1247
|
+
# @param name [String, nil] optional guard name to clear
|
|
1248
|
+
# @return [void]
|
|
1249
|
+
def reset_rate_limit_state(name = nil)
|
|
1250
|
+
@monitor.synchronize do
|
|
1251
|
+
if name
|
|
1252
|
+
@rate_limit_state.delete(name)
|
|
1253
|
+
prefix = "#{name}\x00"
|
|
1254
|
+
@rate_limit_state.delete_if { |key, _| key.start_with?(prefix) }
|
|
1255
|
+
else
|
|
1256
|
+
@rate_limit_state.clear
|
|
1257
|
+
end
|
|
1258
|
+
end
|
|
1259
|
+
end
|
|
1260
|
+
|
|
1261
|
+
# Return pending unadopted-guard names for tests.
|
|
1262
|
+
#
|
|
1263
|
+
# @return [Array<String>] pending unadopted guard names
|
|
1264
|
+
def pending_unadopted_guards_for_testing
|
|
1265
|
+
@monitor.synchronize { @pending_unadopted_guards.keys.sort }
|
|
1266
|
+
end
|
|
1267
|
+
|
|
1268
|
+
# Return the number of cached bundles for tests.
|
|
1269
|
+
#
|
|
1270
|
+
# @return [Integer] number of known bundles
|
|
1271
|
+
def known_bundle_count_for_testing
|
|
1272
|
+
@monitor.synchronize { @bundles.size }
|
|
1273
|
+
end
|
|
1274
|
+
|
|
1275
|
+
# Return the current refresh rate for tests.
|
|
1276
|
+
#
|
|
1277
|
+
# @return [Integer] current refresh interval in seconds
|
|
1278
|
+
def current_refresh_rate_for_testing
|
|
1279
|
+
@monitor.synchronize { @current_refresh_rate }
|
|
1280
|
+
end
|
|
1281
|
+
end
|
|
1282
|
+
end
|