tsikol 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +22 -0
- data/CONTRIBUTING.md +84 -0
- data/LICENSE +21 -0
- data/README.md +579 -0
- data/Rakefile +12 -0
- data/docs/README.md +69 -0
- data/docs/api/middleware.md +721 -0
- data/docs/api/prompt.md +858 -0
- data/docs/api/resource.md +651 -0
- data/docs/api/server.md +509 -0
- data/docs/api/test-helpers.md +591 -0
- data/docs/api/tool.md +527 -0
- data/docs/cookbook/authentication.md +651 -0
- data/docs/cookbook/caching.md +877 -0
- data/docs/cookbook/dynamic-tools.md +970 -0
- data/docs/cookbook/error-handling.md +887 -0
- data/docs/cookbook/logging.md +1044 -0
- data/docs/cookbook/rate-limiting.md +717 -0
- data/docs/examples/code-assistant.md +922 -0
- data/docs/examples/complete-server.md +726 -0
- data/docs/examples/database-manager.md +1198 -0
- data/docs/examples/devops-tools.md +1382 -0
- data/docs/examples/echo-server.md +501 -0
- data/docs/examples/weather-service.md +822 -0
- data/docs/guides/completion.md +472 -0
- data/docs/guides/getting-started.md +462 -0
- data/docs/guides/middleware.md +823 -0
- data/docs/guides/project-structure.md +434 -0
- data/docs/guides/prompts.md +920 -0
- data/docs/guides/resources.md +720 -0
- data/docs/guides/sampling.md +804 -0
- data/docs/guides/testing.md +863 -0
- data/docs/guides/tools.md +627 -0
- data/examples/README.md +92 -0
- data/examples/advanced_features.rb +129 -0
- data/examples/basic-migrated/app/prompts/weather_chat.rb +44 -0
- data/examples/basic-migrated/app/resources/weather_alerts.rb +18 -0
- data/examples/basic-migrated/app/tools/get_current_weather.rb +34 -0
- data/examples/basic-migrated/app/tools/get_forecast.rb +30 -0
- data/examples/basic-migrated/app/tools/get_weather_by_coords.rb +48 -0
- data/examples/basic-migrated/server.rb +25 -0
- data/examples/basic.rb +73 -0
- data/examples/full_featured.rb +175 -0
- data/examples/middleware_example.rb +112 -0
- data/examples/sampling_example.rb +104 -0
- data/examples/weather-service/app/prompts/weather/chat.rb +90 -0
- data/examples/weather-service/app/resources/weather/alerts.rb +59 -0
- data/examples/weather-service/app/tools/weather/get_current.rb +82 -0
- data/examples/weather-service/app/tools/weather/get_forecast.rb +90 -0
- data/examples/weather-service/server.rb +28 -0
- data/exe/tsikol +6 -0
- data/lib/tsikol/cli/templates/Gemfile.erb +10 -0
- data/lib/tsikol/cli/templates/README.md.erb +38 -0
- data/lib/tsikol/cli/templates/gitignore.erb +49 -0
- data/lib/tsikol/cli/templates/prompt.rb.erb +53 -0
- data/lib/tsikol/cli/templates/resource.rb.erb +29 -0
- data/lib/tsikol/cli/templates/server.rb.erb +24 -0
- data/lib/tsikol/cli/templates/tool.rb.erb +60 -0
- data/lib/tsikol/cli.rb +203 -0
- data/lib/tsikol/error_handler.rb +141 -0
- data/lib/tsikol/health.rb +198 -0
- data/lib/tsikol/http_transport.rb +72 -0
- data/lib/tsikol/lifecycle.rb +149 -0
- data/lib/tsikol/middleware.rb +168 -0
- data/lib/tsikol/prompt.rb +101 -0
- data/lib/tsikol/resource.rb +53 -0
- data/lib/tsikol/router.rb +190 -0
- data/lib/tsikol/server.rb +660 -0
- data/lib/tsikol/stdio_transport.rb +108 -0
- data/lib/tsikol/test_helpers.rb +261 -0
- data/lib/tsikol/tool.rb +111 -0
- data/lib/tsikol/version.rb +5 -0
- data/lib/tsikol.rb +72 -0
- metadata +219 -0
data/docs/cookbook/logging.md
@@ -0,0 +1,1044 @@

# Logging Recipe

This recipe shows comprehensive logging strategies for debugging, monitoring, and auditing your MCP server.

## Basic Logging

### Server Logging Configuration

```ruby
Tsikol.start(name: "logged-server") do
  # Enable logging capability
  logging true

  # Use built-in logging middleware
  use Tsikol::LoggingMiddleware,
    level: :info,
    include_params: true,
    include_response: true,
    max_response_length: 500

  # Custom logger configuration
  before_start do
    configure_logger
  end

  tool LoggedTool
  resource LoggedResource
end

def configure_logger
  # Set up custom logger
  require 'logger'

  logger = Logger.new(STDOUT)
  logger.level = ENV['LOG_LEVEL'] || Logger::INFO

  # Custom formatter
  logger.formatter = proc do |severity, datetime, progname, msg|
    {
      timestamp: datetime.iso8601,
      level: severity,
      program: progname,
      message: msg,
      pid: Process.pid,
      thread: Thread.current.object_id
    }.to_json + "\n"
  end

  # Make logger available globally
  Thread.current[:logger] = logger
end
```

### Tool Logging

```ruby
class LoggedTool < Tsikol::Tool
  description "Tool with comprehensive logging"

  parameter :operation do
    type :string
    required
  end

  parameter :data do
    type :object
    required
  end

  def execute(operation:, data:)
    request_id = SecureRandom.uuid

    log :info, "Tool execution started",
      request_id: request_id,
      operation: operation,
      data_size: data.to_json.bytesize

    start_time = Time.now

    begin
      result = perform_operation(operation, data)

      log :info, "Tool execution completed",
        request_id: request_id,
        duration_ms: ((Time.now - start_time) * 1000).round(2),
        result_size: result.to_json.bytesize

      result
    rescue => e
      log :error, "Tool execution failed",
        request_id: request_id,
        duration_ms: ((Time.now - start_time) * 1000).round(2),
        error: e.class.name,
        message: e.message,
        backtrace: e.backtrace&.first(5)

      raise
    end
  end

  private

  def perform_operation(operation, data)
    log :debug, "Processing operation", operation: operation

    case operation
    when "transform"
      transform_data(data)
    when "analyze"
      analyze_data(data)
    else
      raise ArgumentError, "Unknown operation: #{operation}"
    end
  end

  def set_server(server)
    @server = server

    # Set up logging if server supports it
    if @server.logging_enabled?
      define_singleton_method(:log) do |level, message, **data|
        @server.log(level, "[#{self.class.name}] #{message}", data: data)
      end
    else
      # No-op logging if not enabled
      define_singleton_method(:log) do |*args|
      end
    end
  end
end
```

## Structured Logging

### JSON Logger Middleware

```ruby
class StructuredLoggingMiddleware < Tsikol::Middleware
  def initialize(app, options = {})
    @app = app
    @logger = options[:logger] || create_default_logger
    @include_params = options.fetch(:include_params, true)
    @sanitizer = options[:sanitizer] || DefaultSanitizer.new
  end

  def call(request)
    context = build_request_context(request)

    log_event(:request_started, context)

    start_time = Time.now
    response = nil

    begin
      response = @app.call(request)

      context[:response] = build_response_context(response)
      context[:duration_ms] = ((Time.now - start_time) * 1000).round(2)

      log_event(:request_completed, context)

      response
    rescue => e
      context[:error] = build_error_context(e)
      context[:duration_ms] = ((Time.now - start_time) * 1000).round(2)

      log_event(:request_failed, context)

      raise
    end
  end

  private

  def create_default_logger
    logger = Logger.new(STDOUT)
    logger.formatter = JsonFormatter.new
    logger
  end

  def build_request_context(request)
    context = {
      request_id: request["id"],
      method: request["method"],
      timestamp: Time.now.iso8601
    }

    if @include_params && request["params"]
      context[:params] = @sanitizer.sanitize(request["params"])
    end

    context
  end

  def build_response_context(response)
    if response[:error]
      {
        status: "error",
        error_code: response[:error][:code],
        error_message: response[:error][:message]
      }
    else
      {
        status: "success",
        result_type: response[:result].class.name
      }
    end
  end

  def build_error_context(error)
    {
      error_class: error.class.name,
      error_message: error.message,
      backtrace: error.backtrace&.first(3)
    }
  end

  def log_event(event, context)
    @logger.info({
      event: event,
      service: "mcp-server",
      **context
    })
  end
end

class JsonFormatter
  def call(severity, timestamp, progname, msg)
    log_entry = {
      timestamp: timestamp.iso8601,
      level: severity,
      program: progname
    }

    if msg.is_a?(Hash)
      log_entry.merge!(msg)
    else
      log_entry[:message] = msg
    end

    JSON.generate(log_entry) + "\n"
  end
end

class DefaultSanitizer
  SENSITIVE_KEYS = %w[password token secret key auth api_key].freeze

  def sanitize(data)
    case data
    when Hash
      # Redact values whose own key looks sensitive; recurse into the rest
      data.each_with_object({}) do |(key, value), sanitized|
        sanitized[key] = sensitive_key?(key) ? "[REDACTED]" : sanitize(value)
      end
    when Array
      data.map { |item| sanitize(item) }
    else
      data
    end
  end

  private

  def sensitive_key?(key)
    SENSITIVE_KEYS.any? { |sensitive| key.to_s.downcase.include?(sensitive) }
  end
end
```
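
Registering the middleware mirrors the built-in `Tsikol::LoggingMiddleware` example above. A minimal sketch, assuming the same `use` DSL; the explicit logger is optional, since the middleware falls back to a JSON logger on STDOUT:

```ruby
# Sketch: wiring StructuredLoggingMiddleware into a server.
Tsikol.start(name: "structured-server") do
  logging true

  use StructuredLoggingMiddleware,
    logger: Logger.new(STDOUT).tap { |l| l.formatter = JsonFormatter.new },
    include_params: true

  tool LoggedTool
end
```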

### Request Context Logging

```ruby
class RequestContextMiddleware < Tsikol::Middleware
  def call(request)
    # Set up request context for logging
    RequestContext.with_context(
      request_id: request["id"] || SecureRandom.uuid,
      method: request["method"],
      user_id: request.dig("authenticated_user", "id"),
      client_id: request.dig("params", "_client_id"),
      correlation_id: request.dig("params", "_correlation_id")
    ) do
      @app.call(request)
    end
  end
end

module RequestContext
  extend self

  def with_context(context)
    old_context = current
    self.current = current.merge(context)
    yield
  ensure
    self.current = old_context
  end

  def current
    Thread.current[:request_context] ||= {}
  end

  def current=(context)
    Thread.current[:request_context] = context
  end

  def to_h
    current.dup
  end
end

# Enhanced logger that includes context
class ContextualLogger
  def initialize(base_logger)
    @base_logger = base_logger
  end

  %i[debug info warn error fatal].each do |level|
    define_method(level) do |message, **data|
      log_with_context(level, message, data)
    end
  end

  private

  def log_with_context(level, message, data)
    context = RequestContext.to_h

    @base_logger.send(level, {
      message: message,
      **context,
      **data
    })
  end
end
```
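
One plausible way to wire these together, shown as a sketch that assumes only the `use` and `before_start` hooks from the basic configuration above:

```ruby
# Sketch: request-scoped context feeding a context-aware logger.
Tsikol.start(name: "context-server") do
  use RequestContextMiddleware

  before_start do
    base = Logger.new(STDOUT)
    base.formatter = JsonFormatter.new
    Thread.current[:logger] = ContextualLogger.new(base)
  end
end

# Inside a tool handler, every entry now carries the current request's context:
Thread.current[:logger].info("Fetching forecast", city: "Paris")
# => JSON line including request_id, method, correlation_id and city
```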

## Log Aggregation

### Multi-Destination Logging

```ruby
class MultiLogger
  def initialize
    @loggers = []
  end

  def add_logger(logger)
    @loggers << logger
  end

  %i[debug info warn error fatal].each do |level|
    define_method(level) do |message, **data|
      @loggers.each do |logger|
        logger.send(level, message, **data) rescue nil
      end
    end
  end
end

# File logger with rotation
file_logger = Logger.new(
  "logs/mcp-server.log",
  10,         # Keep 10 files
  10_485_760  # 10MB per file
)

# Syslog logger
require 'syslog/logger'
syslog_logger = Syslog::Logger.new("mcp-server")

# JSON logger for structured logs
json_logger = Logger.new("logs/mcp-server.json")
json_logger.formatter = JsonFormatter.new

# CloudWatch logger (example)
class CloudWatchLogger
  def initialize(log_group, log_stream)
    @client = Aws::CloudWatchLogs::Client.new
    @log_group = log_group
    @log_stream = log_stream
    @buffer = []
    @mutex = Mutex.new

    start_flush_thread
  end

  def log(level, message, data = {})
    event = {
      timestamp: (Time.now.to_f * 1000).to_i,
      message: {
        level: level.to_s.upcase,
        message: message,
        **data
      }.to_json
    }

    @mutex.synchronize { @buffer << event }
  end

  %i[debug info warn error fatal].each do |level|
    define_method(level) do |message, **data|
      log(level, message, data)
    end
  end

  private

  def start_flush_thread
    Thread.new do
      loop do
        sleep 5
        flush_buffer
      end
    end
  end

  def flush_buffer
    events = @mutex.synchronize do
      @buffer.dup.tap { @buffer.clear }
    end

    return if events.empty?

    @client.put_log_events(
      log_group_name: @log_group,
      log_stream_name: @log_stream,
      log_events: events
    )
  rescue
    # Re-add events to buffer on failure
    @mutex.synchronize { @buffer.concat(events) }
  end
end

# Combine all loggers
multi_logger = MultiLogger.new
multi_logger.add_logger(file_logger)
multi_logger.add_logger(syslog_logger)
multi_logger.add_logger(json_logger)
multi_logger.add_logger(CloudWatchLogger.new("mcp-servers", "production"))
```
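
The combined logger can then stand in wherever a single logger is expected. A sketch, reusing the structured middleware's `logger:` option from earlier:

```ruby
# Sketch: fan every request log out to all configured destinations.
Tsikol.start(name: "aggregated-server") do
  use StructuredLoggingMiddleware, logger: multi_logger
end
```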

## Performance Logging

### Request Performance Tracking

```ruby
class PerformanceLoggingMiddleware < Tsikol::Middleware
  def initialize(app, options = {})
    @app = app
    @slow_request_threshold = options[:slow_request_threshold] || 1000 # ms
    @metrics_logger = options[:metrics_logger] || create_metrics_logger
  end

  def call(request)
    timer = RequestTimer.new(request)

    begin
      response = @app.call(request)

      timer.stop
      log_performance_metrics(timer, response)

      response
    rescue => e
      timer.stop
      log_performance_metrics(timer, nil, e)
      raise
    end
  end

  private

  def log_performance_metrics(timer, response, error = nil)
    metrics = timer.metrics

    # Always log slow requests
    if metrics[:total_ms] > @slow_request_threshold
      @metrics_logger.warn({ event: "slow_request_detected", **metrics })
    end

    # Log to metrics system
    @metrics_logger.info({
      event: "request_performance",
      **metrics,
      status: error ? "error" : "success",
      error_class: error&.class&.name
    })
  end

  def create_metrics_logger
    logger = Logger.new("logs/metrics.log")
    logger.formatter = proc do |_, timestamp, _, msg|
      "#{timestamp.iso8601} #{msg.to_json}\n"
    end
    logger
  end
end

class RequestTimer
  attr_reader :request, :checkpoints

  def initialize(request)
    @request = request
    @start_time = Time.now
    @checkpoints = {}
    @stopped = false
  end

  def checkpoint(name)
    @checkpoints[name] = Time.now unless @stopped
  end

  def stop
    @end_time = Time.now
    @stopped = true
  end

  def metrics
    total_time = (@end_time || Time.now) - @start_time

    metrics = {
      method: @request["method"],
      request_id: @request["id"],
      total_ms: (total_time * 1000).round(2),
      timestamp: @start_time.iso8601
    }

    # Add checkpoint timings
    @checkpoints.each do |name, time|
      metrics["#{name}_ms"] = ((time - @start_time) * 1000).round(2)
    end

    metrics
  end
end
```
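
Registration follows the same pattern as the other middleware. A minimal sketch with a stricter threshold (the values are illustrative):

```ruby
# Sketch: enable performance logging with a 500 ms slow-request threshold.
Tsikol.start(name: "timed-server") do
  use PerformanceLoggingMiddleware,
    slow_request_threshold: 500,
    metrics_logger: Logger.new("logs/metrics.log")
end
```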

### Database Query Logging

```ruby
class QueryLogger
  def initialize(options = {})
    @logger = options[:logger] || create_query_logger
    @slow_query_threshold = options[:slow_query_threshold] || 100 # ms
    @include_binds = options[:include_binds] || false
  end

  def log_query(sql, binds = [])
    start_time = Time.now

    begin
      result = yield
      log_query_event(sql, binds, start_time, rows_affected: extract_rows_affected(result))
      result
    rescue => e
      log_query_event(sql, binds, start_time, error: e)
      raise
    end
  end

  private

  def log_query_event(sql, binds, start_time, error: nil, rows_affected: nil)
    duration_ms = ((Time.now - start_time) * 1000).round(2)

    event = {
      event: "database_query",
      sql: sanitize_sql(sql),
      duration_ms: duration_ms,
      timestamp: start_time.iso8601,
      context: RequestContext.to_h
    }

    event[:binds] = binds if @include_binds && binds.any?
    event[:rows_affected] = rows_affected if rows_affected
    event[:error] = error.message if error

    level = determine_log_level(duration_ms, error)
    @logger.send(level, event)
  end

  def determine_log_level(duration_ms, error)
    return :error if error
    return :warn if duration_ms > @slow_query_threshold
    :debug
  end

  def sanitize_sql(sql)
    # Remove excess whitespace
    sql.gsub(/\s+/, ' ').strip
  end

  def extract_rows_affected(result)
    case result
    when Integer
      result
    when Array
      result.size
    end
  end

  def create_query_logger
    Logger.new("logs/queries.log").tap do |logger|
      logger.formatter = JsonFormatter.new
    end
  end
end

# Usage in database operations
class DatabaseTool < Tsikol::Tool
  def initialize
    super
    @query_logger = QueryLogger.new
  end

  def execute(query:)
    @query_logger.log_query(query) do
      # Execute actual database query
      Database.connection.execute(query)
    end
  end
end
```

## Audit Logging

### Security Audit Trail

```ruby
require 'digest'

class AuditLogger
  def initialize(options = {})
    @logger = create_audit_logger(options[:log_file] || "logs/audit.log")
    @include_ip = options.fetch(:include_ip, true)
    @include_user_agent = options.fetch(:include_user_agent, true)
  end

  def log_event(event_type, details = {})
    audit_entry = build_audit_entry(event_type, details)
    @logger.info(audit_entry)

    # Also send to security monitoring system
    send_to_siem(audit_entry) if critical_event?(event_type)
  end

  def log_authentication(success:, user_id: nil, method: nil, reason: nil)
    log_event(
      success ? :authentication_success : :authentication_failure,
      user_id: user_id,
      method: method,
      reason: reason
    )
  end

  def log_authorization(success:, user_id:, resource:, action:, reason: nil)
    log_event(
      success ? :authorization_success : :authorization_failure,
      user_id: user_id,
      resource: resource,
      action: action,
      reason: reason
    )
  end

  def log_data_access(user_id:, resource:, operation:, data_classification: nil)
    log_event(
      :data_access,
      user_id: user_id,
      resource: resource,
      operation: operation,
      data_classification: data_classification
    )
  end

  def log_configuration_change(user_id:, setting:, old_value:, new_value:)
    log_event(
      :configuration_change,
      user_id: user_id,
      setting: setting,
      old_value: sanitize_value(old_value),
      new_value: sanitize_value(new_value)
    )
  end

  private

  def build_audit_entry(event_type, details)
    entry = {
      event_type: event_type,
      timestamp: Time.now.iso8601,
      **details,
      **RequestContext.to_h
    }

    # Add request metadata
    if (request = Thread.current[:mcp_request])
      entry[:client_ip] = extract_client_ip(request) if @include_ip
      entry[:user_agent] = request.dig("meta", "user_agent") if @include_user_agent
    end

    entry
  end

  def critical_event?(event_type)
    %i[
      authentication_failure
      authorization_failure
      configuration_change
      privilege_escalation
      data_export
    ].include?(event_type)
  end

  def send_to_siem(audit_entry)
    # Send to Security Information and Event Management system
    Thread.new do
      SiemClient.send_event(audit_entry)
    rescue => e
      # Log but don't fail
      @logger.error("Failed to send to SIEM: #{e.message}")
    end
  end

  def sanitize_value(value)
    case value
    when String
      value.include?("password") ? "[REDACTED]" : value
    when Hash
      value.transform_values { |v| sanitize_value(v) }
    else
      value
    end
  end

  def create_audit_logger(log_file)
    # Audit logs should be append-only and tamper-evident
    Logger.new(log_file).tap do |logger|
      logger.formatter = proc do |_, timestamp, _, msg|
        # Include checksum for tamper detection
        entry = msg.is_a?(Hash) ? msg : { message: msg }
        entry = entry.merge(timestamp: timestamp.iso8601)
        checksum = Digest::SHA256.hexdigest(entry.to_json)

        "#{entry.to_json}|#{checksum}\n"
      end
    end
  end
end

# Audit middleware
class AuditMiddleware < Tsikol::Middleware
  def initialize(app, options = {})
    @app = app
    @audit_logger = options[:audit_logger] || AuditLogger.new
    @audit_methods = options[:audit_methods] || sensitive_methods
  end

  def call(request)
    if should_audit?(request)
      audit_request(request)
    end

    response = @app.call(request)

    if should_audit?(request)
      audit_response(request, response)
    end

    response
  end

  private

  def should_audit?(request)
    @audit_methods.include?(request["method"])
  end

  def sensitive_methods
    %w[
      tools/call
      resources/read
      configuration/update
      users/create
      users/update
      users/delete
    ]
  end

  def audit_request(request)
    user_id = request.dig("authenticated_user", "id")

    @audit_logger.log_event(
      :api_request,
      method: request["method"],
      user_id: user_id,
      params: sanitize_params(request["params"])
    )
  end

  def sanitize_params(params)
    # Reuse the sanitizer from the structured logging example
    DefaultSanitizer.new.sanitize(params || {})
  end

  def audit_response(request, response)
    if response[:error]
      @audit_logger.log_event(
        :api_error,
        method: request["method"],
        error_code: response[:error][:code],
        error_message: response[:error][:message]
      )
    end
  end
end
```
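
A short usage sketch, assuming the server DSL from the earlier sections; `log_authentication` is the method defined above, while the authentication hook it is called from is app-specific:

```ruby
# Sketch: audit middleware plus an explicit auth-failure event.
audit = AuditLogger.new(log_file: "logs/audit.log")

Tsikol.start(name: "audited-server") do
  use AuditMiddleware, audit_logger: audit
end

# From an authentication check (location depends on your server):
audit.log_authentication(success: false, user_id: "user-42",
                         method: "api_key", reason: "expired key")
```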

## Log Analysis Tools

### Log Parser and Analyzer

```ruby
require 'json'
require 'time'

class LogAnalyzer
  def initialize(log_file)
    @log_file = log_file
  end

  def analyze_performance(time_range = nil)
    stats = {
      total_requests: 0,
      slow_requests: 0,
      errors: 0,
      average_duration: 0,
      percentiles: {}
    }

    durations = []

    parse_logs(time_range) do |entry|
      next unless entry[:event] == "request_completed"

      stats[:total_requests] += 1
      duration = entry[:duration_ms]
      durations << duration

      stats[:slow_requests] += 1 if duration > 1000
      stats[:errors] += 1 if entry[:status] == "error"
    end

    if durations.any?
      stats[:average_duration] = (durations.sum / durations.size).round(2)
      stats[:percentiles] = calculate_percentiles(durations)
    end

    stats
  end

  def find_errors(time_range = nil)
    errors = []

    parse_logs(time_range) do |entry|
      if entry[:level] == "ERROR" || entry[:event] == "request_failed"
        errors << {
          timestamp: entry[:timestamp],
          error: entry[:error_message] || entry[:message],
          method: entry[:method],
          request_id: entry[:request_id]
        }
      end
    end

    errors
  end

  def trace_request(request_id)
    entries = []

    parse_logs do |entry|
      if entry[:request_id] == request_id
        entries << entry
      end
    end

    entries.sort_by { |e| e[:timestamp] }
  end

  private

  def parse_logs(time_range = nil)
    File.foreach(@log_file) do |line|
      begin
        entry = JSON.parse(line.strip)

        if time_range
          timestamp = Time.parse(entry["timestamp"])
          next unless time_range.cover?(timestamp)
        end

        yield(entry.transform_keys(&:to_sym))
      rescue JSON::ParserError
        # Skip malformed lines
      end
    end
  end

  def calculate_percentiles(values)
    sorted = values.sort

    {
      p50: percentile(sorted, 0.5),
      p90: percentile(sorted, 0.9),
      p95: percentile(sorted, 0.95),
      p99: percentile(sorted, 0.99)
    }
  end

  def percentile(sorted_values, p)
    index = (sorted_values.size * p).ceil - 1
    sorted_values[index].round(2)
  end
end

# Usage
analyzer = LogAnalyzer.new("logs/mcp-server.json")

# Analyze the last hour's performance
last_hour = (Time.now - 3600)..Time.now
stats = analyzer.analyze_performance(last_hour)
puts "Performance stats: #{stats}"

# Find recent errors
errors = analyzer.find_errors(last_hour)
errors.each do |error|
  puts "Error at #{error[:timestamp]}: #{error[:error]}"
end

# Trace specific request
request_trace = analyzer.trace_request("req-123")
request_trace.each do |entry|
  puts "#{entry[:timestamp]} [#{entry[:level]}] #{entry[:message]}"
end
```

## Testing with Logging

```ruby
require 'minitest/autorun'
require 'stringio'

class LoggingTest < Minitest::Test
  def setup
    @log_output = StringIO.new
    @logger = Logger.new(@log_output)
    @logger.formatter = JsonFormatter.new

    @server = Tsikol::Server.new(name: "test")
    @server.logging true
    @server.use StructuredLoggingMiddleware, logger: @logger

    @server.tool "test_tool" do |input:|
      @server.log :info, "Processing input", size: input.size
      input.upcase
    end

    @client = Tsikol::TestHelpers::TestClient.new(@server)
  end

  def test_logs_requests
    @client.call_tool("test_tool", { "input" => "hello" })

    logs = parse_log_output

    assert logs.any? { |log| log[:event] == "request_started" }
    assert logs.any? { |log| log[:event] == "request_completed" }
    assert logs.any? { |log| log[:message] == "Processing input" }
  end

  def test_logs_errors
    @server.tool "error_tool" do
      raise "Test error"
    end

    assert_raises(StandardError) do
      @client.call_tool("error_tool", {})
    end

    logs = parse_log_output

    error_log = logs.find { |log| log[:event] == "request_failed" }
    assert error_log
    assert_equal "Test error", error_log[:error][:error_message]
  end

  def test_sanitizes_sensitive_data
    @client.call_tool("test_tool", {
      "input" => "data",
      "password" => "secret123"
    })

    log_output = @log_output.string

    refute log_output.include?("secret123")
    assert log_output.include?("[REDACTED]")
  end

  private

  def parse_log_output
    @log_output.string.lines.map do |line|
      JSON.parse(line, symbolize_names: true)
    rescue JSON::ParserError
      nil
    end.compact
  end
end
```

## Best Practices

1. **Use structured logging** (JSON) for easier parsing (see the sketch after this list)
2. **Include request context** in all log entries
3. **Sanitize sensitive data** before logging
4. **Use appropriate log levels** (debug, info, warn, error)
5. **Implement log rotation** to manage disk space
6. **Centralize logs** for multi-server deployments
7. **Set up alerts** for critical errors
8. **Retain audit logs** according to compliance requirements
9. **Monitor log volume** to detect anomalies
10. **Use correlation IDs** to trace requests across services
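
Several of these practices compose directly from pieces in this recipe. A minimal sketch combining structured output, request context, levels, and rotation, assuming the `JsonFormatter`, `ContextualLogger`, and `RequestContext` classes defined above:

```ruby
# Sketch: JSON logs (1), request context (2), levels (4), and rotation (5).
rotating = Logger.new("logs/mcp-server.json", 10, 10_485_760) # keep 10 files of 10MB
rotating.formatter = JsonFormatter.new
rotating.level = Logger::INFO

app_logger = ContextualLogger.new(rotating)

# Any request_id / correlation_id set by RequestContextMiddleware is merged
# into each entry automatically:
app_logger.info("Forecast cache refreshed", entries: 250)
```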

## Next Steps

- Set up [Monitoring](monitoring.md) based on logs
- Implement [Error Handling](error-handling.md) with proper logging
- Add [Performance](performance.md) tracking
- Configure [Security](security.md) audit logging