cuboid 0.3.6 → 0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +195 -0
- data/cuboid.gemspec +4 -0
- data/lib/cuboid/application.rb +84 -3
- data/lib/cuboid/mcp/auth.rb +99 -0
- data/lib/cuboid/mcp/core_tools.rb +318 -0
- data/lib/cuboid/mcp/live.rb +166 -0
- data/lib/cuboid/mcp/server.rb +426 -0
- data/lib/cuboid/option_groups/paths.rb +40 -0
- data/lib/cuboid/processes/executables/base.rb +37 -0
- data/lib/cuboid/processes/executables/mcp.rb +20 -0
- data/lib/cuboid/processes/instances.rb +9 -1
- data/lib/cuboid/processes/manager.rb +22 -1
- data/lib/cuboid/rest/server/instance_helpers.rb +21 -70
- data/lib/cuboid/rest/server/routes/instances.rb +1 -3
- data/lib/cuboid/rest/server.rb +1 -1
- data/lib/cuboid/rpc/server/agent.rb +6 -1
- data/lib/cuboid/rpc/server/instance.rb +32 -0
- data/lib/cuboid/server/instance_helpers.rb +131 -0
- data/lib/version +1 -1
- data/spec/cuboid/mcp/auth_spec.rb +179 -0
- data/spec/cuboid/mcp/server_spec.rb +346 -0
- data/spec/cuboid/rest/server_spec.rb +3 -4
- data/spec/support/shared/option_group.rb +11 -1
- metadata +26 -2
data/lib/cuboid/rest/server/instance_helpers.rb
CHANGED

@@ -1,80 +1,35 @@
+require_relative '../../server/instance_helpers'
+
 module Cuboid
 module Rest
 class Server

+# Sinatra-coupled supplement to `Cuboid::Server::InstanceHelpers` —
+# the methods that read `env`, call `handle_error` (a Sinatra helper
+# defined on `Rest::Server`), or prune `session` entries belonging to
+# scheduler-removed instances. Everything that doesn't need Sinatra
+# stays on the shared module above.
 module InstanceHelpers

-
-    @@agents = {}
-
-    def get_instance
-        if agent
-            options = {
-                owner: self.class.to_s,
-                helpers: {
-                    owner: {
-                        url: env['HTTP_HOST']
-                    }
-                }
-            }
-
-            if (info = agent.spawn( options ))
-                connect_to_instance( info['url'], info['token'] )
-            end
-        else
-            Processes::Instances.spawn( application: Options.paths.application, daemonize: true )
-        end
-    end
-
-    def agents
-        @@agents.keys
-    end
-
-    def agent
-        return if !Options.agent.url
-        @agent ||= connect_to_agent( Options.agent.url )
-    end
-
-    def unplug_agent( url )
-        connect_to_agent( url ).node.unplug
-
-        c = @@agents.delete( url )
-        c.close if c
-    end
-
-    def connect_to_agent( url )
-        @@agents[url] ||= RPC::Client::Agent.new( url )
-    end
+    include ::Cuboid::Server::InstanceHelpers

-
-
+    # Forward the request host to the shared spawner so the Agent can
+    # log who asked for the instance.
+    def spawn( owner_url: env['HTTP_HOST'] )
+        super
     end

+    # Adds Sinatra-session cleanup for IDs the scheduler has dropped.
+    # The shared `update_from_scheduler` already removes them from the
+    # instance map; this override prunes the matching session keys so a
+    # second request from the same browser doesn't try to reach a dead
+    # instance.
     def update_from_scheduler
         return if !scheduler

-        scheduler.running.each do |id, info|
-            instances[id] ||= connect_to_instance( info['url'], info['token'] )
-        end
-
-        (scheduler.failed.keys | scheduler.completed.keys).each do |id|
-            session.delete id
-            client = instances.delete( id )
-            client.close if client
-        end
-    end
-
-    def scheduler
-        return if !Options.scheduler.url
-        @scheduler ||= connect_to_scheduler( Options.scheduler.url )
-    end
-
-    def connect_to_scheduler( url )
-        RPC::Client::Scheduler.new( url )
-    end
-
-    def instances
-        @@instances
+        pruned = scheduler.failed.keys | scheduler.completed.keys
+        super
+        pruned.each { |id| session.delete id }
     end

     def instance_for( id, &block )
@@ -84,14 +39,10 @@ module InstanceHelpers
         end

         handle_error cleanup do
-            block.call
+            block.call instances[id]
         end
     end

-    def exists?( id )
-        instances.include? id
-    end
-
 end

 end
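The `spawn` override above leans on an easy-to-miss piece of Ruby semantics: a bare `super` re-sends the keyword arguments the method received, including defaults evaluated at call time, so the Sinatra layer only has to supply the `env`-derived default and the shared module does the rest. A standalone sketch of the pattern (illustrative names, not cuboid code):

module SharedHelpers
    def spawn( owner_url: nil )
        "spawned for #{owner_url.inspect}"
    end
end

class RackFacade
    include SharedHelpers

    def env
        { 'HTTP_HOST' => 'example.test:7331' }
    end

    # Same shape as the override above: the default comes from `env`,
    # and a bare `super` forwards it to the included module's method.
    def spawn( owner_url: env['HTTP_HOST'] )
        super
    end
end

RackFacade.new.spawn # => "spawned for \"example.test:7331\""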
data/lib/cuboid/rest/server/routes/instances.rb
CHANGED

@@ -20,7 +20,7 @@ module Instances

         options = ::JSON.load( request.body.read ) || {}

-        instance = get_instance
+        instance = self.spawn
         max_utilization! if !instance

         handle_error proc { (instance.shutdown rescue nil) } do
@@ -110,8 +110,6 @@ module Instances
     app.delete '/instances/:instance' do
         ensure_instance!
         id = params[:instance]
-
-        instance = instances[id]
         handle_error { (instance.shutdown rescue nil) }

         instances.delete( id ).close
data/lib/cuboid/rest/server.rb
CHANGED

@@ -18,7 +18,7 @@ class Server < Sinatra::Base

     Dir.glob( "#{File.dirname( __FILE__ )}/server/routes/*.rb" ).each { |f| require f }

-    helpers InstanceHelpers
+    helpers ::Cuboid::Rest::Server::InstanceHelpers

     register Sinatra::Namespace
     Cuboid::Application.application.rest_services.each do |name, service|
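Two notes on this one-line change: Sinatra's `helpers` mixes a module's instance methods into the request context so every route block can call them, and the constant is now fully qualified because this release introduces a second module named `InstanceHelpers` (`Cuboid::Server::InstanceHelpers`), so an unqualified reference would read ambiguously even where Ruby's lexical lookup resolves it. A minimal standalone sketch of the `helpers` mechanism (illustrative names):

require 'sinatra/base'

module GreetingHelpers
    def greet( name )
        "hello #{name}"
    end
end

class Demo < Sinatra::Base
    # Same mechanism as the registration above: the module's instance
    # methods become callable inside every route block.
    helpers GreetingHelpers

    get '/:name' do
        greet( params[:name] )
    end
end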
data/lib/cuboid/rpc/server/agent.rb
CHANGED

@@ -320,12 +320,17 @@ class Agent
     end

     def spawn_instance( options = {}, &block )
+        # `detached: true` opts the spawned engine out of the
+        # base.rb parent-death watchdog: an agent restarting / dying
+        # must NOT take the engine with it (grid pattern — the
+        # instance is owned by whoever connects, not the agent).
         Processes::Instances.spawn( options.merge(
             address: @server.address,
             port_range: Options.agent.instance_port_range,
             token: Utilities.generate_token,
             application: Options.paths.application,
-            daemonize: true
+            daemonize: true,
+            detached: true
         )) do |client|
             block.call(
                 'token' => client.token,
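The parent-death watchdog itself lives in data/lib/cuboid/processes/executables/base.rb, which this diff lists but does not expand. As a rough sketch only, and an assumption rather than the gem's actual code, such watchdogs usually poll `Process.ppid` and die when the process gets reparented:

# HYPOTHETICAL sketch, not cuboid's actual base.rb: the common shape of
# a parent-death watchdog. On Unix a process whose parent dies is
# reparented, so a change in Process.ppid means the spawner is gone.
def install_parent_death_watchdog( poll_interval = 1 )
    parent = Process.ppid

    Thread.new do
        loop do
            sleep poll_interval
            next if Process.ppid == parent

            # Parent died; follow it down. `detached: true` would simply
            # skip installing this thread.
            Process.kill( 'KILL', Process.pid )
        end
    end
end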
data/lib/cuboid/rpc/server/instance.rb
CHANGED

@@ -226,6 +226,27 @@ class Instance
     end

     # Makes the server go bye-bye...Lights out!
+    #
+    # `shutdown` must reliably take the Ruby process with it. Stopping
+    # the reactor + RPC server alone leaves the Application's non-daemon
+    # threads (audit workers, browser cluster manager, etc.) blocking
+    # the runtime — historically this leaked engine subprocesses every
+    # time `kill_instance` was called over MCP, and showed up in the
+    # cuboid spec suite as leftover ruby processes after the run.
+    # The `instance.shutdown` RPC returned success but the daemonised
+    # process never actually exited.
+    #
+    # Two-stage exit:
+    #   1. Raise SystemExit on the **main thread** so the at_exit
+    #      chain runs (Cuboid_<pid> tmpdir cleanup, live-plugin's
+    #      `exited` push). SystemExit raised on a non-main thread
+    #      only kills that thread — must hit the main one.
+    #   2. Watchdog SIGKILL after a grace window in case a
+    #      non-daemon Application thread refuses to release. The
+    #      Paths boot-sweep reaps the orphaned tmpdir on the next
+    #      cuboid process launch even when at_exit didn't run.
+    SHUTDOWN_GRACE_SECONDS = 5.0
+
     def shutdown( &block )
         if @shutdown
             block.call if block_given?
@@ -243,6 +264,17 @@ class Instance
         @server.shutdown
         @raktr.stop
         block.call true if block_given?
+
+        # Stage 1 — graceful: SystemExit on the main thread so
+        # at_exit handlers run.
+        main = Thread.main
+        if main && main.alive? && main != Thread.current
+            main.raise( SystemExit.new( 0 ) ) rescue nil
+        end
+
+        # Stage 2 — watchdog: hammer if main can't unwind.
+        sleep SHUTDOWN_GRACE_SECONDS
+        Process.kill( 'KILL', Process.pid ) rescue nil
     end

     true
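The "must hit the main one" caveat is plain Ruby semantics and can be verified without any cuboid code:

at_exit { puts 'at_exit ran' }

worker = Thread.new { sleep }
sleep 0.1 # let the worker block

# Delivered to a non-main thread, SystemExit kills only that thread.
worker.raise( SystemExit.new( 0 ) )
begin
    worker.join # join re-raises the exception the thread died with
rescue SystemExit
end
puts 'process still alive' # printed; at_exit has NOT run yet

# Delivered to the main thread, SystemExit unwinds the whole process
# and runs the at_exit chain.
Thread.main.raise( SystemExit.new( 0 ) )
puts 'never reached'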
data/lib/cuboid/server/instance_helpers.rb
ADDED

@@ -0,0 +1,131 @@
+module Cuboid
+module Server
+
+# Shared registry + lookup helpers for the running engine instances
+# any front-end (REST, MCP, scheduler-sync) drives. The two
+# class-variables (`@@instances`, `@@agents`) are intentionally
+# module-level so every includer sees the same map without explicit
+# cross-process plumbing.
+#
+# `spawn` here picks an Agent if one is configured (so grid mode keeps
+# working) or falls back to local `Processes::Instances.spawn`.
+# Sinatra-only surface — `instance_for`, REST-side scheduler-session
+# cleanup, and the env-derived owner URL on `spawn` — lives on
+# `Cuboid::Rest::Server::InstanceHelpers`, which mixes this in.
+module InstanceHelpers
+
+    @@instances = {}
+    @@agents = {}
+
+    def self.instances
+        @@instances
+    end
+
+    # Spawn a new engine instance. If an Agent URL is configured the
+    # instance is provisioned via the Agent (grid path); otherwise we
+    # fork a local one via `Processes::Instances.spawn`.
+    #
+    # `owner_url` is forwarded to the Agent as `helpers.owner.url` —
+    # purely metadata identifying who asked. Sinatra/REST callers pass
+    # `env['HTTP_HOST']`; MCP and other non-Rack callers can leave it
+    # nil or pass whatever they have. Module-level so callers without
+    # an includer context (e.g. `MCP::CoreTools::SpawnInstance`) can
+    # use it as `Cuboid::Server::InstanceHelpers.spawn`.
+    def self.spawn( owner_url: nil )
+        if (a = agent)
+            options = {
+                owner: name,
+                helpers: { owner: { url: owner_url } }
+            }
+
+            if (info = a.spawn( options ))
+                connect_to_instance( info['url'], info['token'] )
+            end
+        else
+            ::Cuboid::Processes::Instances.spawn(
+                application: ::Cuboid::Options.paths.application,
+                daemonize: true
+            )
+        end
+    end
+
+    def self.agent
+        return if !::Cuboid::Options.agent.url
+        @@agents[::Cuboid::Options.agent.url] ||=
+            ::Cuboid::RPC::Client::Agent.new( ::Cuboid::Options.agent.url )
+    end
+
+    def self.connect_to_agent( url )
+        @@agents[url] ||= ::Cuboid::RPC::Client::Agent.new( url )
+    end
+
+    def self.connect_to_instance( url, token )
+        ::Cuboid::RPC::Client::Instance.new( url, token )
+    end
+
+    def agents
+        @@agents.keys
+    end
+
+    def agent
+        InstanceHelpers.agent
+    end
+
+    def spawn( owner_url: nil )
+        InstanceHelpers.spawn( owner_url: owner_url )
+    end
+
+    def unplug_agent( url )
+        InstanceHelpers.connect_to_agent( url ).node.unplug
+
+        c = @@agents.delete( url )
+        c.close if c
+    end
+
+    def connect_to_agent( url )
+        InstanceHelpers.connect_to_agent( url )
+    end
+
+    def connect_to_instance( url, token )
+        InstanceHelpers.connect_to_instance( url, token )
+    end
+
+    # Pulls scheduler-tracked running instances into the local map and
+    # closes/removes any that the scheduler reports failed or completed.
+    # Sinatra-side session cleanup for the same IDs is the responsibility
+    # of `Cuboid::Rest::Server::InstanceHelpers#update_from_scheduler`,
+    # which calls super then prunes its session.
+    def update_from_scheduler
+        return if !scheduler
+
+        scheduler.running.each do |id, info|
+            instances[id] ||= connect_to_instance( info['url'], info['token'] )
+        end
+
+        (scheduler.failed.keys | scheduler.completed.keys).each do |id|
+            client = instances.delete( id )
+            client.close if client
+        end
+    end
+
+    def scheduler
+        return if !Options.scheduler.url
+        @scheduler ||= connect_to_scheduler( Options.scheduler.url )
+    end
+
+    def connect_to_scheduler( url )
+        RPC::Client::Scheduler.new( url )
+    end
+
+    def instances
+        InstanceHelpers.instances
+    end
+
+    def exists?( id )
+        instances.include? id
+    end
+
+end
+
+end
+end
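As the comments spell out, the point of the module-level `spawn` is that non-Rack callers need no mixin. A minimal usage sketch, assuming a configured Cuboid environment is already loaded (the `owner_url` value is illustrative):

# With no Agent configured this forks a local daemonized instance; with
# Options.agent.url set it provisions through the Agent and may return
# nil if the Agent refuses (e.g. at max utilization).
instance = Cuboid::Server::InstanceHelpers.spawn( owner_url: 'mcp-client' )

if instance
    # `instance` is an RPC client; shut the engine down when done.
    instance.shutdown
end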
data/lib/version
CHANGED

@@ -1 +1 @@
-0.3.6
+0.4
data/spec/cuboid/mcp/auth_spec.rb
ADDED

@@ -0,0 +1,179 @@
+require 'spec_helper'
+require "#{Cuboid::Options.paths.lib}/mcp/auth"
+
+describe Cuboid::MCP::Auth do
+    # Inner app: any time the middleware passes a request through, the
+    # inner app records the env it saw and replies 200 OK. Lets us
+    # check that env['cuboid.mcp.auth'] is populated AND that
+    # short-circuited (401) requests never reach it.
+    let(:inner_app) do
+        seen = []
+        app = ->(env) {
+            seen << env
+            [200, { 'content-type' => 'text/plain' }, ['ok']]
+        }
+        # Expose `seen` for assertions.
+        app.singleton_class.send(:define_method, :seen_envs) { seen }
+        app
+    end
+
+    let(:middleware) { described_class.new(inner_app) }
+
+    # Each test installs a fresh anonymous Application subclass so we
+    # don't leak validators across examples.
+    let(:fake_application) { Class.new(Cuboid::Application) }
+
+    before(:each) do
+        @prev_application = Cuboid::Application.application
+        Cuboid::Application.application = fake_application
+    end
+
+    after(:each) do
+        Cuboid::Application.application = @prev_application
+    end
+
+    def env(headers = {})
+        # Minimum env Rack expects; HTTP_AUTHORIZATION is the only
+        # header the middleware reads.
+        {
+            'REQUEST_METHOD' => 'POST',
+            'PATH_INFO'      => '/mcp',
+            'rack.input'     => StringIO.new('{}'),
+            'rack.errors'    => StringIO.new
+        }.merge(headers)
+    end
+
+    context 'when no validator is registered' do
+        it 'passes the request through unchanged' do
+            status, _, _ = middleware.call(env)
+            status.should == 200
+            inner_app.seen_envs.size.should == 1
+        end
+
+        it 'does not populate cuboid.mcp.auth' do
+            middleware.call(env)
+            inner_app.seen_envs.first['cuboid.mcp.auth'].should be_nil
+        end
+    end
+
+    context 'when a validator is registered' do
+        before do
+            fake_application.mcp_authenticate_with do |token|
+                token == 'good-token' ? { user: 'alice' } : nil
+            end
+        end
+
+        context 'and the Authorization header is missing' do
+            it 'responds 401 with invalid_request' do
+                status, headers, body = middleware.call(env)
+
+                status.should == 401
+                headers['www-authenticate']
+                    .should == 'Bearer realm="MCP", error="invalid_request"'
+
+                JSON.parse(body.first)['error']['message'].should == 'invalid_request'
+            end
+
+            it 'never reaches the inner app' do
+                middleware.call(env)
+                inner_app.seen_envs.should be_empty
+            end
+        end
+
+        context 'and the Authorization header is not a Bearer scheme' do
+            it 'responds 401 with invalid_request' do
+                status, _, _ = middleware.call(
+                    env('HTTP_AUTHORIZATION' => 'Basic dXNlcjpwYXNz')
+                )
+                status.should == 401
+                inner_app.seen_envs.should be_empty
+            end
+        end
+
+        context 'and the Bearer token is wrong' do
+            it 'responds 401 with invalid_token' do
+                status, headers, _ = middleware.call(
+                    env('HTTP_AUTHORIZATION' => 'Bearer not-the-token')
+                )
+
+                status.should == 401
+                headers['www-authenticate']
+                    .should == 'Bearer realm="MCP", error="invalid_token"'
+
+                inner_app.seen_envs.should be_empty
+            end
+        end
+
+        context 'and the Bearer token is correct' do
+            it 'passes the request through' do
+                status, _, _ = middleware.call(
+                    env('HTTP_AUTHORIZATION' => 'Bearer good-token')
+                )
+                status.should == 200
+            end
+
+            it "stashes the validator's return value in env['cuboid.mcp.auth']" do
+                middleware.call(
+                    env('HTTP_AUTHORIZATION' => 'Bearer good-token')
+                )
+
+                inner_app.seen_envs.first['cuboid.mcp.auth']
+                    .should == { user: 'alice' }
+            end
+
+            it 'is case-insensitive on the Bearer keyword' do
+                status, _, _ = middleware.call(
+                    env('HTTP_AUTHORIZATION' => 'bearer good-token')
+                )
+                status.should == 200
+            end
+
+            it 'tolerates extra whitespace between Bearer and the token' do
+                status, _, _ = middleware.call(
+                    env('HTTP_AUTHORIZATION' => "Bearer   good-token")
+                )
+                status.should == 200
+            end
+        end
+
+        context 'and the validator raises' do
+            before do
+                fake_application.mcp_authenticate_with do |_token|
+                    raise 'database is down'
+                end
+            end
+
+            it 'responds 401 (not 500) so internals never leak' do
+                status, headers, _ = middleware.call(
+                    env('HTTP_AUTHORIZATION' => 'Bearer whatever')
+                )
+
+                status.should == 401
+                headers['www-authenticate']
+                    .should == 'Bearer realm="MCP", error="invalid_token"'
+
+                inner_app.seen_envs.should be_empty
+            end
+        end
+    end
+
+    context 'when the validator is replaced after the middleware was instantiated' do
+        # Important property: the middleware reads the validator at
+        # request time, not at construction time, so applications can
+        # swap implementations during a long-running process.
+        it 'picks up the new validator on the next request' do
+            mw = middleware
+
+            status, _, _ = mw.call(env('HTTP_AUTHORIZATION' => 'Bearer x'))
+            status.should == 200 # no validator yet → pass-through
+
+            fake_application.mcp_authenticate_with { |t| t == 'x' }
+
+            status, _, _ = mw.call(env('HTTP_AUTHORIZATION' => 'Bearer x'))
+            status.should == 200
+
+            status, _, _ = mw.call(env('HTTP_AUTHORIZATION' => 'Bearer y'))
+            status.should == 401
+        end
+    end
+end
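The spec pins down the middleware's whole observable contract, so a conforming implementation can be read off it. The sketch below is a hedged reconstruction, not the shipped data/lib/cuboid/mcp/auth.rb; in particular `mcp_auth_validator` is a hypothetical reader for the block that `mcp_authenticate_with` registers:

require 'json'

module Cuboid
module MCP

# Sketch of a Rack middleware satisfying the spec above; the real file
# is data/lib/cuboid/mcp/auth.rb and may differ in detail.
class Auth
    def initialize( app )
        @app = app
    end

    def call( env )
        # Read the validator at request time, not construction time,
        # so it can be swapped in a long-running process.
        validator = Cuboid::Application.application.mcp_auth_validator # hypothetical reader
        return @app.call( env ) if !validator

        header = env['HTTP_AUTHORIZATION'].to_s
        match  = header.match( /\Abearer\s+(\S+)\z/i )
        return reject( 'invalid_request' ) if !match

        result = begin
            validator.call( match[1] )
        rescue
            nil # validator failures must read as auth failures, not 500s
        end
        return reject( 'invalid_token' ) if !result

        env['cuboid.mcp.auth'] = result
        @app.call( env )
    end

    private

    def reject( error )
        [
            401,
            {
                'content-type'     => 'application/json',
                'www-authenticate' => "Bearer realm=\"MCP\", error=\"#{error}\""
            },
            [{ 'error' => { 'message' => error } }.to_json]
        ]
    end
end

end
end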