vsm 0.0.1 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. checksums.yaml +4 -4
  2. data/.claude/settings.local.json +17 -0
  3. data/CLAUDE.md +134 -0
  4. data/README.md +531 -17
  5. data/examples/01_echo_tool.rb +70 -0
  6. data/examples/02_openai_streaming.rb +73 -0
  7. data/examples/02b_anthropic_streaming.rb +61 -0
  8. data/examples/02c_gemini_streaming.rb +60 -0
  9. data/examples/03_openai_tools.rb +106 -0
  10. data/examples/03b_anthropic_tools.rb +96 -0
  11. data/examples/03c_gemini_tools.rb +95 -0
  12. data/lib/vsm/async_channel.rb +21 -0
  13. data/lib/vsm/capsule.rb +44 -0
  14. data/lib/vsm/drivers/anthropic/async_driver.rb +210 -0
  15. data/lib/vsm/drivers/family.rb +16 -0
  16. data/lib/vsm/drivers/gemini/async_driver.rb +149 -0
  17. data/lib/vsm/drivers/openai/async_driver.rb +202 -0
  18. data/lib/vsm/dsl.rb +50 -0
  19. data/lib/vsm/executors/fiber_executor.rb +10 -0
  20. data/lib/vsm/executors/thread_executor.rb +19 -0
  21. data/lib/vsm/homeostat.rb +19 -0
  22. data/lib/vsm/lens/event_hub.rb +73 -0
  23. data/lib/vsm/lens/server.rb +188 -0
  24. data/lib/vsm/lens/stats.rb +58 -0
  25. data/lib/vsm/lens/tui.rb +88 -0
  26. data/lib/vsm/lens.rb +79 -0
  27. data/lib/vsm/message.rb +6 -0
  28. data/lib/vsm/observability/ledger.rb +25 -0
  29. data/lib/vsm/port.rb +11 -0
  30. data/lib/vsm/roles/coordination.rb +49 -0
  31. data/lib/vsm/roles/governance.rb +9 -0
  32. data/lib/vsm/roles/identity.rb +11 -0
  33. data/lib/vsm/roles/intelligence.rb +168 -0
  34. data/lib/vsm/roles/operations.rb +33 -0
  35. data/lib/vsm/runtime.rb +18 -0
  36. data/lib/vsm/tool/acts_as_tool.rb +20 -0
  37. data/lib/vsm/tool/capsule.rb +12 -0
  38. data/lib/vsm/tool/descriptor.rb +16 -0
  39. data/lib/vsm/version.rb +1 -1
  40. data/lib/vsm.rb +33 -0
  41. data/llms.txt +322 -0
  42. metadata +67 -25
@@ -0,0 +1,210 @@
1
+ # frozen_string_literal: true
2
+ require "json"
3
+ require "net/http"
4
+ require "uri"
5
+ require "securerandom"
6
+
7
module VSM
  module Drivers
    module Anthropic
      # Streaming driver for the Anthropic Messages API (SSE over Net::HTTP).
      #
      # Translates Anthropic stream events into the driver-neutral callbacks:
      #   emit.call(:assistant_delta, text)                     # incremental text
      #   emit.call(:assistant_final, text)                     # terminal text
      #   emit.call(:tool_calls, [{id:, name:, arguments:}, …]) # tool turn
      class AsyncDriver
        # @param api_key  [String] sent via the x-api-key header
        # @param model    [String] e.g. "claude-3-5-sonnet-latest"
        # @param base_url [String] API root; override for proxies/testing
        # @param version  [String] anthropic-version header value
        def initialize(api_key:, model:, base_url: "https://api.anthropic.com/v1", version: "2023-06-01")
          @api_key, @model, @base, @version = api_key, model, base_url, version
        end

        # Runs one model turn; blocks until the HTTP response (stream or JSON)
        # is fully consumed, then returns :done. Emits exactly one terminal
        # event (:assistant_final or :tool_calls) per call.
        def run!(conversation:, tools:, policy: {}, &emit)
          # Always use Net::HTTP with SSE
          emitted_terminal = false

          headers = {
            "x-api-key" => @api_key,
            "anthropic-version" => @version,
            "content-type" => "application/json",
            "accept" => "text/event-stream"
          }

          messages = to_anthropic_messages(conversation, policy[:system_prompt])
          tool_list = normalize_anthropic_tools(tools)
          payload = {
            model: @model,
            system: policy[:system_prompt],
            messages: messages,
            max_tokens: 512,
            stream: true
          }
          if tool_list.any?
            payload[:tools] = tool_list
            payload[:tool_choice] = { type: "auto" }
          end
          body = JSON.dump(payload)

          url = URI.parse("#{@base}/messages")
          http = Net::HTTP.new(url.host, url.port)
          http.use_ssl = (url.scheme == "https")
          http.read_timeout = 120

          req = Net::HTTP::Post.new(url.request_uri)
          headers.each { |k, v| req[k] = v }
          req.body = body

          # (fix: dropped unused `res =` assignment)
          http.request(req) do |response|
            ct = response["content-type"]
            if response.code.to_i != 200
              err_body = +""
              response.read_body { |chunk| err_body << chunk }
              preview = err_body.to_s.byteslice(0, 400)
              emit.call(:assistant_final, "Anthropic HTTP #{response.code}: #{preview}")
              emitted_terminal = true
              next
            end

            if ct && ct.include?("text/event-stream")
              buffer  = +"" # unparsed SSE bytes
              textbuf = +"" # accumulated assistant text
              toolbuf = {}  # index => partially-streamed tool_use block
              tool_calls = []

              response.read_body do |chunk|
                buffer << chunk
                # fix: use the extract_sse_line! helper instead of the
                # previously duplicated inline slice/chomp loop.
                while (line = extract_sse_line!(buffer))
                  next unless line.start_with?("data:")
                  data = line.sub("data:", "").strip
                  next if data.empty? || data == "[DONE]"
                  obj = JSON.parse(data) rescue nil
                  next unless obj
                  ev = obj["type"].to_s
                  if ENV["VSM_DEBUG_STREAM"] == "1"
                    $stderr.puts "anthropic(nethttp) <= #{ev}: #{data.byteslice(0, 160)}"
                  end

                  case ev
                  when "content_block_delta"
                    idx = obj["index"]; delta = obj["delta"] || {}
                    case delta["type"]
                    when "text_delta"
                      part = delta["text"].to_s
                      textbuf << part
                      emit.call(:assistant_delta, part)
                    when "input_json_delta"
                      # Tool arguments stream as JSON fragments; buffer until stop.
                      toolbuf[idx] ||= { id: nil, name: nil, json: +"" }
                      toolbuf[idx][:json] << (delta["partial_json"] || "")
                    end
                  when "content_block_start"
                    # For anthropic, the key can be 'content' or 'content_block'
                    c = obj["content"] || obj["content_block"] || {}
                    if c["type"] == "tool_use"
                      name = c["name"] || obj["name"]
                      toolbuf[obj["index"]] = { id: c["id"], name: name, json: +"" }
                    end
                  when "content_block_stop"
                    idx = obj["index"]
                    if tb = toolbuf[idx]
                      args = tb[:json].empty? ? {} : (JSON.parse(tb[:json]) rescue { "_raw" => tb[:json] })
                      # Only enqueue if name is present
                      if tb[:name].to_s.strip != "" && tb[:id]
                        tool_calls << { id: tb[:id], name: tb[:name], arguments: args }
                      end
                    end
                  when "message_stop"
                    if tool_calls.any?
                      emit.call(:tool_calls, tool_calls)
                    else
                      emit.call(:assistant_final, textbuf.dup)
                    end
                    emitted_terminal = true
                  end
                end
              end

              unless emitted_terminal
                # If the stream closed without a terminal, emit final text
                emit.call(:assistant_final, textbuf)
                emitted_terminal = true
              end
            else
              # Non-streaming JSON
              # fix: was `data = ""` — frozen under frozen_string_literal,
              # so `data << chunk` raised FrozenError.
              data = +""
              response.read_body { |chunk| data << chunk }
              obj = JSON.parse(data) rescue {}
              parts = Array(obj.dig("content"))
              calls = []
              text = +""
              parts.each do |p|
                case p["type"]
                when "text" then text << p["text"].to_s
                when "tool_use" then calls << { id: p["id"] || SecureRandom.uuid, name: p["name"], arguments: p["input"] || {} }
                end
              end
              if calls.any?
                emit.call(:tool_calls, calls)
              else
                emit.call(:assistant_final, text)
              end
              emitted_terminal = true
            end
          end

          :done
        end

        private

        # (no IPv6/IPv4 forcing; rely on default Internet)
        def normalize_anthropic_tools(tools)
          Array(tools).map { |t| normalize_anthropic_tool(t) }
        end

        # Accepts a Descriptor (#to_anthropic_tool), an already provider-shaped
        # hash, or the neutral {name:, description:, schema:} hash.
        # @raise [TypeError] for anything else
        def normalize_anthropic_tool(t)
          return t.to_anthropic_tool if t.respond_to?(:to_anthropic_tool)

          # Provider-shaped: {name:, description:, input_schema: {…}}
          if t.is_a?(Hash) && (t[:input_schema] || t["input_schema"])
            return t
          end

          # Neutral hash {name:, description:, schema:}
          if t.is_a?(Hash) && (t[:name] || t["name"])
            return {
              name: t[:name] || t["name"],
              description: t[:description] || t["description"] || "",
              input_schema: t[:schema] || t["schema"] || {}
            }
          end

          raise TypeError, "unsupported tool descriptor: #{t.inspect}"
        end

        # Converts the neutral conversation into Anthropic content blocks.
        # The system prompt goes into the payload's :system field, so the
        # second argument is intentionally unused here.
        def to_anthropic_messages(neutral, _system)
          # Build content blocks per message; keep ordering
          neutral.map do |m|
            case m[:role]
            when "user"
              { role: "user", content: [{ type: "text", text: m[:content].to_s }] }
            when "assistant"
              { role: "assistant", content: [{ type: "text", text: m[:content].to_s }] }
            when "assistant_tool_calls"
              blocks = Array(m[:tool_calls]).map { |c|
                { type: "tool_use", id: c[:id], name: c[:name], input: c[:arguments] || {} }
              }
              { role: "assistant", content: blocks }
            when "tool_result"
              { role: "user", content: [{ type: "tool_result", tool_use_id: m[:tool_call_id], content: m[:content].to_s }] }
            end
          end.compact
        end

        # Destructively pops one "\n"-terminated line off buffer; nil if none.
        def extract_sse_line!(buffer)
          if (i = buffer.index("\n"))
            line = buffer.slice!(0..i)
            line.chomp!
            return line
          end
          nil
        end
      end
    end
  end
end
210
+
@@ -0,0 +1,16 @@
1
+ # frozen_string_literal: true
2
module VSM
  module Drivers
    # Maps a driver instance to its provider family symbol.
    module Family
      # Returns :anthropic, :gemini, or :openai for the known driver
      # classes. Unknown drivers fall back to :openai.
      def self.of(driver)
        if VSM::Drivers::Anthropic::AsyncDriver === driver
          :anthropic
        elsif VSM::Drivers::Gemini::AsyncDriver === driver
          :gemini
        else
          # Covers VSM::Drivers::OpenAI::AsyncDriver and any unknown driver.
          :openai
        end
      end
    end
  end
end
16
+
@@ -0,0 +1,149 @@
1
+ # frozen_string_literal: true
2
+ require "async"
3
+ require "net/http"
4
+ require "uri"
5
+ require "json"
6
+ require "securerandom"
7
+
8
module VSM
  module Drivers
    module Gemini
      # Driver for the Google Gemini generateContent API.
      #
      # Supports SSE streaming (streamGenerateContent?alt=sse) and a plain
      # JSON round trip, selected by the :streaming flag. Emits the
      # driver-neutral events:
      #   emit.call(:assistant_delta, text)
      #   emit.call(:assistant_final, text)
      #   emit.call(:tool_calls, [{id:, name:, arguments:}, …])
      class AsyncDriver
        # @param api_key   [String] passed as the ?key= query parameter
        # @param model     [String] e.g. "gemini-1.5-pro"
        # @param base_url  [String] API root; override for proxies/testing
        # @param streaming [Boolean] use SSE when true, single JSON otherwise
        def initialize(api_key:, model:, base_url: "https://generativelanguage.googleapis.com/v1beta", streaming: true)
          @api_key, @model, @base, @streaming = api_key, model, base_url, streaming
        end

        # Runs one model turn; blocks until the response is consumed and
        # returns :done. Emits one terminal event per call.
        def run!(conversation:, tools:, policy: {}, &emit)
          contents = to_gemini_contents(conversation)
          fndecls  = normalize_gemini_tools(tools)
          body     = request_body(contents, fndecls, policy)
          if @streaming
            uri = URI.parse("#{@base}/models/#{@model}:streamGenerateContent?alt=sse&key=#{@api_key}")
            headers = { "content-type" => "application/json", "accept" => "text/event-stream" }
            http, req = build_request(uri, headers, body)
            http.request(req) do |res|
              if res.code.to_i != 200
                err = +""; res.read_body { |c| err << c }
                emit.call(:assistant_final, "Gemini HTTP #{res.code}: #{err.to_s.byteslice(0, 400)}")
                next
              end
              buffer = +""; text = +""; calls = []
              res.read_body do |chunk|
                buffer << chunk
                # fix: use the extract_sse_line! helper instead of the
                # previously duplicated inline slice/chomp loop.
                while (line = extract_sse_line!(buffer))
                  next unless line.start_with?("data:")
                  data = line.sub("data:", "").strip
                  next if data.empty? || data == "[DONE]"
                  obj = JSON.parse(data) rescue nil
                  next unless obj
                  parts = (obj.dig("candidates", 0, "content", "parts") || [])
                  parts.each do |p|
                    if (t = p["text"]) && !t.empty?
                      text << t
                      emit.call(:assistant_delta, t)
                    end
                    if (fc = p["functionCall"]) && fc["name"]
                      # Gemini supplies no call ids; synthesize one so the
                      # neutral schema stays uniform across providers.
                      calls << { id: SecureRandom.uuid, name: fc["name"], arguments: (fc["args"] || {}) }
                    end
                  end
                end
              end
              if calls.any?
                emit.call(:tool_calls, calls)
              else
                emit.call(:assistant_final, text)
              end
            end
          else
            uri = URI.parse("#{@base}/models/#{@model}:generateContent?key=#{@api_key}")
            http, req = build_request(uri, { "content-type" => "application/json" }, body)
            res = http.request(req)
            if res.code.to_i != 200
              emit.call(:assistant_final, "Gemini HTTP #{res.code}")
            else
              data = JSON.parse(res.body) rescue {}
              parts = (data.dig("candidates", 0, "content", "parts") || [])
              calls = parts.filter_map { |p| fc = p["functionCall"]; fc && { id: SecureRandom.uuid, name: fc["name"], arguments: fc["args"] || {} } }
              if calls.any?
                emit.call(:tool_calls, calls)
              else
                text = parts.filter_map { |p| p["text"] }.join
                emit.call(:assistant_final, text.to_s)
              end
            end
          end
          :done
        end

        private

        # Shared JSON payload for both streaming and non-streaming calls
        # (was duplicated in both branches of run!).
        def request_body(contents, fndecls, policy)
          JSON.dump({ contents: contents, system_instruction: (policy[:system_prompt] && { parts: [{ text: policy[:system_prompt] }], role: "user" }), tools: [{ functionDeclarations: fndecls }] })
        end

        # Builds the Net::HTTP client and POST request for a given endpoint
        # (was duplicated in both branches of run!).
        def build_request(uri, headers, body)
          http = Net::HTTP.new(uri.host, uri.port)
          http.use_ssl = (uri.scheme == "https")
          http.read_timeout = 120 # match the Anthropic driver; the 60s default can cut off long generations
          req = Net::HTTP::Post.new(uri.request_uri)
          headers.each { |k, v| req[k] = v }
          req.body = body
          [http, req]
        end

        # (no IPv6/IPv4 forcing; rely on default Internet)
        def normalize_gemini_tools(tools)
          Array(tools).map { |t| normalize_gemini_tool(t) }
        end

        # Accepts a Descriptor (#to_gemini_tool), a provider-shaped hash, or
        # the neutral {name:, description:, schema:} hash.
        # @raise [TypeError] for anything else
        def normalize_gemini_tool(t)
          return t.to_gemini_tool if t.respond_to?(:to_gemini_tool)

          # Provider-shaped: { name:, description:, parameters: {…} }
          if t.is_a?(Hash) && (t[:parameters] || t["parameters"])
            return t
          end

          # Neutral hash {name:, description:, schema:}
          if t.is_a?(Hash) && (t[:name] || t["name"])
            return {
              name: t[:name] || t["name"],
              description: t[:description] || t["description"] || "",
              parameters: t[:schema] || t["schema"] || {}
            }
          end

          raise TypeError, "unsupported tool descriptor: #{t.inspect}"
        end

        # Maps the neutral conversation into Gemini "contents".
        def to_gemini_contents(neutral)
          items = []
          neutral.each do |m|
            case m[:role]
            when "user"
              items << { role: "user", parts: [{ text: m[:content].to_s }] }
            when "assistant"
              items << { role: "model", parts: [{ text: m[:content].to_s }] }
            when "assistant_tool_calls"
              # Gemini doesn't need us to echo previous functionCall(s)
              # Skip: model will remember its own functionCall
            when "tool_result"
              # Provide functionResponse so model can continue
              name = m[:name] || "tool"
              items << { role: "user", parts: [{ functionResponse: { name: name, response: { content: m[:content].to_s } } }] }
            end
          end
          items
        end

        # Destructively pops one "\n"-terminated line off buffer; nil if none.
        def extract_sse_line!(buffer)
          if (i = buffer.index("\n"))
            line = buffer.slice!(0..i)
            line.chomp!
            return line
          end
          nil
        end
      end
    end
  end
end
149
+
@@ -0,0 +1,202 @@
1
+ # frozen_string_literal: true
2
+ require "async"
3
+ require "async/http/internet"
4
+ require "json"
5
+
6
+ module VSM
7
+ module Drivers
8
+ module OpenAI
9
+ class AsyncDriver
10
+ def initialize(api_key:, model:, base_url: "https://api.openai.com/v1")
11
+ @api_key, @model, @base = api_key, model, base_url
12
+ end
13
+
14
+ MAX_TOOL_TURNS = 8
15
+
16
+ def run!(conversation:, tools:, policy: {}, &emit)
17
+ internet = Async::HTTP::Internet.new
18
+ begin
19
+ headers = {
20
+ "Authorization" => "Bearer #{@api_key}",
21
+ "Content-Type" => "application/json",
22
+ "Accept" => "text/event-stream"
23
+ }
24
+
25
+ messages = to_openai_messages(conversation, policy[:system_prompt])
26
+ tool_list = normalize_openai_tools(tools)
27
+
28
+ req_body = JSON.dump({
29
+ model: @model,
30
+ messages: messages,
31
+ tools: tool_list,
32
+ tool_choice: "auto",
33
+ stream: true
34
+ })
35
+
36
+ # Debug logging
37
+ if ENV["VSM_DEBUG_STREAM"] == "1"
38
+ $stderr.puts "openai => messages: #{JSON.pretty_generate(messages)}"
39
+ $stderr.puts "openai => tools count: #{tool_list.size}"
40
+ end
41
+
42
+ response = internet.post("#{@base}/chat/completions", headers, req_body)
43
+
44
+ if response.status != 200
45
+ body = response.read
46
+ warn "openai HTTP #{response.status}: #{body}"
47
+ emit.call(:assistant_final, "")
48
+ return :done
49
+ end
50
+
51
+ buffer = +""
52
+ text_buffer = +""
53
+ tc_partial = Hash.new { |h,k| h[k] = { id: nil, name: nil, args_str: +"" } }
54
+
55
+ response.body.each do |chunk|
56
+ buffer << chunk
57
+ while (line = extract_sse_line!(buffer))
58
+ next if line.empty? || line.start_with?(":")
59
+ next unless line.start_with?("data:")
60
+ data = line.sub("data:","").strip
61
+ $stderr.puts("openai <= #{data}") if ENV["VSM_DEBUG_STREAM"] == "1"
62
+ next if data == "[DONE]"
63
+
64
+ obj = JSON.parse(data) rescue nil
65
+ next unless obj
66
+ choice = obj.dig("choices",0) || {}
67
+ delta = choice["delta"] || {}
68
+
69
+ if (content = delta["content"])
70
+ text_buffer << content
71
+ emit.call(:assistant_delta, content)
72
+ end
73
+
74
+ if (tcs = delta["tool_calls"])
75
+ tcs.each do |tc|
76
+ idx = tc["index"] || 0
77
+ cell = tc_partial[idx]
78
+ cell[:id] ||= tc["id"]
79
+ fn = tc["function"] || {}
80
+ cell[:name] ||= fn["name"] if fn["name"]
81
+ cell[:args_str] << (fn["arguments"] || "")
82
+ end
83
+ end
84
+
85
+ if (fr = choice["finish_reason"])
86
+ case fr
87
+ when "tool_calls"
88
+ calls = tc_partial.keys.sort.map do |i|
89
+ cell = tc_partial[i]
90
+ {
91
+ id: cell[:id] || "call_#{i}",
92
+ name: cell[:name] || "unknown_tool",
93
+ arguments: safe_json(cell[:args_str])
94
+ }
95
+ end
96
+ tc_partial.clear
97
+ emit.call(:tool_calls, calls)
98
+ when "stop", "length", "content_filter"
99
+ emit.call(:assistant_final, text_buffer.dup)
100
+ text_buffer.clear
101
+ end
102
+ end
103
+ end
104
+ end
105
+ ensure
106
+ internet.close
107
+ end
108
+ :done
109
+ end
110
+
111
+ private
112
+ def normalize_openai_tools(tools)
113
+ Array(tools).map { |t| normalize_openai_tool(t) }
114
+ end
115
+
116
+ def normalize_openai_tool(t)
117
+ # Case 1: our Descriptor object
118
+ return t.to_openai_tool if t.respond_to?(:to_openai_tool)
119
+
120
+ # Case 2: provider-shaped already (OpenAI tools API)
121
+ if (t.is_a?(Hash) && (t[:type] || t["type"]))
122
+ return t
123
+ end
124
+
125
+ # Case 3: neutral hash {name:, description:, schema:}
126
+ if t.is_a?(Hash) && (t[:name] || t["name"])
127
+ return {
128
+ type: "function",
129
+ function: {
130
+ name: t[:name] || t["name"],
131
+ description: t[:description] || t["description"] || "",
132
+ parameters: t[:schema] || t["schema"] || {}
133
+ }
134
+ }
135
+ end
136
+
137
+ raise TypeError, "unsupported tool descriptor: #{t.inspect}"
138
+ end
139
+
140
+
141
+ def to_openai_messages(neutral, system_prompt)
142
+ msgs = []
143
+ msgs << { role: "system", content: system_prompt } if system_prompt
144
+ neutral.each do |m|
145
+ case m[:role]
146
+ when "user"
147
+ msgs << { role: "user", content: m[:content].to_s }
148
+ when "assistant"
149
+ msgs << { role: "assistant", content: m[:content].to_s }
150
+ when "assistant_tool_calls"
151
+ msg = {
152
+ role: "assistant",
153
+ tool_calls: Array(m[:tool_calls]).map { |c|
154
+ {
155
+ id: c[:id],
156
+ type: "function",
157
+ function: {
158
+ name: c[:name],
159
+ arguments: JSON.dump(c[:arguments] || {})
160
+ }
161
+ }
162
+ }
163
+ }
164
+ msgs << msg
165
+ if ENV["VSM_DEBUG_STREAM"] == "1"
166
+ $stderr.puts "OpenAI: Converting assistant_tool_calls: #{msg[:tool_calls].map{|tc| "#{tc[:function][:name]}(#{tc[:id]})"}.join(', ')}"
167
+ end
168
+ when "tool_result"
169
+ msg = {
170
+ role: "tool",
171
+ tool_call_id: m[:tool_call_id],
172
+ content: m[:content].to_s
173
+ }
174
+ msgs << msg
175
+ if ENV["VSM_DEBUG_STREAM"] == "1"
176
+ $stderr.puts "OpenAI: Converting tool_result(#{m[:tool_call_id]}): #{m[:content].to_s.slice(0, 100)}"
177
+ end
178
+ end
179
+ end
180
+ msgs
181
+ end
182
+
183
+ def extract_sse_line!(buffer)
184
+ if (i = buffer.index("\n"))
185
+ line = buffer.slice!(0..i)
186
+ line.chomp!
187
+ return line
188
+ end
189
+ nil
190
+ end
191
+
192
+ def safe_json(s)
193
+ return {} if s.nil? || s.empty?
194
+ JSON.parse(s)
195
+ rescue JSON::ParserError
196
+ { "_raw" => s }
197
+ end
198
+ end
199
+ end
200
+ end
201
+ end
202
+
data/lib/vsm/dsl.rb ADDED
@@ -0,0 +1,50 @@
1
+ # frozen_string_literal: true
2
module VSM
  module DSL
    # Declarative builder for assembling a Capsule from its five roles and
    # an optional set of child tool capsules declared under `operations`.
    class Builder
      def initialize(name)
        @name = name
        @roles = {}
        @children = {}
      end

      def identity(klass: VSM::Identity, args: {})
        @roles[:identity] = klass.new(**args)
      end

      def governance(klass: VSM::Governance, args: {})
        @roles[:governance] = klass.new(**args)
      end

      def coordination(klass: VSM::Coordination, args: {})
        @roles[:coordination] = klass.new(**args)
      end

      def intelligence(klass: VSM::Intelligence, args: {})
        @roles[:intelligence] = klass.new(**args)
      end

      # Declares the operations role; the optional block declares child
      # capsules via ChildrenBuilder#capsule.
      def operations(klass: VSM::Operations, args: {}, &blk)
        @roles[:operations] = klass.new(**args)
        if blk
          children = ChildrenBuilder.new
          children.instance_eval(&blk)
          @children.merge!(children.result)
        end
      end

      def monitoring(klass: VSM::Monitoring, args: {})
        @roles[:monitoring] = klass.new(**args)
      end

      # Wires governance into any child that accepts it, then constructs
      # the capsule.
      def build
        governance_role = @roles[:governance]
        @children.each_value do |child|
          child.governance = governance_role if child.respond_to?(:governance=)
        end
        VSM::Capsule.new(name: @name, roles: @roles, children: @children)
      end

      # Collects child capsules declared inside an `operations do ... end`
      # block.
      class ChildrenBuilder
        def initialize
          @children = {}
        end

        def capsule(name, klass:, args: {})
          @children[name.to_s] = klass.new(**args)
        end

        def result
          @children
        end

        # NOTE(review): unknown DSL calls are silently absorbed (returning
        # the children hash) rather than raising NoMethodError.
        def method_missing(*)
          result
        end

        def respond_to_missing?(*)
          true
        end
      end
    end

    # Entry point: VSM::DSL.define(:name) { identity ...; operations ... }
    def self.define(name, &blk)
      builder = Builder.new(name)
      builder.instance_eval(&blk)
      builder.build
    end
  end
end
50
+
@@ -0,0 +1,10 @@
1
+ # frozen_string_literal: true
2
module VSM
  module Executors
    # Executes a tool inline on the caller's current fiber/task — no new
    # thread is spawned. Suited to quick, non-blocking tools; blocking work
    # belongs in ThreadExecutor.
    module FiberExecutor
      # Runs tool.run(args) and returns its result; exceptions propagate
      # directly to the caller.
      def self.call(tool, args) = tool.run(args)
    end
  end
end
@@ -0,0 +1,19 @@
1
+ # frozen_string_literal: true
2
module VSM
  module Executors
    # Executes a tool on a dedicated OS thread, blocking the caller until it
    # completes. Use for blocking I/O or CPU-heavy tools that must not stall
    # the async reactor's fibers.
    module ThreadExecutor
      # Runs tool.run(args) on a fresh thread and returns its result.
      # Any exception raised by the tool is re-raised in the calling thread.
      def self.call(tool, args)
        thread = Thread.new do
          # The exception is propagated to the caller via Thread#value, so
          # suppress the default "unhandled exception" stderr report (the
          # original Queue-based version also captured it silently).
          Thread.current.report_on_exception = false
          tool.run(args)
        end
        # Thread#value joins, returns the block's result, and re-raises any
        # exception — replacing the hand-rolled Queue + [:ok/:err] rendezvous.
        thread.value
      end
    end
  end
end
@@ -0,0 +1,19 @@
1
+ # frozen_string_literal: true
2
module VSM
  # Minimal homeostat: holds resource limits, a usage counter hash, and
  # detects algedonic (pain) signals on messages.
  class Homeostat
    # Resource ceilings consulted by callers; mutable on the instance.
    attr_reader :limits

    def initialize
      @limits = { tokens: 8_000, time_ms: 15_000, bytes: 2_000_000 }
      @usage  = Hash.new(0) # per-resource counters, defaulting to zero
    end

    # Shallow copy of the usage counters so callers cannot mutate internal
    # state.
    def usage_snapshot
      @usage.dup
    end

    # True when the message's metadata carries an algedonic severity marker.
    def alarm?(message)
      severity = message.meta&.dig(:severity)
      severity == :algedonic
    end
  end
end