kairos-chain 3.9.0 → 3.9.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +10 -0
- data/lib/kairos_mcp/version.rb +1 -1
- data/templates/skillsets/llm_client/llm_call.rb +272 -0
- data/templates/skillsets/llm_client/llm_client/adapter.rb +69 -0
- data/templates/skillsets/llm_client/llm_client/anthropic_adapter.rb +165 -0
- data/templates/skillsets/llm_client/llm_client/bedrock_adapter.rb +169 -0
- data/templates/skillsets/llm_client/llm_client/claude_code_adapter.rb +154 -0
- data/templates/skillsets/llm_client/llm_client/openai_adapter.rb +171 -0
- data/templates/skillsets/llm_client/llm_client/schema_converter.rb +81 -0
- data/templates/skillsets/llm_client/llm_client.yml +48 -0
- data/templates/skillsets/llm_client/llm_configure.rb +75 -0
- data/templates/skillsets/llm_client/llm_status.rb +66 -0
- data/templates/skillsets/llm_client/skillset.json +22 -0
- metadata +12 -1
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: 21836fb01fd3a42cad85b2976517109e825e2bb0bbd3b096ef02a814a48a61ea
|
|
4
|
+
data.tar.gz: 73d3c52a49d34ccc58b7ee843492ce862cec3107f705e04c5a41af0640e210fd
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: 4a0da1ccb68ebec0a33fd79be6ddf16da004191d46759eab8f940a0c3ba5c687771ddcc472e164fcd688a9c2e94e657f948eb34dfbb403de970c797add85e87a
|
|
7
|
+
data.tar.gz: ff56f6cdea6087a5e9885037f86442941069abd96bbdc01006f193ea6aee4ed0fa8012f0a6224f3d958b333e14fada684fd8e6d5b31be7cdddcc11e03c3bd067
|
data/CHANGELOG.md
CHANGED
|
@@ -4,6 +4,16 @@ All notable changes to the `kairos-chain` gem will be documented in this file.
|
|
|
4
4
|
|
|
5
5
|
This project follows [Semantic Versioning](https://semver.org/).
|
|
6
6
|
|
|
7
|
+
## [3.9.1] - 2026-03-30
|
|
8
|
+
|
|
9
|
+
### Fixed
|
|
10
|
+
|
|
11
|
+
- **llm_client SkillSet missing from gem templates** — `llm_client` files existed
|
|
12
|
+
only in the project-root `templates/` but not in the gem-bundled
|
|
13
|
+
`KairosChain_mcp_server/templates/skillsets/llm_client/`. This caused
|
|
14
|
+
`llm_call`, `llm_configure`, and `llm_status` tools to be unavailable on
|
|
15
|
+
fresh installs, breaking the `agent` SkillSet (`depends_on: ["llm_client"]`).
|
|
16
|
+
|
|
7
17
|
## [3.9.0] - 2026-03-30
|
|
8
18
|
|
|
9
19
|
### Added
|
data/templates/skillsets/llm_client/llm_call.rb
ADDED
|
@@ -0,0 +1,272 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require 'json'
|
|
4
|
+
require 'digest'
|
|
5
|
+
require 'time'
|
|
6
|
+
require_relative '../lib/llm_client/adapter'
|
|
7
|
+
require_relative '../lib/llm_client/anthropic_adapter'
|
|
8
|
+
require_relative '../lib/llm_client/openai_adapter'
|
|
9
|
+
require_relative '../lib/llm_client/claude_code_adapter'
|
|
10
|
+
require_relative '../lib/llm_client/bedrock_adapter'
|
|
11
|
+
require_relative '../lib/llm_client/schema_converter'
|
|
12
|
+
|
|
13
|
+
module KairosMcp
|
|
14
|
+
module SkillSets
|
|
15
|
+
module LlmClient
|
|
16
|
+
module Tools
|
|
17
|
+
# MCP tool that makes exactly one LLM API call. Pure transport: it does
# NOT execute tools, loop, retry, or fall back — callers own that logic.
# Always returns text content holding a JSON payload ('status' => 'ok' or
# 'error'); adapter failures are captured as structured JSON, never raised.
class LlmCall < KairosMcp::Tools::BaseTool
  # Providers routed to OpenaiAdapter in #build_adapter. Their tool schemas
  # must be converted to OpenAI function-calling format — this list keeps
  # #build_adapter and #resolve_and_convert_tools in sync (previously only
  # 'openai' got :openai schemas while 'local'/'openrouter' still used the
  # OpenAI adapter, so those providers received Anthropic-format tools).
  OPENAI_COMPATIBLE_PROVIDERS = %w[openai local openrouter].freeze

  def name
    'llm_call'
  end

  def description
    'Make exactly one LLM API call. Returns response including tool_use requests. ' \
      'Does NOT execute tools, loop, retry, or fall back. Pure transport.'
  end

  def category
    :llm
  end

  def usecase_tags
    %w[llm api call provider transport]
  end

  def related_tools
    %w[llm_configure llm_status]
  end

  # JSON Schema for the tool arguments. Only 'messages' is required.
  def input_schema
    {
      type: 'object',
      properties: {
        messages: {
          type: 'array',
          description: 'Conversation messages array (role + content)',
          items: { type: 'object' }
        },
        system: {
          type: 'string',
          description: 'System prompt (optional)'
        },
        tools: {
          type: 'array',
          description: 'Tool names or fnmatch patterns to include as LLM tools (optional)',
          items: { type: 'string' }
        },
        invocation_context_json: {
          type: 'string',
          description: 'Serialized InvocationContext for policy-filtered schema discovery (optional)'
        },
        model: {
          type: 'string',
          description: 'Model override (optional, uses config default)'
        },
        max_tokens: {
          type: 'integer',
          description: 'Max tokens override (optional)'
        },
        temperature: {
          type: 'number',
          description: 'Temperature override (optional)'
        }
      },
      required: ['messages']
    }
  end

  # Perform exactly one call.
  # arguments: 'messages' (required), 'system', 'tools',
  # 'invocation_context_json', 'model', 'max_tokens', 'temperature'.
  def call(arguments)
    config = load_config
    adapter = build_adapter(config)
    messages = arguments['messages']
    system = arguments['system']
    model = arguments['model']
    max_tokens = arguments['max_tokens']
    temperature = arguments['temperature']

    # Resolve tool schemas with policy filtering
    tool_schemas = nil
    tool_names_provided = []
    if arguments['tools'] && !arguments['tools'].empty?
      ctx = deserialize_context(arguments['invocation_context_json'])
      tool_schemas, tool_names_provided = resolve_and_convert_tools(
        arguments['tools'], ctx, config
      )
    end

    # Make the API call
    raw_response = adapter.call(
      messages: messages,
      system: system,
      tools: tool_schemas,
      model: model,
      max_tokens: max_tokens,
      temperature: temperature
    )

    # Track usage (this also strips token counts out of raw_response)
    usage = extract_usage(raw_response)

    # Build success payload
    actual_model = raw_response['model'] || model || config['model'] || 'unknown'
    payload = {
      'status' => 'ok',
      'provider' => config['provider'],
      'model' => actual_model,
      'response' => raw_response,
      'usage' => usage,
      'snapshot' => build_snapshot(
        actual_model, system, messages, tool_names_provided, raw_response
      )
    }

    UsageTracker.record(usage)
    text_content(JSON.generate(payload))
  rescue KairosMcp::SkillSets::LlmClient::ApiError => e
    # Structured provider error — forward its retry metadata verbatim.
    text_content(JSON.generate({
      'status' => 'error',
      'error' => {
        'type' => e.is_a?(KairosMcp::SkillSets::LlmClient::AuthError) ? 'auth_error' : 'api_error',
        'message' => e.message,
        'provider' => e.provider,
        'retryable' => e.retryable,
        'rate_limited' => e.rate_limited,
        'suggested_backoff_seconds' => e.suggested_backoff
      }
    }))
  rescue StandardError => e
    # Anything else (config/parse problems, adapter bugs) is reported as a
    # non-retryable error with a coarse classification.
    text_content(JSON.generate({
      'status' => 'error',
      'error' => {
        'type' => classify_error(e),
        'message' => e.message,
        'provider' => nil,
        'retryable' => false,
        'rate_limited' => false,
        'suggested_backoff_seconds' => nil
      }
    }))
  end

  private

  # Load the skillset config from ../config/llm_client.yml relative to this
  # file; falls back to an Anthropic default when the file is absent.
  def load_config
    config_path = File.join(__dir__, '..', 'config', 'llm_client.yml')
    if File.exist?(config_path)
      require 'yaml' # deliberate lazy load: YAML only needed on this path
      YAML.safe_load(File.read(config_path), permitted_classes: [Symbol]) || {}
    else
      { 'provider' => 'anthropic', 'model' => 'claude-sonnet-4-6',
        'api_key_env' => 'ANTHROPIC_API_KEY' }
    end
  end

  # Map config['provider'] to an adapter instance. Unknown or missing
  # providers fall back to Anthropic.
  def build_adapter(config)
    case config['provider']
    when *OPENAI_COMPATIBLE_PROVIDERS
      OpenaiAdapter.new(config)
    when 'claude_code'
      ClaudeCodeAdapter.new(config)
    when 'bedrock'
      BedrockAdapter.new(config)
    else
      AnthropicAdapter.new(config)
    end
  end

  # Parse a serialized InvocationContext. nil/empty input means "no policy
  # filtering" (returns nil); malformed input yields a deny-all context.
  def deserialize_context(json_string)
    return nil if json_string.nil? || json_string.empty?

    ctx = KairosMcp::InvocationContext.from_json(json_string)
    return ctx if ctx

    # Fail-closed: malformed context returns a deny-all context
    KairosMcp::InvocationContext.new(whitelist: [])
  rescue StandardError
    # Parse error → deny-all (fail-closed, not fail-open)
    KairosMcp::InvocationContext.new(whitelist: [])
  end

  # Expand fnmatch patterns against the registry, apply policy filtering,
  # and convert the surviving schemas to the active provider's tool format.
  # Returns [schemas_or_nil, tool_names].
  def resolve_and_convert_tools(patterns, invocation_context, config)
    return [nil, []] unless @registry

    all_schemas = @registry.list_tools

    # Expand fnmatch patterns
    resolved = all_schemas.select { |s|
      patterns.any? { |pat| File.fnmatch(pat, s[:name]) }
    }

    # Policy filter: only include tools the context allows
    if invocation_context
      resolved = resolved.select { |s| invocation_context.allowed?(s[:name]) }
    end

    tool_names = resolved.map { |s| s[:name] }

    # Convert to provider format — must stay in sync with #build_adapter:
    # every OpenAI-compatible provider needs :openai schemas.
    target = OPENAI_COMPATIBLE_PROVIDERS.include?(config['provider']) ? :openai : :anthropic
    result = SchemaConverter.convert_batch(resolved, target)

    unless result[:errors].empty?
      warn "[llm_call] Schema conversion errors: #{result[:errors].map { |e| e[:tool] }.join(', ')}"
    end

    [result[:schemas], tool_names]
  end

  # Pull token counts OUT of the response hash (destructive delete, so the
  # 'response' payload carries no accounting fields) and return a usage hash.
  def extract_usage(response)
    input_t = response.delete('input_tokens').to_i
    output_t = response.delete('output_tokens').to_i
    {
      'input_tokens' => input_t,
      'output_tokens' => output_t,
      'total_tokens' => input_t + output_t
    }
  end

  # Compact audit record: the system prompt is hashed (first 16 hex chars),
  # and only counts/names are stored — never full message content.
  def build_snapshot(model, system, messages, tool_names, response)
    {
      'timestamp' => Time.now.iso8601,
      'model' => model,
      'system_prompt_hash' => system ? Digest::SHA256.hexdigest(system)[0..15] : nil,
      'messages_count' => messages&.length || 0,
      'tool_schemas_provided' => tool_names,
      'response_summary_length' => (response['content'] || '').length
    }
  end

  # Coarse classification for the generic (non-ApiError) error payload.
  def classify_error(error)
    case error
    when JSON::ParserError then 'parse_error'
    when Errno::ENOENT, Errno::EACCES then 'config_error'
    else 'api_error'
    end
  end
end
|
|
247
|
+
|
|
248
|
+
# In-memory usage tracker (per ToolRegistry lifecycle).
# Thread-safe accumulator for the number of calls and token totals;
# state lives in module-level instance variables guarded by a Mutex.
module UsageTracker
  @mutex = Mutex.new
  @stats = { calls: 0, input_tokens: 0, output_tokens: 0 }

  class << self
    # Fold one call's usage hash (string keys 'input_tokens' /
    # 'output_tokens') into the running totals.
    def record(usage)
      @mutex.synchronize do
        @stats[:calls] += 1
        @stats[:input_tokens] += usage['input_tokens'].to_i
        @stats[:output_tokens] += usage['output_tokens'].to_i
      end
    end

    # Snapshot of the current totals (shallow copy, safe to hand out).
    def stats
      @mutex.synchronize { @stats.dup }
    end

    # Zero every counter.
    def reset!
      @mutex.synchronize { @stats = { calls: 0, input_tokens: 0, output_tokens: 0 } }
    end
  end
end
|
|
269
|
+
end
|
|
270
|
+
end
|
|
271
|
+
end
|
|
272
|
+
end
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module KairosMcp
|
|
4
|
+
module SkillSets
|
|
5
|
+
module LlmClient
|
|
6
|
+
# Abstract LLM provider adapter. Subclasses implement #call.
# Config values may be keyed by string or symbol; lookups try both.
class Adapter
  attr_reader :config

  def initialize(config)
    @config = config
  end

  # Make one API call. Returns normalized response hash.
  # Subclasses MUST rescue provider errors and raise ApiError.
  def call(messages:, system: nil, tools: nil, model: nil,
           max_tokens: nil, temperature: nil)
    raise NotImplementedError, "#{self.class}#call not implemented"
  end

  protected

  # Explicit override wins, then the configured default model.
  def resolve_model(override)
    override || config_value('model')
  end

  def resolve_max_tokens(override)
    override || config_value('default_max_tokens') || 4096
  end

  def resolve_temperature(override)
    override || config_value('default_temperature') || 0.7
  end

  # Read the API key from the environment variable named in config.
  # Raises AuthError when the variable name is unconfigured, or the
  # variable itself is unset/blank.
  def resolve_api_key
    env_var = config_value('api_key_env')
    raise AuthError, "No api_key_env configured" unless env_var

    key = ENV[env_var]
    raise AuthError, "Environment variable '#{env_var}' is not set" unless key && !key.empty?

    key
  end

  def timeout_seconds
    config_value('timeout_seconds') || 120
  end

  private

  # Fetch a config entry by string key, falling back to the symbol key.
  def config_value(name)
    @config[name] || @config[name.to_sym]
  end
end
|
|
49
|
+
|
|
50
|
+
# Base error for provider-side failures. Carries retry metadata (provider
# name, retryable flag, rate-limit flag, suggested backoff in seconds) so
# callers can build structured error payloads without parsing messages.
class ApiError < StandardError
  attr_reader :provider, :retryable, :rate_limited, :suggested_backoff

  def initialize(message,
                 provider: nil,
                 retryable: false,
                 rate_limited: false,
                 suggested_backoff: nil)
    super(message)
    @provider          = provider
    @retryable         = retryable
    @rate_limited      = rate_limited
    @suggested_backoff = suggested_backoff
  end
end

# Authentication / key-configuration failure. Never retryable.
class AuthError < ApiError
  def initialize(message)
    super(message, retryable: false)
  end
end
|
|
67
|
+
end
|
|
68
|
+
end
|
|
69
|
+
end
|
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require 'faraday'
|
|
4
|
+
require 'json'
|
|
5
|
+
require_relative 'adapter'
|
|
6
|
+
require_relative 'schema_converter'
|
|
7
|
+
|
|
8
|
+
module KairosMcp
|
|
9
|
+
module SkillSets
|
|
10
|
+
module LlmClient
|
|
11
|
+
# Adapter for the Anthropic Messages API (POST /v1/messages via Faraday).
class AnthropicAdapter < Adapter
  API_URL = 'https://api.anthropic.com'
  API_VERSION = '2023-06-01'

  # One /v1/messages call. Returns the normalized response hash; every
  # failure surfaces as ApiError (AuthError for key problems).
  def call(messages:, system: nil, tools: nil, model: nil,
           max_tokens: nil, temperature: nil)
    api_key = resolve_api_key

    body = {
      model: resolve_model(model),
      max_tokens: resolve_max_tokens(max_tokens),
      messages: convert_messages(messages)
    }
    body[:system] = system if system
    body[:temperature] = resolve_temperature(temperature) unless temperature.nil?
    body[:tools] = tools if tools && !tools.empty?

    response = connection(api_key).post('/v1/messages') do |req|
      req.body = JSON.generate(body)
    end

    parse_response(response)
  rescue Faraday::TimeoutError => e
    raise ApiError.new("Request timed out: #{e.message}",
                       provider: 'anthropic', retryable: true)
  rescue Faraday::ConnectionFailed => e
    raise ApiError.new("Connection failed: #{e.message}",
                       provider: 'anthropic', retryable: true)
  rescue ApiError
    # Re-raise structured errors unchanged — including AuthError and the
    # 429 / HTTP-status errors raised by #parse_response. (Previously only
    # AuthError was re-raised, so parse_response's ApiErrors fell through
    # to the StandardError clause and were re-wrapped as generic
    # non-retryable errors, losing retryable/rate_limited/backoff metadata.)
    raise
  rescue StandardError => e
    raise ApiError.new("Anthropic API error: #{e.message}", provider: 'anthropic')
  end

  private

  # Faraday connection with auth headers and configured timeouts.
  def connection(api_key)
    Faraday.new(url: API_URL) do |f|
      f.request :json
      f.headers['x-api-key'] = api_key
      f.headers['anthropic-version'] = API_VERSION
      f.headers['Content-Type'] = 'application/json'
      f.options.timeout = timeout_seconds
      f.options.open_timeout = 10
      f.adapter Faraday.default_adapter
    end
  end

  # Convert canonical intermediate messages to Anthropic API format.
  # Canonical: role 'tool' + tool_use_id, assistant with tool_calls array.
  # Anthropic: role 'user' + tool_result content block, assistant with tool_use content blocks.
  # Messages already in Anthropic-native format pass through unchanged.
  def convert_messages(messages)
    messages.map do |msg|
      role = msg['role'] || msg[:role]
      content = msg['content'] || msg[:content]

      case role
      when 'tool'
        tool_id = msg['tool_use_id'] || msg[:tool_use_id]
        if tool_id
          {
            'role' => 'user',
            'content' => [{ 'type' => 'tool_result',
                            'tool_use_id' => tool_id,
                            'content' => content.is_a?(String) ? content : JSON.generate(content) }]
          }
        else
          msg # Native format or unknown — pass through unchanged
        end
      when 'assistant'
        tool_calls = msg['tool_calls'] || msg[:tool_calls]
        if tool_calls && !tool_calls.empty?
          content_blocks = []
          content_blocks << { 'type' => 'text', 'text' => content } if content
          tool_calls.each do |tc|
            content_blocks << {
              'type' => 'tool_use',
              'id' => tc['id'] || tc[:id],
              'name' => tc['name'] || tc[:name],
              'input' => tc['input'] || tc[:input] || {}
            }
          end
          { 'role' => 'assistant', 'content' => content_blocks }
        else
          { 'role' => role, 'content' => content }
        end
      else
        { 'role' => role, 'content' => content }
      end
    end
  end

  # Turn the HTTP response into the normalized hash, raising structured
  # ApiErrors for 429 and other non-200 statuses. Status checks happen
  # BEFORE JSON parsing: 429/5xx bodies (HTML gateway pages, empty bodies)
  # are not guaranteed to be JSON, and a parse failure used to mask the
  # rate-limit/status information.
  def parse_response(response)
    if response.status == 429
      backoff = response.headers['retry-after']&.to_i
      raise ApiError.new("Rate limited",
                         provider: 'anthropic', retryable: true,
                         rate_limited: true, suggested_backoff: backoff)
    end

    unless response.status == 200
      raise ApiError.new(
        error_message_from(response.body) || "HTTP #{response.status}",
        provider: 'anthropic',
        retryable: response.status >= 500
      )
    end

    # 200 with a malformed body: let JSON::ParserError bubble to #call,
    # which wraps it in a generic ApiError.
    normalize_response(JSON.parse(response.body))
  end

  # Best-effort extraction of the API's error message; non-JSON error
  # bodies yield nil so the caller falls back to the HTTP status.
  def error_message_from(raw_body)
    parsed = JSON.parse(raw_body)
    parsed.is_a?(Hash) ? parsed.dig('error', 'message') : nil
  rescue JSON::ParserError
    nil
  end

  # Flatten Anthropic content blocks into the normalized shape:
  # 'content' (joined text or nil), 'tool_use' (array or nil),
  # 'stop_reason', 'model', and raw token counts.
  def normalize_response(body)
    content_text = []
    tool_use = []

    (body['content'] || []).each do |block|
      case block['type']
      when 'text'
        content_text << block['text']
      when 'tool_use'
        tool_use << {
          'id' => block['id'],
          'name' => block['name'],
          'input' => block['input']
        }
      end
    end

    usage = body['usage'] || {}
    {
      'content' => content_text.empty? ? nil : content_text.join("\n"),
      'tool_use' => tool_use.empty? ? nil : tool_use,
      'stop_reason' => map_stop_reason(body['stop_reason']),
      'model' => body['model'],
      'input_tokens' => usage['input_tokens'],
      'output_tokens' => usage['output_tokens']
    }
  end

  # Known stop reasons pass through; anything else is preserved verbatim
  # (or 'unknown' when nil).
  def map_stop_reason(reason)
    case reason
    when 'end_turn' then 'end_turn'
    when 'tool_use' then 'tool_use'
    when 'max_tokens' then 'max_tokens'
    when 'stop_sequence' then 'stop_sequence'
    else reason || 'unknown'
    end
  end
end
|
|
163
|
+
end
|
|
164
|
+
end
|
|
165
|
+
end
|
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require 'json'
|
|
4
|
+
require_relative 'adapter'
|
|
5
|
+
require_relative 'schema_converter'
|
|
6
|
+
|
|
7
|
+
module KairosMcp
|
|
8
|
+
module SkillSets
|
|
9
|
+
module LlmClient
|
|
10
|
+
# Adapter for AWS Bedrock (Claude on AWS).
# Data stays within AWS — no external LLM provider data transfer.
# Requires: gem 'aws-sdk-bedrockruntime' (loaded lazily; a clear ApiError
# is raised when the gem is absent).
class BedrockAdapter < Adapter
  # One invoke_model call against Bedrock's Anthropic runtime. Returns the
  # normalized response hash; all failures surface as ApiError.
  def call(messages:, system: nil, tools: nil, model: nil,
           max_tokens: nil, temperature: nil)
    client = bedrock_client

    converted_messages = convert_messages(messages)

    payload = {
      anthropic_version: 'bedrock-2023-05-31',
      max_tokens: resolve_max_tokens(max_tokens),
      messages: converted_messages
    }
    payload[:system] = system if system
    payload[:temperature] = resolve_temperature(temperature) unless temperature.nil?

    if tools && !tools.empty?
      # Accept both symbol- and string-keyed schemas, and both
      # input_schema / inputSchema spellings.
      payload[:tools] = tools.map do |t|
        {
          name: t[:name] || t['name'],
          description: t[:description] || t['description'],
          input_schema: t[:input_schema] || t['input_schema'] || t[:inputSchema] || t['inputSchema']
        }
      end
    end

    response = client.invoke_model(
      model_id: resolve_model(model),
      body: JSON.generate(payload),
      content_type: 'application/json',
      accept: 'application/json'
    )

    body = JSON.parse(response.body.string)
    normalize_response(body)
  rescue LoadError
    raise ApiError.new(
      "Bedrock adapter requires 'aws-sdk-bedrockruntime' gem. " \
      "Add to Gemfile: gem 'aws-sdk-bedrockruntime', '~> 1.0'",
      provider: 'bedrock', retryable: false
    )
  rescue ApiError
    # Re-raise structured errors unchanged (consistent with the other
    # adapters — covers AuthError and any ApiError raised above).
    raise
  rescue StandardError => e
    # AWS SDK errors carry their own retryable? hint. Guard the class-name
    # check with to_s: an anonymous class has a nil name, and calling
    # include? on nil inside this rescue would raise NoMethodError.
    if e.class.name.to_s.start_with?('Aws::')
      retryable = e.respond_to?(:retryable?) ? e.retryable? : false
      raise ApiError.new(
        "Bedrock API error: #{e.message}",
        provider: 'bedrock', retryable: retryable
      )
    end
    raise ApiError.new("Bedrock error: #{e.message}", provider: 'bedrock')
  end

  private

  # Lazily require the AWS SDK and build a region-scoped client.
  # Credentials come from the standard AWS provider chain.
  def bedrock_client
    begin
      require 'aws-sdk-bedrockruntime'
    rescue LoadError
      raise LoadError, "aws-sdk-bedrockruntime not installed"
    end

    region = @config['aws_region'] || @config[:aws_region] ||
             ENV.fetch('AWS_REGION', 'us-east-1')

    Aws::BedrockRuntime::Client.new(region: region)
  end

  # Bedrock model IDs differ from Anthropic API model names, so this
  # overrides Adapter#resolve_model with a Bedrock-specific default chain.
  def resolve_model(override)
    override || @config['model'] || @config[:model] ||
      ENV.fetch('AWS_BEDROCK_MODEL_ID', 'anthropic.claude-sonnet-4-5-20250929-v1:0')
  end

  # Convert canonical intermediate messages to Bedrock's Anthropic format.
  def convert_messages(messages)
    messages.map do |msg|
      role = msg['role'] || msg[:role]
      content = msg['content'] || msg[:content]

      case role
      when 'system'
        # Bedrock system is a top-level parameter, not a message.
        # If caller passes system in messages, convert to user context.
        { role: 'user', content: "[System Context]: #{content}" }
      when 'tool'
        tool_id = msg['tool_use_id'] || msg[:tool_use_id]
        {
          role: 'user',
          content: [{
            type: 'tool_result',
            tool_use_id: tool_id,
            content: content.is_a?(String) ? content : JSON.generate(content)
          }]
        }
      when 'assistant'
        tool_calls = msg['tool_calls'] || msg[:tool_calls]
        if tool_calls
          content_blocks = []
          content_blocks << { type: 'text', text: content } if content
          tool_calls.each do |tc|
            content_blocks << {
              type: 'tool_use',
              id: tc['id'] || tc[:id],
              name: tc['name'] || tc[:name],
              input: tc['input'] || tc[:input] || tc['arguments'] || tc[:arguments] || {}
            }
          end
          { role: 'assistant', content: content_blocks }
        else
          { role: 'assistant', content: content }
        end
      else
        { role: role, content: content }
      end
    end
  end

  # Flatten Anthropic-style content blocks into the normalized shape
  # shared by every adapter.
  def normalize_response(body)
    content_text = []
    tool_use = []

    (body['content'] || []).each do |block|
      case block['type']
      when 'text'
        content_text << block['text']
      when 'tool_use'
        tool_use << {
          'id' => block['id'],
          'name' => block['name'],
          'input' => block['input']
        }
      end
    end

    usage = body['usage'] || {}
    {
      'content' => content_text.empty? ? nil : content_text.join("\n"),
      'tool_use' => tool_use.empty? ? nil : tool_use,
      'stop_reason' => map_stop_reason(body['stop_reason']),
      'model' => body['model'],
      'input_tokens' => usage['input_tokens'],
      'output_tokens' => usage['output_tokens']
    }
  end

  # Known stop reasons pass through; anything else is preserved verbatim
  # (or 'unknown' when nil).
  def map_stop_reason(reason)
    case reason
    when 'end_turn' then 'end_turn'
    when 'tool_use' then 'tool_use'
    when 'max_tokens' then 'max_tokens'
    when 'stop_sequence' then 'stop_sequence'
    else reason || 'unknown'
    end
  end
end
|
|
167
|
+
end
|
|
168
|
+
end
|
|
169
|
+
end
|
|
@@ -0,0 +1,154 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require 'json'
require 'open3'
require 'securerandom'

require_relative 'adapter'
|
|
6
|
+
|
|
7
|
+
module KairosMcp
|
|
8
|
+
module SkillSets
|
|
9
|
+
module LlmClient
|
|
10
|
+
# Adapter that uses Claude Code CLI as the LLM backend.
# No API costs — uses the Claude Code subscription.
# Invokes `claude -p --output-format json` as a subprocess.
# NOTE: #extract_tool_use uses SecureRandom, which must be required at the
# top of this file ('securerandom') — previously missing, so generating a
# tool_use id raised NameError at runtime.
class ClaudeCodeAdapter < Adapter
  # One CLI invocation. Messages/tools are flattened into a single prompt
  # because the CLI has no native multi-turn or tool-calling API here.
  def call(messages:, system: nil, tools: nil, model: nil,
           max_tokens: nil, temperature: nil)
    prompt = build_prompt(messages, system, tools)

    args = ['claude', '-p', '--output-format', 'json']
    args += ['--model', model] if model

    stdout, stderr, status = Open3.capture3(*args, stdin_data: prompt)

    unless status.success?
      raise ApiError.new(
        "Claude Code exited with status #{status.exitstatus}: #{stderr[0..200]}",
        provider: 'claude_code', retryable: false
      )
    end

    parse_response(stdout)
  rescue Errno::ENOENT
    # The `claude` binary is not on PATH.
    raise ApiError.new(
      "Claude Code CLI not found. Install: https://docs.anthropic.com/en/docs/claude-code",
      provider: 'claude_code', retryable: false
    )
  rescue ApiError
    raise
  rescue StandardError => e
    raise ApiError.new("Claude Code error: #{e.message}", provider: 'claude_code')
  end

  private

  # Render system prompt, tool descriptions (with a tool_use JSON protocol
  # hint), and the conversation history into one plain-text prompt.
  def build_prompt(messages, system, tools)
    parts = []

    if system
      parts << "[System]: #{system}"
      parts << ""
    end

    if tools && !tools.empty?
      parts << "[Available tools - respond with JSON when you want to use a tool]:"
      tools.each do |t|
        name = t[:name] || t['name']
        desc = t[:description] || t['description']
        schema = t[:input_schema] || t['input_schema'] || t[:inputSchema] || t['inputSchema']
        parts << "- #{name}: #{desc}"
        if schema && schema.is_a?(Hash) && schema['properties']
          params = schema['properties'].keys.join(', ')
          parts << "  Parameters: #{params}"
        end
      end
      parts << ""
      parts << "To use a tool, include in your response:"
      parts << '```json'
      parts << '{"tool_use": [{"name": "tool_name", "input": {"param": "value"}}]}'
      parts << '```'
      parts << ""
    end

    messages.each do |msg|
      role = msg['role'] || msg[:role]
      content = msg['content'] || msg[:content]
      case role
      when 'user'
        parts << content.to_s
      when 'assistant'
        parts << "[Previous assistant response]: #{content}"
      when 'tool'
        tool_id = msg['tool_use_id'] || msg[:tool_use_id] || 'unknown'
        parts << "[Tool result for #{tool_id}]: #{content}"
      end
    end

    parts.join("\n")
  end

  # Parse the CLI's JSON envelope (type 'result'), surface CLI-reported
  # errors, and normalize to the shared response shape.
  def parse_response(stdout)
    data = JSON.parse(stdout)

    unless data['type'] == 'result'
      raise ApiError.new(
        "Unexpected Claude Code response type: #{data['type']}",
        provider: 'claude_code'
      )
    end

    if data['is_error']
      raise ApiError.new(
        data['result'] || 'Claude Code returned an error',
        provider: 'claude_code', retryable: false
      )
    end

    result_text = data['result'] || ''
    tool_use = extract_tool_use(result_text)
    usage = data['usage'] || {}

    {
      # When a tool_use block was found, the text was protocol chatter,
      # not an answer — report nil content and a 'tool_use' stop reason.
      'content' => tool_use ? nil : result_text,
      'tool_use' => tool_use,
      'stop_reason' => tool_use ? 'tool_use' : map_stop_reason(data['stop_reason']),
      'model' => data.dig('modelUsage')&.keys&.first || 'claude_code',
      'input_tokens' => usage['input_tokens'],
      'output_tokens' => usage['output_tokens']
    }
  end

  # Try to extract tool_use JSON from Claude Code's text response.
  # Returns an array of {'id','name','input'} hashes, or nil when no
  # well-formed tool_use block is present (parse failures yield nil).
  def extract_tool_use(text)
    # Look for JSON block with tool_use
    json_match = text.match(/```json\s*\n?(.*?)\n?\s*```/m) ||
                 text.match(/\{[^{}]*"tool_use"\s*:/m)

    return nil unless json_match

    json_str = json_match[1] || json_match[0]
    parsed = JSON.parse(json_str)

    if parsed.is_a?(Hash) && parsed['tool_use'].is_a?(Array)
      parsed['tool_use'].map do |tu|
        {
          # Synthetic id — the CLI protocol has none (needs 'securerandom').
          'id' => "cc_#{SecureRandom.hex(4)}",
          'name' => tu['name'],
          'input' => tu['input'] || {}
        }
      end
    end
  rescue JSON::ParserError
    nil
  end

  # The CLI reports few stop reasons; default to 'end_turn' when absent.
  def map_stop_reason(reason)
    case reason
    when 'end_turn' then 'end_turn'
    when 'max_tokens' then 'max_tokens'
    else reason || 'end_turn'
    end
  end
end
|
|
152
|
+
end
|
|
153
|
+
end
|
|
154
|
+
end
|
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'faraday'
require 'json'
require_relative 'adapter'
require_relative 'schema_converter'

module KairosMcp
  module SkillSets
    module LlmClient
      # Adapter for the OpenAI Chat Completions API. Also serves
      # OpenAI-compatible endpoints (OpenRouter, Ollama, vLLM) when the
      # config supplies a `base_url` override (see #base_url).
      class OpenaiAdapter < Adapter
        API_URL = 'https://api.openai.com'

        # Perform one chat-completion request and return the normalized
        # response hash (see #normalize_response for its shape).
        #
        # messages    - Array of message hashes (canonical or OpenAI-native).
        # system      - Optional system prompt; prepended as a 'system' message.
        # tools       - Optional Array of OpenAI tool schemas; omitted when empty.
        # model/max_tokens/temperature - per-call overrides; nil falls back to
        #   config defaults via the resolve_* helpers on Adapter (defined in
        #   adapter.rb, not visible here).
        #
        # Raises ApiError for transport failures (retryable) and API-level
        # errors; AuthError from resolve_api_key is re-raised untouched.
        def call(messages:, system: nil, tools: nil, model: nil,
                 max_tokens: nil, temperature: nil)
          api_key = resolve_api_key

          all_messages = []
          all_messages << { 'role' => 'system', 'content' => system } if system
          all_messages.concat(messages)
          all_messages = convert_messages(all_messages)

          body = {
            model: resolve_model(model),
            max_tokens: resolve_max_tokens(max_tokens),
            messages: all_messages,
            temperature: resolve_temperature(temperature)
          }
          body[:tools] = tools if tools && !tools.empty?

          # NOTE(review): the leading-slash path replaces any path component of
          # base_url under Faraday's URL joining — e.g. with the documented
          # OpenRouter base_url 'https://openrouter.ai/api/v1' this would post
          # to '/v1/chat/completions', dropping '/api'. Confirm against the
          # Faraday version in use.
          response = connection(api_key).post('/v1/chat/completions') do |req|
            req.body = JSON.generate(body)
          end

          parse_response(response)
        rescue Faraday::TimeoutError => e
          raise ApiError.new("Request timed out: #{e.message}",
                             provider: 'openai', retryable: true)
        rescue Faraday::ConnectionFailed => e
          raise ApiError.new("Connection failed: #{e.message}",
                             provider: 'openai', retryable: true)
        rescue AuthError
          raise
        rescue StandardError => e
          raise ApiError.new("OpenAI API error: #{e.message}", provider: 'openai')
        end

        private

        # Build a Faraday connection with bearer auth and timeouts.
        # The body is already a JSON string when posted, so the :json request
        # middleware effectively passes it through unchanged.
        def connection(api_key)
          Faraday.new(url: base_url) do |f|
            f.request :json
            f.headers['Authorization'] = "Bearer #{api_key}"
            f.headers['Content-Type'] = 'application/json'
            f.options.timeout = timeout_seconds
            f.options.open_timeout = 10
            f.adapter Faraday.default_adapter
          end
        end

        # Config override for OpenAI-compatible endpoints; falls back to the
        # official API host. Accepts either string or symbol config keys.
        def base_url
          @config['base_url'] || @config[:base_url] || API_URL
        end

        # Convert canonical intermediate messages to OpenAI API format.
        # Canonical: role 'tool' + tool_use_id, assistant with tool_calls [{id, name, input}].
        # OpenAI: role 'tool' + tool_call_id, assistant with tool_calls [{id, type, function}].
        # Messages already in OpenAI-native format pass through unchanged.
        def convert_messages(messages)
          messages.map do |msg|
            role = msg['role'] || msg[:role]
            content = msg['content'] || msg[:content]

            case role
            when 'tool'
              tool_call_id = msg['tool_call_id'] || msg[:tool_call_id] ||
                             msg['tool_use_id'] || msg[:tool_use_id]
              if tool_call_id
                # Non-string tool results are serialized so OpenAI always
                # receives a string content field.
                { 'role' => 'tool',
                  'tool_call_id' => tool_call_id,
                  'content' => content.is_a?(String) ? content : JSON.generate(content) }
              else
                msg # Native format or unknown — pass through unchanged
              end
            when 'assistant'
              tool_calls = msg['tool_calls'] || msg[:tool_calls]
              # Canonical tool_calls lack a 'type' key; native OpenAI entries
              # carry type: 'function' and are passed through.
              # NOTE(review): key?('type') only checks the string key — a
              # native entry with symbol keys would be re-wrapped; confirm
              # callers always use string keys here.
              if tool_calls && tool_calls.first && !tool_calls.first.key?('type')
                { 'role' => 'assistant',
                  'content' => content,
                  'tool_calls' => tool_calls.map { |tc|
                    { 'id' => tc['id'] || tc[:id],
                      'type' => 'function',
                      'function' => {
                        'name' => tc['name'] || tc[:name],
                        'arguments' => JSON.generate(tc['input'] || tc[:input] || {})
                      } }
                  } }
              else
                msg
              end
            else
              msg
            end
          end
        end

        # Decode the HTTP response, translating rate limits and non-200
        # statuses into ApiError; on success delegate to normalize_response.
        # A non-JSON error body raises JSON::ParserError, which the caller's
        # StandardError rescue converts into a generic ApiError.
        def parse_response(response)
          body = JSON.parse(response.body)

          if response.status == 429
            # retry-after header (seconds) becomes the suggested backoff.
            backoff = response.headers['retry-after']&.to_i
            raise ApiError.new("Rate limited",
                               provider: 'openai', retryable: true,
                               rate_limited: true, suggested_backoff: backoff)
          end

          unless response.status == 200
            raise ApiError.new(
              body.dig('error', 'message') || "HTTP #{response.status}",
              provider: 'openai',
              # Only server-side errors (5xx) are marked retryable.
              retryable: response.status >= 500
            )
          end

          normalize_response(body)
        end

        # Map an OpenAI completion body to the canonical response shape shared
        # by all adapters: content, tool_use, stop_reason, model, token counts.
        def normalize_response(body)
          choice = body.dig('choices', 0, 'message') || {}

          tool_use = nil
          if choice['tool_calls']
            tool_use = choice['tool_calls'].map do |tc|
              # OpenAI encodes tool arguments as a JSON string; unparseable
              # arguments are preserved raw under '_raw' rather than dropped.
              input = begin
                JSON.parse(tc.dig('function', 'arguments') || '{}')
              rescue JSON::ParserError
                { '_raw' => tc.dig('function', 'arguments') }
              end
              {
                'id' => tc['id'],
                'name' => tc.dig('function', 'name'),
                'input' => input
              }
            end
          end

          usage = body['usage'] || {}
          {
            'content' => choice['content'],
            'tool_use' => tool_use,
            # finish_reason lives on the choice, not the message, hence the
            # fallback dig into choices[0].
            'stop_reason' => map_stop_reason(choice['finish_reason'] || body.dig('choices', 0, 'finish_reason')),
            'model' => body['model'],
            'input_tokens' => usage['prompt_tokens'],
            'output_tokens' => usage['completion_tokens']
          }
        end

        # OpenAI finish_reason → canonical stop_reason
        def map_stop_reason(reason)
          case reason
          when 'stop' then 'end_turn'
          when 'tool_calls' then 'tool_use'
          when 'length' then 'max_tokens'
          when 'content_filter' then 'content_filter'
          else reason || 'unknown'
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
# frozen_string_literal: true

module KairosMcp
  module SkillSets
    module LlmClient
      # Converts MCP tool schemas to LLM provider formats.
      #
      # Schemas may arrive with either symbol keys (built in Ruby) or string
      # keys (parsed from JSON); every accessor handles both, matching the
      # dual-key handling already used by normalize_for_openai.
      module SchemaConverter
        module_function

        # Read +key+ from +schema+ whether it is stored as a symbol or a
        # string key. Returns nil when absent under both.
        def schema_field(schema, key)
          schema[key] || schema[key.to_s]
        end

        # MCP → Anthropic: rename inputSchema → input_schema
        def to_anthropic(mcp_schema)
          {
            name: schema_field(mcp_schema, :name),
            description: truncate_description(schema_field(mcp_schema, :description), 4096),
            input_schema: schema_field(mcp_schema, :inputSchema) || { type: 'object', properties: {} }
          }
        end

        # MCP → OpenAI: wrap in function envelope, normalize JSON Schema
        def to_openai(mcp_schema)
          params = normalize_for_openai(schema_field(mcp_schema, :inputSchema) || { type: 'object', properties: {} })
          {
            type: 'function',
            function: {
              name: schema_field(mcp_schema, :name),
              # OpenAI caps function descriptions at 1024 chars (Anthropic: 4096).
              description: truncate_description(schema_field(mcp_schema, :description), 1024),
              parameters: params
            }
          }
        end

        # Batch convert with error isolation per tool: a failing schema is
        # recorded under :errors without aborting the rest of the batch.
        # Returns { schemas: [...], errors: [{ tool:, error: }, ...] }.
        def convert_batch(mcp_schemas, target)
          converter = target == :openai ? method(:to_openai) : method(:to_anthropic)
          results = []
          errors = []

          mcp_schemas.each do |schema|
            results << converter.call(schema)
          rescue StandardError => e
            errors << { tool: schema_field(schema, :name), error: e.message }
          end

          { schemas: results, errors: errors }
        end

        # Normalize JSON Schema for OpenAI strict mode compatibility.
        # Non-Hash input (including nil) is returned untouched. The input hash
        # is shallow-duped; nested properties/items hashes are rebuilt
        # recursively, so the caller's schema is not mutated.
        def normalize_for_openai(schema)
          return schema unless schema.is_a?(Hash)

          normalized = schema.dup

          if normalized['type'] == 'object' || normalized[:type] == 'object'
            # OpenAI requires explicit additionalProperties: false.
            # Match the key style already used by the schema; never override
            # an explicitly provided value under either key style.
            key = normalized.key?(:type) ? :additionalProperties : 'additionalProperties'
            normalized[key] = false unless normalized.key?(:additionalProperties) || normalized.key?('additionalProperties')
          end

          # Recursively normalize nested properties
          props_key = normalized.key?(:properties) ? :properties : 'properties'
          if normalized[props_key].is_a?(Hash)
            normalized[props_key] = normalized[props_key].transform_values { |v| normalize_for_openai(v) }
          end

          # Normalize items in arrays
          items_key = normalized.key?(:items) ? :items : 'items'
          if normalized[items_key].is_a?(Hash)
            normalized[items_key] = normalize_for_openai(normalized[items_key])
          end

          normalized
        end

        # Clamp +desc+ to +max_len+ characters, replacing the tail with '...'
        # when truncation occurs; nil becomes the empty string.
        def truncate_description(desc, max_len)
          return '' if desc.nil?
          desc.length > max_len ? "#{desc[0...max_len - 3]}..." : desc
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
# LLM Client Configuration
|
|
2
|
+
# API keys are NEVER stored here — only env var names.
|
|
3
|
+
#
|
|
4
|
+
# Supported providers:
|
|
5
|
+
# anthropic — Direct Anthropic API (requires ANTHROPIC_API_KEY)
|
|
6
|
+
# openai — Direct OpenAI API (requires OPENAI_API_KEY)
|
|
7
|
+
# openrouter — OpenRouter proxy (requires OPENROUTER_API_KEY, set base_url)
|
|
8
|
+
# bedrock — AWS Bedrock (uses AWS credentials, data stays in AWS)
|
|
9
|
+
# claude_code — Claude Code CLI subprocess (subscription, no API cost)
|
|
10
|
+
# local — Local model via OpenAI-compatible API (Ollama, vLLM)
|
|
11
|
+
|
|
12
|
+
provider: anthropic
|
|
13
|
+
model: claude-sonnet-4-6
|
|
14
|
+
api_key_env: ANTHROPIC_API_KEY
|
|
15
|
+
default_max_tokens: 4096
|
|
16
|
+
default_temperature: 0.7
|
|
17
|
+
timeout_seconds: 120
|
|
18
|
+
|
|
19
|
+
# --- Provider-specific examples (uncomment one) ---
|
|
20
|
+
|
|
21
|
+
# # OpenRouter (cheaper API proxy, supports many models):
|
|
22
|
+
# provider: openrouter
|
|
23
|
+
# model: anthropic/claude-sonnet-4-6
|
|
24
|
+
# api_key_env: OPENROUTER_API_KEY
|
|
25
|
+
# base_url: https://openrouter.ai/api/v1
|
|
26
|
+
|
|
27
|
+
# # AWS Bedrock (data stays within AWS):
|
|
28
|
+
# provider: bedrock
|
|
29
|
+
# model: anthropic.claude-sonnet-4-5-20250929-v1:0
|
|
30
|
+
# aws_region: us-east-1
|
|
31
|
+
# # Uses AWS credentials from environment (AWS_ACCESS_KEY_ID, etc. or IAM role)
|
|
32
|
+
|
|
33
|
+
# # Claude Code CLI (subscription, no API cost):
|
|
34
|
+
# provider: claude_code
|
|
35
|
+
# # No api_key_env needed — uses Claude Code subscription auth
|
|
36
|
+
|
|
37
|
+
# # Local model (Ollama):
|
|
38
|
+
# provider: local
|
|
39
|
+
# model: llama3.2
|
|
40
|
+
# base_url: http://127.0.0.1:11434/v1
|
|
41
|
+
|
|
42
|
+
# Cost estimates (USD per 1M tokens, for llm_status display)
|
|
43
|
+
cost_estimates:
|
|
44
|
+
claude-sonnet-4-6: { input: 3.0, output: 15.0 }
|
|
45
|
+
claude-haiku-4-5: { input: 0.80, output: 4.0 }
|
|
46
|
+
claude-opus-4-6: { input: 15.0, output: 75.0 }
|
|
47
|
+
gpt-4o: { input: 2.50, output: 10.0 }
|
|
48
|
+
gpt-4o-mini: { input: 0.15, output: 0.60 }
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'json'
require 'yaml'

module KairosMcp
  module SkillSets
    module LlmClient
      module Tools
        # MCP tool that reads, merges, and persists the llm_client YAML
        # configuration. Only the environment-variable *name* for the API key
        # is ever written — never the key itself.
        class LlmConfigure < KairosMcp::Tools::BaseTool
          def name
            'llm_configure'
          end

          def description
            'Set or change the LLM provider, model, and API key env var. ' \
            'API keys are never stored — only the environment variable name.'
          end

          def category
            :llm
          end

          # JSON Schema for the tool's arguments. All properties are optional;
          # only supplied fields are merged into the existing config.
          # NOTE(review): the enum lists only anthropic/openai/local, but the
          # shipped llm_client.yml documents openrouter/bedrock/claude_code as
          # supported providers — confirm whether the enum should include them.
          def input_schema
            {
              type: 'object',
              properties: {
                provider: {
                  type: 'string',
                  description: 'Provider: "anthropic", "openai", or "local"',
                  enum: %w[anthropic openai local]
                },
                model: { type: 'string', description: 'Model name' },
                api_key_env: { type: 'string', description: 'Environment variable name for API key' },
                base_url: { type: 'string', description: 'Base URL override (for local/proxy)' },
                default_max_tokens: { type: 'integer' },
                default_temperature: { type: 'number' }
              }
            }
          end

          # Merge the provided arguments into config/llm_client.yml and write
          # it back. Returns a JSON text_content payload with the resulting
          # config (cost_estimates omitted for brevity); on any failure returns
          # a structured config_error payload instead of raising.
          #
          # Assumes `arguments` uses string keys (MCP arguments arrive as
          # parsed JSON) — TODO confirm symbol-keyed callers are not expected.
          def call(arguments)
            config_path = File.join(__dir__, '..', 'config', 'llm_client.yml')
            config = if File.exist?(config_path)
                       YAML.safe_load(File.read(config_path), permitted_classes: [Symbol]) || {}
                     else
                       {}
                     end

            # Update only provided fields
            %w[provider model api_key_env base_url default_max_tokens default_temperature].each do |key|
              config[key] = arguments[key] if arguments.key?(key)
            end

            # Remove base_url if nil (reset to default)
            config.delete('base_url') if config['base_url'].nil?

            File.write(config_path, YAML.dump(config))

            text_content(JSON.generate({
              'status' => 'ok',
              'message' => "Configuration updated",
              'config' => config.reject { |k, _| k == 'cost_estimates' }
            }))
          rescue StandardError => e
            text_content(JSON.generate({
              'status' => 'error',
              'error' => { 'type' => 'config_error', 'message' => e.message }
            }))
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
# frozen_string_literal: true

require 'json'
require 'yaml'

module KairosMcp
  module SkillSets
    module LlmClient
      module Tools
        # MCP tool that reports the active LLM configuration plus per-session
        # token usage and a rough USD cost estimate derived from the
        # cost_estimates table in llm_client.yml.
        class LlmStatus < KairosMcp::Tools::BaseTool
          def name
            'llm_status'
          end

          def description
            'Show current LLM provider configuration and session usage statistics.'
          end

          def category
            :llm
          end

          # Takes no arguments.
          def input_schema
            { type: 'object', properties: {} }
          end

          # Build the status report. Reads config/llm_client.yml (tolerating a
          # missing file), checks — without revealing — whether the configured
          # API key env var is set, and combines UsageTracker session stats
          # (defined elsewhere in this skillset) with the model's per-million
          # token prices to estimate cost.
          def call(arguments)
            config_path = File.join(__dir__, '..', 'config', 'llm_client.yml')
            config = if File.exist?(config_path)
                       YAML.safe_load(File.read(config_path), permitted_classes: [Symbol]) || {}
                     else
                       {}
                     end

            api_key_env = config['api_key_env']
            # True only when the env var is both present and non-empty; the
            # key value itself is never included in the output.
            api_key_set = api_key_env && ENV[api_key_env] && !ENV[api_key_env].empty?

            # NOTE(review): UsageTracker.stats is assumed to return a hash with
            # :calls, :input_tokens, :output_tokens — confirm in its definition.
            stats = UsageTracker.stats
            cost_estimates = config['cost_estimates'] || {}
            model = config['model'] || 'unknown'
            model_cost = cost_estimates[model] || {}

            estimated_cost = 0.0
            if model_cost.any?
              # Prices are USD per 1M tokens; keys may be strings (YAML) or
              # symbols depending on how the config was loaded.
              input_cost = (model_cost['input'] || model_cost[:input] || 0).to_f
              output_cost = (model_cost['output'] || model_cost[:output] || 0).to_f
              estimated_cost = (stats[:input_tokens] * input_cost + stats[:output_tokens] * output_cost) / 1_000_000.0
            end

            text_content(JSON.generate({
              'provider' => config['provider'] || 'not configured',
              'model' => model,
              'api_key_configured' => api_key_set || false,
              'session_usage' => {
                'total_calls' => stats[:calls],
                'total_input_tokens' => stats[:input_tokens],
                'total_output_tokens' => stats[:output_tokens],
                'total_cost_estimate_usd' => estimated_cost.round(4)
              }
            }))
          end
        end
      end
    end
  end
end
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "llm_client",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Pure LLM provider abstraction. One API call, returns response. No loop, no retry, no fallback.",
|
|
5
|
+
"author": "Masaomi Hatakeyama",
|
|
6
|
+
"layer": "L1",
|
|
7
|
+
"depends_on": [],
|
|
8
|
+
"provides": [
|
|
9
|
+
"llm_api_call",
|
|
10
|
+
"llm_configuration",
|
|
11
|
+
"schema_conversion"
|
|
12
|
+
],
|
|
13
|
+
"tool_classes": [
|
|
14
|
+
"KairosMcp::SkillSets::LlmClient::Tools::LlmCall",
|
|
15
|
+
"KairosMcp::SkillSets::LlmClient::Tools::LlmConfigure",
|
|
16
|
+
"KairosMcp::SkillSets::LlmClient::Tools::LlmStatus"
|
|
17
|
+
],
|
|
18
|
+
"config_files": ["config/llm_client.yml"],
|
|
19
|
+
"knowledge_dirs": ["knowledge/llm_client_guide"],
|
|
20
|
+
"gem_dependencies": ["faraday"],
|
|
21
|
+
"min_core_version": "2.8.0"
|
|
22
|
+
}
|
metadata
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
|
2
2
|
name: kairos-chain
|
|
3
3
|
version: !ruby/object:Gem::Version
|
|
4
|
-
version: 3.9.
|
|
4
|
+
version: 3.9.1
|
|
5
5
|
platform: ruby
|
|
6
6
|
authors:
|
|
7
7
|
- Masaomi Hatakeyama
|
|
@@ -307,6 +307,17 @@ files:
|
|
|
307
307
|
- templates/skillsets/knowledge_creator/skillset.json
|
|
308
308
|
- templates/skillsets/knowledge_creator/tools/kc_compare.rb
|
|
309
309
|
- templates/skillsets/knowledge_creator/tools/kc_evaluate.rb
|
|
310
|
+
- templates/skillsets/llm_client/llm_call.rb
|
|
311
|
+
- templates/skillsets/llm_client/llm_client.yml
|
|
312
|
+
- templates/skillsets/llm_client/llm_client/adapter.rb
|
|
313
|
+
- templates/skillsets/llm_client/llm_client/anthropic_adapter.rb
|
|
314
|
+
- templates/skillsets/llm_client/llm_client/bedrock_adapter.rb
|
|
315
|
+
- templates/skillsets/llm_client/llm_client/claude_code_adapter.rb
|
|
316
|
+
- templates/skillsets/llm_client/llm_client/openai_adapter.rb
|
|
317
|
+
- templates/skillsets/llm_client/llm_client/schema_converter.rb
|
|
318
|
+
- templates/skillsets/llm_client/llm_configure.rb
|
|
319
|
+
- templates/skillsets/llm_client/llm_status.rb
|
|
320
|
+
- templates/skillsets/llm_client/skillset.json
|
|
310
321
|
- templates/skillsets/mcp_client/config/mcp_client.yml
|
|
311
322
|
- templates/skillsets/mcp_client/lib/mcp_client.rb
|
|
312
323
|
- templates/skillsets/mcp_client/lib/mcp_client/client.rb
|