aia 0.8.6 → 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.version +1 -1
- data/CHANGELOG.md +4 -0
- data/COMMITS.md +23 -0
- data/README.md +111 -41
- data/lib/aia/chat_processor_service.rb +14 -1
- data/lib/aia/config.rb +147 -13
- data/lib/aia/prompt_handler.rb +7 -9
- data/lib/aia/ruby_llm_adapter.rb +79 -33
- data/lib/aia/session.rb +127 -99
- data/lib/aia/ui_presenter.rb +10 -1
- data/lib/aia.rb +6 -4
- data/lib/extensions/ruby_llm/chat.rb +197 -0
- data/mcp_servers/README.md +90 -0
- data/mcp_servers/filesystem.json +9 -0
- data/mcp_servers/imcp.json +7 -0
- data/mcp_servers/launcher.json +11 -0
- data/mcp_servers/playwright_server_definition.json +9 -0
- data/mcp_servers/timeserver.json +8 -0
- metadata +21 -14
- data/lib/aia/ai_client_adapter.rb +0 -210
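The headline change in 0.9.0 is the swap from the old ai_client adapter to RubyLLM, plus optional MCP (Model Context Protocol) tool support driven by the new mcp_servers/ definitions. As a quick orientation before the hunks below, here is a hedged sketch of what the new adapter reads at startup; the values are placeholders, and assigning AIA.config directly is for illustration only (real values come from the CLI and config.rb, whose changes are not shown here):

```ruby
# Illustrative only -- the names below appear in the ruby_llm_adapter.rb hunks that follow.
ENV['OPENAI_API_KEY'] ||= 'sk-placeholder'              # ANTHROPIC_, GEMINI_, DEEPSEEK_ and AWS_* keys are read the same way
AIA.config.model         = 'openai/gpt-4o'              # "provider/model", or a bare model name for RubyLLM to resolve
AIA.config.mcp_servers   = 'mcp_servers/combined.json'  # hypothetical path; the adapter expects one combined definitions file
AIA.config.allowed_tools = nil                          # nil means every tool exposed by the MCP servers is allowed
```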
data/lib/aia/ruby_llm_adapter.rb
CHANGED
@@ -1,32 +1,44 @@
 # lib/aia/ruby_llm_adapter.rb
-#

 require 'ruby_llm'
+require 'mcp_client'

 module AIA
   class RubyLLMAdapter
     def initialize
-
+
+      debug_me('=== RubyLLMAdapter ===')
+
+      @model = AIA.config.model
       model_info = extract_model_parts(@model)
-
+
       # Configure RubyLLM with available API keys
       RubyLLM.configure do |config|
-        config.openai_api_key
+        config.openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
         config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY', nil)
-        config.gemini_api_key
-        config.deepseek_api_key
-
+        config.gemini_api_key = ENV.fetch('GEMINI_API_KEY', nil)
+        config.deepseek_api_key = ENV.fetch('DEEPSEEK_API_KEY', nil)
+
         # Bedrock configuration
-        config.bedrock_api_key
-        config.bedrock_secret_key
-        config.bedrock_region
+        config.bedrock_api_key = ENV.fetch('AWS_ACCESS_KEY_ID', nil)
+        config.bedrock_secret_key = ENV.fetch('AWS_SECRET_ACCESS_KEY', nil)
+        config.bedrock_region = ENV.fetch('AWS_REGION', nil)
         config.bedrock_session_token = ENV.fetch('AWS_SESSION_TOKEN', nil)
       end
-
-
+
+      debug_me{[ :model_info ]}
+
+      mcp_client, mcp_tools = generate_mcp_tools(model_info[:provider])
+
+      debug_me{[ :mcp_tools ]}
+
+      if mcp_tools && !mcp_tools.empty?
+        RubyLLM::Chat.with_mcp(client: mcp_client, call_tool_method: :call_tool, tools: mcp_tools)
+      end
+
       @chat = RubyLLM.chat(model: model_info[:model])
     end
-
+
     def chat(prompt)
       if @model.downcase.include?('dall-e') || @model.downcase.include?('image-generation')
         text_to_image(prompt)
@@ -40,14 +52,14 @@ module AIA
         text_to_text(prompt)
       end
     end
-
+
     def transcribe(audio_file)
       @chat.ask("Transcribe this audio", with: { audio: audio_file })
     end
-
+
     def speak(text)
      output_file = "#{Time.now.to_i}.mp3"
-
+
      # Note: RubyLLM doesn't have a direct text-to-speech feature
      # This is a placeholder for a custom implementation or external service
      begin
@@ -60,25 +72,59 @@ module AIA
         "Error generating audio: #{e.message}"
       end
     end
-
+
     def method_missing(method, *args, &block)
+      debug_me(tag: '== missing ==', levels: 25){[ :method, :args ]}
       if @chat.respond_to?(method)
         @chat.public_send(method, *args, &block)
       else
         super
       end
     end
-
+
     def respond_to_missing?(method, include_private = false)
       @chat.respond_to?(method) || super
     end
-
+
     private
-
+
+    # Generate an array of MCP tools, filtered and formatted for the correct provider.
+    # @param config [OpenStruct] the config object containing mcp_servers, allowed_tools, and model
+    # @return [Array<Hash>, nil] the filtered and formatted MCP tools or nil if no tools
+    def generate_mcp_tools(provider)
+      return [nil, nil] unless AIA.config.mcp_servers && !AIA.config.mcp_servers.empty?
+
+      debug_me('=== generate_mcp_tools ===')
+
+      # AIA.config.mcp_servers is now a path to the combined JSON file
+      mcp_client = MCPClient.create_client(server_definition_file: AIA.config.mcp_servers)
+      debug_me
+      all_tools = mcp_client.list_tools(cache: false).map(&:name)
+      debug_me
+      allowed = AIA.config.allowed_tools
+      debug_me
+      filtered_tools = allowed.nil? ? all_tools : all_tools & allowed
+      debug_me{[ :filtered_tools ]}
+
+      debug_me{[ :provider ]}
+
+      mcp_tools = if :anthropic == provider.to_sym
+                    debug_me
+                    mcp_client.to_anthropic_tools(tool_names: filtered_tools)
+                  else
+                    debug_me
+                    mcp_client.to_openai_tools(tool_names: filtered_tools)
+                  end
+      [mcp_client, mcp_tools]
+    rescue => e
+      STDERR.puts "ERROR: Failed to generate MCP tools: #{e.message}"
+      nil
+    end
+
     def extract_model_parts(model_string)
       parts = model_string.split('/')
       parts.map!(&:strip)
-
+
       if parts.length > 1
         provider = parts[0]
         model = parts[1]
@@ -86,10 +132,10 @@ module AIA
         provider = nil # RubyLLM will figure it out from the model name
         model = parts[0]
       end
-
+
       { provider: provider, model: model }
     end
-
+
     def extract_text_prompt(prompt)
       if prompt.is_a?(String)
         prompt
@@ -101,18 +147,18 @@ module AIA
         prompt.to_s
       end
     end
-
+
     def text_to_text(prompt)
       text_prompt = extract_text_prompt(prompt)
       @chat.ask(text_prompt)
     end
-
+
     def text_to_image(prompt)
       text_prompt = extract_text_prompt(prompt)
       output_file = "#{Time.now.to_i}.png"
-
+
       begin
-        RubyLLM.paint(text_prompt, output_path: output_file,
+        RubyLLM.paint(text_prompt, output_path: output_file,
                       size: AIA.config.image_size,
                       quality: AIA.config.image_quality,
                       style: AIA.config.image_style)
@@ -121,11 +167,11 @@ module AIA
         "Error generating image: #{e.message}"
       end
     end
-
+
     def image_to_text(prompt)
       image_path = extract_image_path(prompt)
       text_prompt = extract_text_prompt(prompt)
-
+
       if image_path && File.exist?(image_path)
         begin
           @chat.ask(text_prompt, with: { image: image_path })
@@ -136,11 +182,11 @@ module AIA
         text_to_text(prompt)
       end
     end
-
+
     def text_to_audio(prompt)
       text_prompt = extract_text_prompt(prompt)
       output_file = "#{Time.now.to_i}.mp3"
-
+
       begin
         # Note: RubyLLM doesn't have a direct TTS feature
         # This is a placeholder for a custom implementation
@@ -151,7 +197,7 @@ module AIA
         "Error generating audio: #{e.message}"
       end
     end
-
+
     def audio_to_text(prompt)
       if prompt.is_a?(String) && File.exist?(prompt) &&
          prompt.downcase.end_with?('.mp3', '.wav', '.m4a', '.flac')
@@ -165,7 +211,7 @@ module AIA
         text_to_text(prompt)
       end
     end
-
+
     def extract_image_path(prompt)
       if prompt.is_a?(String)
         prompt.scan(/\b[\w\/\.\-]+\.(jpg|jpeg|png|gif|webp)\b/i).first&.first
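For orientation, a minimal usage sketch of the adapter's public surface after this change; it exercises only calls visible in the hunks above and assumes AIA.config has already been populated by the CLI layer, with the relevant API key exported in the environment:

```ruby
# Minimal sketch, assuming AIA.config.model (and friends) are already set.
adapter  = AIA::RubyLLMAdapter.new   # reads ENV API keys, wires MCP tools when mcp_servers is configured
response = adapter.chat("Summarize the README in three bullets")   # non-image models go through text_to_text

# extract_model_parts splits "provider/model"; a bare model name leaves provider nil for RubyLLM to resolve:
#   "openai/gpt-4o" => { provider: "openai", model: "gpt-4o" }
#   "gpt-4o"        => { provider: nil,      model: "gpt-4o" }
# A model whose name contains "dall-e" or "image-generation" routes the same chat call to text_to_image instead.
```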
data/lib/aia/session.rb
CHANGED
@@ -22,6 +22,7 @@ module AIA

     def initialize(prompt_handler)
       @prompt_handler = prompt_handler
+      @chat_prompt_id = nil # Initialize to nil

       # Special handling for chat mode with context files but no prompt ID
       if AIA.chat? && AIA.config.prompt_id.empty? && AIA.config.context_files && !AIA.config.context_files.empty?
@@ -35,8 +36,8 @@ module AIA
         @history_manager = HistoryManager.new(prompt: prompt_instance)
       end

-      @context_manager
-      @ui_presenter
+      @context_manager = ContextManager.new(system_prompt: AIA.config.system_prompt)
+      @ui_presenter = UIPresenter.new
       @directive_processor = DirectiveProcessor.new
       @chat_processor = ChatProcessorService.new(@ui_presenter, @directive_processor)

@@ -111,7 +112,6 @@ module AIA
       end

       prompt.save
-
       # Substitute variables and get final prompt text
       prompt_text = prompt.to_s

@@ -151,129 +151,157 @@ module AIA
     end

     # Starts the interactive chat session.
+    # NOTE: there could have been an initial prompt sent into this session
+    # via a prompt_id on the command line, piped in text, or context files.
     def start_chat(skip_context_files: false)
-      # Consider if display_chat_header is needed if robot+separator already shown
-      # For now, let's keep it, maybe add an indicator message
       puts "\nEntering interactive chat mode..."
       @ui_presenter.display_chat_header

-
-
-
-      if !skip_context_files && AIA.config.context_files && !AIA.config.context_files.empty?
-        context_content = AIA.config.context_files.map do |file|
-          File.read(file) rescue "Error reading file: #{file}"
-        end.join("\n\n")
-
-        if !context_content.empty?
-          # Add context files content to context
-          @context_manager.add_to_context(role: 'user', content: context_content)
-
-          # Process the context
-          operation_type = @chat_processor.determine_operation_type(AIA.config.model)
-          @ui_presenter.display_thinking_animation
-          response = @chat_processor.process_prompt(@context_manager.get_context, operation_type)
-
-          # Add AI response to context
-          @context_manager.add_to_context(role: 'assistant', content: response)
+      # Generate chat prompt ID
+      now = Time.now
+      @chat_prompt_id = "chat_#{now.strftime('%Y%m%d_%H%M%S')}"

-
-
-
-
+      # Create the temporary prompt
+      begin
+        # Create the unique? prompt ID in the file storage system with its initial text
+        PromptManager::Prompt.create(
+          id: @chat_prompt_id,
+          text: "Today's date is #{now.strftime('%Y-%m-%d')} and the current time is #{now.strftime('%H:%M:%S')}"
+        )
+
+        # Capture self for the handlers
+        session_instance = self
+
+        # Set up cleanup handlers only after prompt is created
+        at_exit { session_instance.send(:cleanup_chat_prompt) }
+        Signal.trap('INT') {
+          session_instance.send(:cleanup_chat_prompt)
+          exit
+        }
+
+        # Access this chat session's prompt object in order to do the dynamic things
+        # in follow up prompts that can be done in the batch mode like shell substitution. etc.
+        @chat_prompt = PromptManager::Prompt.new(
+          id: @chat_prompt_id,
+          directives_processor: @directive_processor,
+          erb_flag: AIA.config.erb,
+          envar_flag: AIA.config.shell,
+          external_binding: binding,
+        )
+
+        Reline::HISTORY.clear
+
+        # Load context files if any and not skipping
+        if !skip_context_files && AIA.config.context_files && !AIA.config.context_files.empty?
+          context = AIA.config.context_files.map do |file|
+            File.read(file) rescue "Error reading file: #{file}"
+          end.join("\n\n")
+
+          if !context.empty?
+            # Add context files content to context
+            @context_manager.add_to_context(role: 'user', content: context)
+
+            # Process the context
+            operation_type = @chat_processor.determine_operation_type(AIA.config.model)
+            @ui_presenter.display_thinking_animation
+            response = @chat_processor.process_prompt(@context_manager.get_context, operation_type)
+
+            # Add AI response to context
+            @context_manager.add_to_context(role: 'assistant', content: response)
+
+            # Output the response
+            @chat_processor.output_response(response)
+            @chat_processor.speak(response)
+            @ui_presenter.display_separator
+          end
         end
-      end
-
-      # Check for piped input (STDIN not a TTY and has data)
-      if !STDIN.tty?
-        # Save the original STDIN
-        original_stdin = STDIN.dup

-        #
-
+        # Handle piped input
+        if !STDIN.tty?
+          original_stdin = STDIN.dup
+          piped_input = STDIN.read.strip
+          STDIN.reopen('/dev/tty')

-
-
+          if !piped_input.empty?
+            @chat_prompt.text = piped_input
+            processed_input = @chat_prompt.to_s

-
-        # Add piped input to context
-        @context_manager.add_to_context(role: 'user', content: piped_input)
+            @context_manager.add_to_context(role: 'user', content: processed_input)

-
-
-
-        response = @chat_processor.process_prompt(@context_manager.get_context, operation_type)
+            operation_type = @chat_processor.determine_operation_type(AIA.config.model)
+            @ui_presenter.display_thinking_animation
+            response = @chat_processor.process_prompt(@context_manager.get_context, operation_type)

-
-
+            @context_manager.add_to_context(role: 'assistant', content: response)
+            @chat_processor.output_response(response)
+            @chat_processor.speak(response) if AIA.speak?
+            @ui_presenter.display_separator
+          end

-
-        @chat_processor.output_response(response)
-        @chat_processor.speak(response) if AIA.speak?
-        @ui_presenter.display_separator
+          STDIN.reopen(original_stdin)
         end
-
-        # Restore original stdin when done with piped input processing
-        STDIN.reopen(original_stdin)
-      end
-
-      loop do
-        # Get user input
-        prompt = @ui_presenter.ask_question

+        # Main chat loop
+        loop do
+          follow_up_prompt = @ui_presenter.ask_question

+          break if follow_up_prompt.nil? || follow_up_prompt.strip.downcase == 'exit' || follow_up_prompt.strip.empty?

-
-
-
-
-            file.puts "\nYou: #{prompt}"
+          if AIA.config.out_file
+            File.open(AIA.config.out_file, 'a') do |file|
+              file.puts "\nYou: #{follow_up_prompt}"
+            end
          end
-        end

-
-
-
-
-
-
-
-
-
-
-
-
-          puts "\n#{directive_output}\n"
-          # Optionally add directive output to context or handle as needed
-          # Example: Add a summary to context
-          # @context_manager.add_to_context(role: 'assistant', content: "Directive executed. Output:\n#{directive_output}")
-          # For now, just use a placeholder prompt modification:
-          prompt = "I executed this directive: #{prompt}\nHere's the output: #{directive_output}\nLet's continue our conversation."
-          # Fall through to add this modified prompt to context and send to AI
+          if @directive_processor.directive?(follow_up_prompt)
+            directive_output = @directive_processor.process(follow_up_prompt, @context_manager)
+
+            if follow_up_prompt.strip.start_with?('//clear')
+              @ui_presenter.display_info("Chat context cleared.")
+              next
+            elsif directive_output.nil? || directive_output.strip.empty?
+              next
+            else
+              puts "\n#{directive_output}\n"
+              follow_up_prompt = "I executed this directive: #{follow_up_prompt}\nHere's the output: #{directive_output}\nLet's continue our conversation."
+            end
          end
-        end
-
-        # Use ContextManager instead of HistoryManager
-        @context_manager.add_to_context(role: 'user', content: prompt)

-
-
+          @chat_prompt.text = follow_up_prompt
+          processed_prompt = @chat_prompt.to_s

-
-
-        response = @chat_processor.process_prompt(conversation, operation_type)
+          @context_manager.add_to_context(role: 'user', content: processed_prompt)
+          conversation = @context_manager.get_context

-
+          operation_type = @chat_processor.determine_operation_type(AIA.config.model)
+          @ui_presenter.display_thinking_animation
+          response = @chat_processor.process_prompt(conversation, operation_type)

-
-
+          @ui_presenter.display_ai_response(response)
+          @context_manager.add_to_context(role: 'assistant', content: response)
+          @chat_processor.speak(response)

-
+          @ui_presenter.display_separator
+        end

-
+      ensure
+        @ui_presenter.display_chat_end
       end
+    end

-
+    private
+
+    def cleanup_chat_prompt
+      if @chat_prompt_id
+        puts "[DEBUG] Cleaning up chat prompt: #{@chat_prompt_id}" if AIA.debug?
+        begin
+          @chat_prompt.delete
+          @chat_prompt_id = nil # Prevent repeated attempts if error occurs elsewhere
+        rescue => e
+          STDERR.puts "[ERROR] Failed to delete chat prompt #{@chat_prompt_id}: #{e.class} - #{e.message}"
+          STDERR.puts e.backtrace.join("\n")
+        end
+      end
     end
   end
 end
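The rewritten start_chat leans on a throwaway PromptManager prompt so that each follow-up gets the same ERB, shell-variable, and directive processing as batch prompts, and cleans it up on exit or Ctrl-C. A reduced sketch of that lifecycle using only the PromptManager calls shown above (storage setup, error handling, and the chat loop itself omitted; the flag values are illustrative):

```ruby
# Reduced sketch of the temporary chat-prompt lifecycle introduced in start_chat.
chat_prompt_id = "chat_#{Time.now.strftime('%Y%m%d_%H%M%S')}"
PromptManager::Prompt.create(id: chat_prompt_id, text: "initial seed text")

chat_prompt = PromptManager::Prompt.new(
  id:         chat_prompt_id,
  erb_flag:   true,   # AIA.config.erb in the real code
  envar_flag: true    # AIA.config.shell in the real code
)

chat_prompt.text = "What time is it?"   # each follow-up line is assigned here
processed        = chat_prompt.to_s     # to_s applies the same dynamic processing batch prompts get

chat_prompt.delete   # cleanup_chat_prompt does this via at_exit and on SIGINT
```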
data/lib/aia/ui_presenter.rb
CHANGED
@@ -38,10 +38,19 @@ module AIA
     def format_chat_response(response, output = $stdout)
       indent = ' '

+      # Convert RubyLLM::Message to string if necessary
+      response_text = if response.is_a?(RubyLLM::Message)
+                        response.content.to_s
+                      elsif response.respond_to?(:to_s)
+                        response.to_s
+                      else
+                        response
+                      end
+
       in_code_block = false
       language = ''

-
+      response_text.each_line do |line|
         line = line.chomp

         # Check for code block delimiters
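The formatter now tolerates either a plain String or a RubyLLM::Message. The same normalization, pulled out as a stand-alone sketch (condensed to two branches; the hunk above also keeps an explicit respond_to?(:to_s) fallback):

```ruby
# Stand-alone sketch of the normalization used by format_chat_response.
def response_to_text(response)
  if response.is_a?(RubyLLM::Message)
    response.content.to_s   # RubyLLM::Message carries its text in #content
  else
    response.to_s           # Strings and anything else stringable pass through
  end
end
```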
data/lib/aia.rb
CHANGED
@@ -4,9 +4,12 @@
 # The AIA module serves as the namespace for the AIA application, which
 # provides an interface for interacting with AI models and managing prompts.

-require 'ai_client'
 require 'ruby_llm'
+require_relative 'extensions/ruby_llm/chat'
+
 require 'prompt_manager'
+require 'mcp_client'
+
 require 'debug_me'
 include DebugMe
 $DEBUG_ME = false
@@ -18,7 +21,6 @@ require_relative 'aia/version'
 require_relative 'aia/config'
 require_relative 'aia/shell_command_executor'
 require_relative 'aia/prompt_handler'
-require_relative 'aia/ai_client_adapter'
 require_relative 'aia/ruby_llm_adapter'
 require_relative 'aia/directive_processor'
 require_relative 'aia/history_manager'
@@ -78,14 +80,14 @@ module AIA
     end

     prompt_handler = PromptHandler.new
-
+
     # Initialize the appropriate client adapter based on configuration
     @config.client = if @config.adapter == 'ruby_llm'
                        RubyLLMAdapter.new
                      else
                        AIClientAdapter.new
                      end
-
+
     session = Session.new(prompt_handler)

     session.start