aia 0.8.6 → 0.9.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/aia/ruby_llm_adapter.rb CHANGED
@@ -1,32 +1,44 @@
  # lib/aia/ruby_llm_adapter.rb
- #
 
  require 'ruby_llm'
+ require 'mcp_client'
 
  module AIA
  class RubyLLMAdapter
  def initialize
- @model = AIA.config.model
+
+ debug_me('=== RubyLLMAdapter ===')
+
+ @model = AIA.config.model
  model_info = extract_model_parts(@model)
-
+
  # Configure RubyLLM with available API keys
  RubyLLM.configure do |config|
- config.openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
+ config.openai_api_key = ENV.fetch('OPENAI_API_KEY', nil)
  config.anthropic_api_key = ENV.fetch('ANTHROPIC_API_KEY', nil)
- config.gemini_api_key = ENV.fetch('GEMINI_API_KEY', nil)
- config.deepseek_api_key = ENV.fetch('DEEPSEEK_API_KEY', nil)
-
+ config.gemini_api_key = ENV.fetch('GEMINI_API_KEY', nil)
+ config.deepseek_api_key = ENV.fetch('DEEPSEEK_API_KEY', nil)
+
  # Bedrock configuration
- config.bedrock_api_key = ENV.fetch('AWS_ACCESS_KEY_ID', nil)
- config.bedrock_secret_key = ENV.fetch('AWS_SECRET_ACCESS_KEY', nil)
- config.bedrock_region = ENV.fetch('AWS_REGION', nil)
+ config.bedrock_api_key = ENV.fetch('AWS_ACCESS_KEY_ID', nil)
+ config.bedrock_secret_key = ENV.fetch('AWS_SECRET_ACCESS_KEY', nil)
+ config.bedrock_region = ENV.fetch('AWS_REGION', nil)
  config.bedrock_session_token = ENV.fetch('AWS_SESSION_TOKEN', nil)
  end
-
- # Initialize chat with the specified model
+
+ debug_me{[ :model_info ]}
+
+ mcp_client, mcp_tools = generate_mcp_tools(model_info[:provider])
+
+ debug_me{[ :mcp_tools ]}
+
+ if mcp_tools && !mcp_tools.empty?
+ RubyLLM::Chat.with_mcp(client: mcp_client, call_tool_method: :call_tool, tools: mcp_tools)
+ end
+
  @chat = RubyLLM.chat(model: model_info[:model])
  end
-
+
  def chat(prompt)
  if @model.downcase.include?('dall-e') || @model.downcase.include?('image-generation')
  text_to_image(prompt)
@@ -40,14 +52,14 @@ module AIA
  text_to_text(prompt)
  end
  end
-
+
  def transcribe(audio_file)
  @chat.ask("Transcribe this audio", with: { audio: audio_file })
  end
-
+
  def speak(text)
  output_file = "#{Time.now.to_i}.mp3"
-
+
  # Note: RubyLLM doesn't have a direct text-to-speech feature
  # This is a placeholder for a custom implementation or external service
  begin
@@ -60,25 +72,121 @@ module AIA
  "Error generating audio: #{e.message}"
  end
  end
-
+
  def method_missing(method, *args, &block)
+ debug_me(tag: '== missing ==', levels: 25){[ :method, :args ]}
  if @chat.respond_to?(method)
  @chat.public_send(method, *args, &block)
  else
  super
  end
  end
-
+
+ # Clear the chat context/history
+ # Needed for the //clear directive
+ def clear_context
+ AIA.debug_me(tag: '== AGGRESSIVELY clearing LLM context ==') do
+ begin
+ # Option 1: Directly clear the messages array in the current chat object
+ if @chat.instance_variable_defined?(:@messages)
+ AIA.debug_me("Directly clearing @messages array")
+ old_messages = @chat.instance_variable_get(:@messages)
+ AIA.debug_me{[:old_messages, old_messages.length]}
+ # Force a completely empty array, not just attempting to clear it
+ @chat.instance_variable_set(:@messages, [])
+ end
+
+ # Option 2: Force RubyLLM to create a new chat instance at the global level
+ # This ensures any shared state is reset
+ AIA.debug_me("Force global RubyLLM chat reset")
+ model_info = extract_model_parts(@model)
+ RubyLLM.instance_variable_set(:@chat, nil) if RubyLLM.instance_variable_defined?(:@chat)
+
+ # Option 3: Create a completely fresh chat instance for this adapter
+ @chat = nil # First nil it to help garbage collection
+ @chat = RubyLLM.chat(model: model_info[:model])
+ AIA.debug_me("Created fresh RubyLLM::Chat instance")
+
+ # Option 4: Call official clear_history method if it exists
+ if @chat.respond_to?(:clear_history)
+ AIA.debug_me("Calling clear_history method")
+ @chat.clear_history
+ end
+
+ # Option 5: If chat has messages, force set it to empty again as a final check
+ if @chat.instance_variable_defined?(:@messages) && !@chat.instance_variable_get(:@messages).empty?
+ AIA.debug_me("FINAL CHECK: @messages still not empty, forcing empty")
+ @chat.instance_variable_set(:@messages, [])
+ end
+
+ # Reset any MCP tools configuration
+ begin
+ mcp_client, mcp_tools = generate_mcp_tools(model_info[:provider])
+ if mcp_tools && !mcp_tools.empty?
+ AIA.debug_me("Reconfiguring MCP tools")
+ RubyLLM::Chat.with_mcp(client: mcp_client, call_tool_method: :call_tool, tools: mcp_tools)
+ end
+ rescue => mcp_error
+ AIA.debug_me{[:mcp_error, mcp_error.message]}
+ end
+
+ # Final verification
+ new_messages = @chat.instance_variable_defined?(:@messages) ? @chat.instance_variable_get(:@messages) : []
+ AIA.debug_me{[:new_messages, new_messages.length]}
+
+ return "Chat context successfully cleared."
+ rescue => e
+ AIA.debug_me{
+ [ :e, e.message, e.backtrace ]
+ }
+ return "Error clearing chat context: #{e.message}"
+ end
+ end
+ end
+
  def respond_to_missing?(method, include_private = false)
  @chat.respond_to?(method) || super
  end
-
+
  private
-
+
+ # Generate an array of MCP tools, filtered and formatted for the correct provider.
+ # @param config [OpenStruct] the config object containing mcp_servers, allowed_tools, and model
+ # @return [Array<Hash>, nil] the filtered and formatted MCP tools or nil if no tools
+ def generate_mcp_tools(provider)
+ return [nil, nil] unless AIA.config.mcp_servers && !AIA.config.mcp_servers.empty?
+
+ debug_me('=== generate_mcp_tools ===')
+
+ # AIA.config.mcp_servers is now a path to the combined JSON file
+ mcp_client = MCPClient.create_client(server_definition_file: AIA.config.mcp_servers)
+ debug_me
+ all_tools = mcp_client.list_tools(cache: false).map(&:name)
+ debug_me
+ allowed = AIA.config.allowed_tools
+ debug_me
+ filtered_tools = allowed.nil? ? all_tools : all_tools & allowed
+ debug_me{[ :filtered_tools ]}
+
+ debug_me{[ :provider ]}
+
+ mcp_tools = if :anthropic == provider.to_sym
+ debug_me
+ mcp_client.to_anthropic_tools(tool_names: filtered_tools)
+ else
+ debug_me
+ mcp_client.to_openai_tools(tool_names: filtered_tools)
+ end
+ [mcp_client, mcp_tools]
+ rescue => e
+ STDERR.puts "ERROR: Failed to generate MCP tools: #{e.message}"
+ nil
+ end
+
  def extract_model_parts(model_string)
  parts = model_string.split('/')
  parts.map!(&:strip)
-
+
  if parts.length > 1
  provider = parts[0]
  model = parts[1]
@@ -86,10 +194,10 @@ module AIA
  provider = nil # RubyLLM will figure it out from the model name
  model = parts[0]
  end
-
+
  { provider: provider, model: model }
  end
-
+
  def extract_text_prompt(prompt)
  if prompt.is_a?(String)
  prompt
@@ -101,18 +209,18 @@ module AIA
  prompt.to_s
  end
  end
-
+
  def text_to_text(prompt)
  text_prompt = extract_text_prompt(prompt)
  @chat.ask(text_prompt)
  end
-
+
  def text_to_image(prompt)
  text_prompt = extract_text_prompt(prompt)
  output_file = "#{Time.now.to_i}.png"
-
+
  begin
- RubyLLM.paint(text_prompt, output_path: output_file,
+ RubyLLM.paint(text_prompt, output_path: output_file,
  size: AIA.config.image_size,
  quality: AIA.config.image_quality,
  style: AIA.config.image_style)
@@ -121,11 +229,11 @@ module AIA
  "Error generating image: #{e.message}"
  end
  end
-
+
  def image_to_text(prompt)
  image_path = extract_image_path(prompt)
  text_prompt = extract_text_prompt(prompt)
-
+
  if image_path && File.exist?(image_path)
  begin
  @chat.ask(text_prompt, with: { image: image_path })
@@ -136,11 +244,11 @@ module AIA
  text_to_text(prompt)
  end
  end
-
+
  def text_to_audio(prompt)
  text_prompt = extract_text_prompt(prompt)
  output_file = "#{Time.now.to_i}.mp3"
-
+
  begin
  # Note: RubyLLM doesn't have a direct TTS feature
  # This is a placeholder for a custom implementation
@@ -151,7 +259,7 @@ module AIA
  "Error generating audio: #{e.message}"
  end
  end
-
+
  def audio_to_text(prompt)
  if prompt.is_a?(String) && File.exist?(prompt) &&
  prompt.downcase.end_with?('.mp3', '.wav', '.m4a', '.flac')
@@ -165,7 +273,7 @@ module AIA
  text_to_text(prompt)
  end
  end
-
+
  def extract_image_path(prompt)
  if prompt.is_a?(String)
  prompt.scan(/\b[\w\/\.\-]+\.(jpg|jpeg|png|gif|webp)\b/i).first&.first
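
The new generate_mcp_tools helper shown above wires the mcp_client gem into the adapter: it builds a client from the combined server-definition JSON file, lists the available tool names, intersects them with AIA.config.allowed_tools, and converts the result into the schema the target provider expects. Below is a minimal sketch of that flow using only the calls that appear in this diff; the servers.json path, the provider value, and the allow-list are illustrative placeholders, not values from the gem.

    require 'mcp_client'

    # Illustrative inputs; in the adapter these come from AIA.config.
    definition_file = 'servers.json'   # hypothetical combined MCP server definition file
    allowed         = nil              # nil means "all tools allowed"
    provider        = :openai

    client    = MCPClient.create_client(server_definition_file: definition_file)
    all_names = client.list_tools(cache: false).map(&:name)
    names     = allowed.nil? ? all_names : all_names & allowed

    tools = if :anthropic == provider
              client.to_anthropic_tools(tool_names: names)
            else
              client.to_openai_tools(tool_names: names)
            end

    # The adapter then registers the tools once, before creating the chat:
    # RubyLLM::Chat.with_mcp(client: client, call_tool_method: :call_tool, tools: tools)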
data/lib/aia/session.rb CHANGED
@@ -22,6 +22,7 @@ module AIA
 
  def initialize(prompt_handler)
  @prompt_handler = prompt_handler
+ @chat_prompt_id = nil # Initialize to nil
 
  # Special handling for chat mode with context files but no prompt ID
  if AIA.chat? && AIA.config.prompt_id.empty? && AIA.config.context_files && !AIA.config.context_files.empty?
@@ -35,8 +36,8 @@ module AIA
  @history_manager = HistoryManager.new(prompt: prompt_instance)
  end
 
- @context_manager = ContextManager.new(system_prompt: AIA.config.system_prompt) # Add this line
- @ui_presenter = UIPresenter.new
+ @context_manager = ContextManager.new(system_prompt: AIA.config.system_prompt)
+ @ui_presenter = UIPresenter.new
  @directive_processor = DirectiveProcessor.new
  @chat_processor = ChatProcessorService.new(@ui_presenter, @directive_processor)
 
@@ -111,7 +112,6 @@ module AIA
  end
 
  prompt.save
-
  # Substitute variables and get final prompt text
  prompt_text = prompt.to_s
 
@@ -151,129 +151,177 @@ module AIA
  end
 
  # Starts the interactive chat session.
+ # NOTE: there could have been an initial prompt sent into this session
+ # via a prompt_id on the command line, piped in text, or context files.
  def start_chat(skip_context_files: false)
- # Consider if display_chat_header is needed if robot+separator already shown
- # For now, let's keep it, maybe add an indicator message
  puts "\nEntering interactive chat mode..."
  @ui_presenter.display_chat_header
 
- Reline::HISTORY.clear # Keep Reline history for user input editing, separate from chat context
-
- # Load context files if any and not skipping
- if !skip_context_files && AIA.config.context_files && !AIA.config.context_files.empty?
- context_content = AIA.config.context_files.map do |file|
- File.read(file) rescue "Error reading file: #{file}"
- end.join("\n\n")
-
- if !context_content.empty?
- # Add context files content to context
- @context_manager.add_to_context(role: 'user', content: context_content)
-
- # Process the context
- operation_type = @chat_processor.determine_operation_type(AIA.config.model)
- @ui_presenter.display_thinking_animation
- response = @chat_processor.process_prompt(@context_manager.get_context, operation_type)
-
- # Add AI response to context
- @context_manager.add_to_context(role: 'assistant', content: response)
+ # Generate chat prompt ID
+ now = Time.now
+ @chat_prompt_id = "chat_#{now.strftime('%Y%m%d_%H%M%S')}"
 
- # Output the response
- @chat_processor.output_response(response)
- @chat_processor.speak(response)
- @ui_presenter.display_separator
+ # Create the temporary prompt
+ begin
+ # Create the unique? prompt ID in the file storage system with its initial text
+ PromptManager::Prompt.create(
+ id: @chat_prompt_id,
+ text: "Today's date is #{now.strftime('%Y-%m-%d')} and the current time is #{now.strftime('%H:%M:%S')}"
+ )
+
+ # Capture self for the handlers
+ session_instance = self
+
+ # Set up cleanup handlers only after prompt is created
+ at_exit { session_instance.send(:cleanup_chat_prompt) }
+ Signal.trap('INT') {
+ session_instance.send(:cleanup_chat_prompt)
+ exit
+ }
+
+ # Access this chat session's prompt object in order to do the dynamic things
+ # in follow up prompts that can be done in the batch mode like shell substitution. etc.
+ @chat_prompt = PromptManager::Prompt.new(
+ id: @chat_prompt_id,
+ directives_processor: @directive_processor,
+ erb_flag: AIA.config.erb,
+ envar_flag: AIA.config.shell,
+ external_binding: binding,
+ )
+
+ Reline::HISTORY.clear
+
+ # Load context files if any and not skipping
+ if !skip_context_files && AIA.config.context_files && !AIA.config.context_files.empty?
+ context = AIA.config.context_files.map do |file|
+ File.read(file) rescue "Error reading file: #{file}"
+ end.join("\n\n")
+
+ if !context.empty?
+ # Add context files content to context
+ @context_manager.add_to_context(role: 'user', content: context)
+
+ # Process the context
+ operation_type = @chat_processor.determine_operation_type(AIA.config.model)
+ @ui_presenter.display_thinking_animation
+ response = @chat_processor.process_prompt(@context_manager.get_context, operation_type)
+
+ # Add AI response to context
+ @context_manager.add_to_context(role: 'assistant', content: response)
+
+ # Output the response
+ @chat_processor.output_response(response)
+ @chat_processor.speak(response)
+ @ui_presenter.display_separator
+ end
  end
- end
-
- # Check for piped input (STDIN not a TTY and has data)
- if !STDIN.tty?
- # Save the original STDIN
- original_stdin = STDIN.dup
 
- # Read the piped input
- piped_input = STDIN.read.strip
+ # Handle piped input
+ if !STDIN.tty?
+ original_stdin = STDIN.dup
+ piped_input = STDIN.read.strip
+ STDIN.reopen('/dev/tty')
 
- # Reopen STDIN to the terminal
- STDIN.reopen('/dev/tty')
+ if !piped_input.empty?
+ @chat_prompt.text = piped_input
+ processed_input = @chat_prompt.to_s
 
- if !piped_input.empty?
- # Add piped input to context
- @context_manager.add_to_context(role: 'user', content: piped_input)
+ @context_manager.add_to_context(role: 'user', content: processed_input)
 
- # Process the piped input
- operation_type = @chat_processor.determine_operation_type(AIA.config.model)
- @ui_presenter.display_thinking_animation
- response = @chat_processor.process_prompt(@context_manager.get_context, operation_type)
+ operation_type = @chat_processor.determine_operation_type(AIA.config.model)
+ @ui_presenter.display_thinking_animation
+ response = @chat_processor.process_prompt(@context_manager.get_context, operation_type)
 
- # Add AI response to context
- @context_manager.add_to_context(role: 'assistant', content: response)
+ @context_manager.add_to_context(role: 'assistant', content: response)
+ @chat_processor.output_response(response)
+ @chat_processor.speak(response) if AIA.speak?
+ @ui_presenter.display_separator
+ end
 
- # Output the response
- @chat_processor.output_response(response)
- @chat_processor.speak(response) if AIA.speak?
- @ui_presenter.display_separator
+ STDIN.reopen(original_stdin)
  end
-
- # Restore original stdin when done with piped input processing
- STDIN.reopen(original_stdin)
- end
-
- loop do
- # Get user input
- prompt = @ui_presenter.ask_question
 
+ # Main chat loop
+ loop do
+ follow_up_prompt = @ui_presenter.ask_question
 
+ break if follow_up_prompt.nil? || follow_up_prompt.strip.downcase == 'exit' || follow_up_prompt.strip.empty?
 
- break if prompt.nil? || prompt.strip.downcase == 'exit' || prompt.strip.empty?
-
- if AIA.config.out_file
- File.open(AIA.config.out_file, 'a') do |file|
- file.puts "\nYou: #{prompt}"
+ if AIA.config.out_file
+ File.open(AIA.config.out_file, 'a') do |file|
+ file.puts "\nYou: #{follow_up_prompt}"
+ end
  end
- end
 
- if @directive_processor.directive?(prompt)
- directive_output = @directive_processor.process(prompt, @context_manager) # Pass context_manager
-
- # Add check for specific directives like //clear that might modify context
- if prompt.strip.start_with?('//clear')
- # Context is likely cleared within directive_processor.process now
- # or add @context_manager.clear_context here if not handled internally
- @ui_presenter.display_info("Chat context cleared.")
- next # Skip API call after clearing
- elsif directive_output.nil? || directive_output.strip.empty?
- next # Skip API call if directive produced no output and wasn't //clear
- else
- puts "\n#{directive_output}\n"
- # Optionally add directive output to context or handle as needed
- # Example: Add a summary to context
- # @context_manager.add_to_context(role: 'assistant', content: "Directive executed. Output:\n#{directive_output}")
- # For now, just use a placeholder prompt modification:
- prompt = "I executed this directive: #{prompt}\nHere's the output: #{directive_output}\nLet's continue our conversation."
- # Fall through to add this modified prompt to context and send to AI
+ if @directive_processor.directive?(follow_up_prompt)
+ directive_output = @directive_processor.process(follow_up_prompt, @context_manager)
+
+ if follow_up_prompt.strip.start_with?('//clear')
+ # The directive processor has called context_manager.clear_context
+ # but we need a more aggressive approach to fully clear all context
+
+ # First, clear the context manager's context
+ @context_manager.clear_context(keep_system_prompt: true)
+
+ # Second, try clearing the client's context
+ if AIA.config.client && AIA.config.client.respond_to?(:clear_context)
+ AIA.config.client.clear_context
+ end
+
+ # Third, completely reinitialize the client to ensure fresh state
+ # This is the most aggressive approach to ensure no context remains
+ begin
+ AIA.config.client = AIA::RubyLLMAdapter.new
+ AIA.debug_me("Completely reinitialized client for //clear directive")
+ rescue => e
+ AIA.debug_me("Error reinitializing client: #{e.message}")
+ end
+
+ @ui_presenter.display_info("Chat context cleared.")
+ next
+ elsif directive_output.nil? || directive_output.strip.empty?
+ next
+ else
+ puts "\n#{directive_output}\n"
+ follow_up_prompt = "I executed this directive: #{follow_up_prompt}\nHere's the output: #{directive_output}\nLet's continue our conversation."
+ end
  end
- end
-
- # Use ContextManager instead of HistoryManager
- @context_manager.add_to_context(role: 'user', content: prompt)
 
- # Use ContextManager to get the conversation
- conversation = @context_manager.get_context # System prompt handled internally
+ @chat_prompt.text = follow_up_prompt
+ processed_prompt = @chat_prompt.to_s
 
- operation_type = @chat_processor.determine_operation_type(AIA.config.model)
- @ui_presenter.display_thinking_animation
- response = @chat_processor.process_prompt(conversation, operation_type)
+ @context_manager.add_to_context(role: 'user', content: processed_prompt)
+ conversation = @context_manager.get_context
 
- @ui_presenter.display_ai_response(response)
+ operation_type = @chat_processor.determine_operation_type(AIA.config.model)
+ @ui_presenter.display_thinking_animation
+ response = @chat_processor.process_prompt(conversation, operation_type)
 
- # Use ContextManager instead of HistoryManager
- @context_manager.add_to_context(role: 'assistant', content: response)
+ @ui_presenter.display_ai_response(response)
+ @context_manager.add_to_context(role: 'assistant', content: response)
+ @chat_processor.speak(response)
 
- @chat_processor.speak(response)
+ @ui_presenter.display_separator
+ end
 
- @ui_presenter.display_separator
+ ensure
+ @ui_presenter.display_chat_end
  end
+ end
 
- @ui_presenter.display_chat_end
+ private
+
+ def cleanup_chat_prompt
+ if @chat_prompt_id
+ puts "[DEBUG] Cleaning up chat prompt: #{@chat_prompt_id}" if AIA.debug?
+ begin
+ @chat_prompt.delete
+ @chat_prompt_id = nil # Prevent repeated attempts if error occurs elsewhere
+ rescue => e
+ STDERR.puts "[ERROR] Failed to delete chat prompt #{@chat_prompt_id}: #{e.class} - #{e.message}"
+ STDERR.puts e.backtrace.join("\n")
+ end
+ end
  end
  end
  end
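
Session#start_chat now backs every interactive session with a throwaway PromptManager prompt whose id is derived from the start time, so follow-up input gets the same directive, ERB, and shell processing as batch prompts; the at_exit hook and the INT trap then delete it via cleanup_chat_prompt. A condensed sketch of that lifecycle follows, using the PromptManager::Prompt calls from the diff; the constructor is simplified here (the session also passes directives_processor, erb_flag, envar_flag, and external_binding), and the follow-up text is only an example.

    require 'prompt_manager'

    now       = Time.now
    prompt_id = "chat_#{now.strftime('%Y%m%d_%H%M%S')}"

    # Create the temporary prompt in prompt storage, then load it for reuse.
    PromptManager::Prompt.create(id: prompt_id, text: "Today's date is #{now.strftime('%Y-%m-%d')}")
    chat_prompt = PromptManager::Prompt.new(id: prompt_id)

    # Make sure the temporary prompt is removed however the session ends.
    cleanup = -> { chat_prompt.delete if prompt_id }
    at_exit(&cleanup)
    Signal.trap('INT') { cleanup.call; exit }

    # Each follow-up is routed through the prompt object before reaching the model.
    chat_prompt.text = 'Summarize the last answer in one sentence.'
    processed_text   = chat_prompt.to_s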
@@ -38,10 +38,19 @@ module AIA
  def format_chat_response(response, output = $stdout)
  indent = ' '
 
+ # Convert RubyLLM::Message to string if necessary
+ response_text = if response.is_a?(RubyLLM::Message)
+ response.content.to_s
+ elsif response.respond_to?(:to_s)
+ response.to_s
+ else
+ response
+ end
+
  in_code_block = false
  language = ''
 
- response.each_line do |line|
+ response_text.each_line do |line|
  line = line.chomp
 
  # Check for code block delimiters
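
The presenter change above guards against process_prompt returning a RubyLLM::Message object rather than a plain String. The same normalization could be factored into a small helper; this is a hypothetical extraction for illustration, not code shipped in the gem.

    # Hypothetical helper mirroring the normalization added to format_chat_response.
    def response_to_text(response)
      if response.is_a?(RubyLLM::Message)
        response.content.to_s
      elsif response.respond_to?(:to_s)
        response.to_s
      else
        response
      end
    end

    response_to_text('plain string')   # => "plain string"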
data/lib/aia.rb CHANGED
@@ -4,9 +4,12 @@
  # The AIA module serves as the namespace for the AIA application, which
  # provides an interface for interacting with AI models and managing prompts.
 
- require 'ai_client'
  require 'ruby_llm'
+ require_relative 'extensions/ruby_llm/chat'
+
  require 'prompt_manager'
+ require 'mcp_client'
+
  require 'debug_me'
  include DebugMe
  $DEBUG_ME = false
@@ -18,7 +21,6 @@ require_relative 'aia/version'
  require_relative 'aia/config'
  require_relative 'aia/shell_command_executor'
  require_relative 'aia/prompt_handler'
- require_relative 'aia/ai_client_adapter'
  require_relative 'aia/ruby_llm_adapter'
  require_relative 'aia/directive_processor'
  require_relative 'aia/history_manager'
@@ -78,14 +80,14 @@ module AIA
  end
 
  prompt_handler = PromptHandler.new
-
+
  # Initialize the appropriate client adapter based on configuration
  @config.client = if @config.adapter == 'ruby_llm'
  RubyLLMAdapter.new
  else
  AIClientAdapter.new
  end
-
+
  session = Session.new(prompt_handler)
 
  session.start