llama_bot_rails 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/MIT-LICENSE +20 -0
- data/README.md +249 -0
- data/Rakefile +8 -0
- data/app/assets/config/llama_bot_rails_manifest.js +1 -0
- data/app/assets/javascripts/llama_bot_rails/application.js +7 -0
- data/app/assets/javascripts/llama_bot_rails/chat.js +13 -0
- data/app/assets/stylesheets/llama_bot_rails/application.css +15 -0
- data/app/channels/llama_bot_rails/application_cable/channel.rb +8 -0
- data/app/channels/llama_bot_rails/application_cable/connection.rb +13 -0
- data/app/channels/llama_bot_rails/chat_channel.rb +306 -0
- data/app/controllers/llama_bot_rails/agent_controller.rb +72 -0
- data/app/controllers/llama_bot_rails/application_controller.rb +4 -0
- data/app/helpers/llama_bot_rails/application_helper.rb +4 -0
- data/app/javascript/channels/consumer.js +4 -0
- data/app/jobs/llama_bot_rails/application_job.rb +4 -0
- data/app/models/llama_bot_rails/application_record.rb +5 -0
- data/app/views/layouts/llama_bot_rails/application.html.erb +17 -0
- data/app/views/llama_bot_rails/agent/chat.html.erb +962 -0
- data/bin/rails +26 -0
- data/bin/rubocop +8 -0
- data/config/initializers/llama_bot_rails.rb +2 -0
- data/config/routes.rb +6 -0
- data/lib/llama_bot_rails/agent_state_builder.rb +17 -0
- data/lib/llama_bot_rails/engine.rb +23 -0
- data/lib/llama_bot_rails/llama_bot.rb +25 -0
- data/lib/llama_bot_rails/tools/rails_console_tool.rb +20 -0
- data/lib/llama_bot_rails/version.rb +3 -0
- data/lib/llama_bot_rails.rb +10 -0
- data/lib/tasks/llama_bot_rails_tasks.rake +4 -0
- metadata +128 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA256:
  metadata.gz: 3f23b70a2547901b996afd0faf1343947fe83e5c13cd99d15ae38726a3cf6408
  data.tar.gz: 1b39d998ce51f856490d3206aeba465090e479b34360d4e6ff2ca80188cf17f7
SHA512:
  metadata.gz: f07b88075a21db84055d2ffbb517ca5679f5f38bd7df73738b3635f4669235d0faa02fab0e77d810aa360148ff90aa024634284f7c1950b031c2bd3c9b3ccc18
  data.tar.gz: ce749dc85a5c2a8378d848fc5f219fa6c3769fbcbb8b9dd30d569b879a35db8bbaac3d0964c69a62fbf1b7c1ec0404e8af6edbcffbd4eb88466528dc6a4ef68b
data/MIT-LICENSE
ADDED
@@ -0,0 +1,20 @@
Copyright Kody Kendall

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,249 @@
# LlamaBotRails

**Turn any Rails app into an AI agent in 2 minutes**

Chat with a powerful agent that has access to your models and application context and can run console commands. All powered by LangGraph + OpenAI.

[Gem Version](https://badge.fury.io/rb/llama_bot_rails)
[Ruby](https://www.ruby-lang.org/)
[Rails](https://rubyonrails.org/)

---

## **See it in action** (30-Second Demo)

[Insert GIF or YouTube link here]

"Welcome to the future of Rails + AI."

### The agent can:

- **Explore your Rails app** (models, routes, controllers)
- **Query and create data** via the Rails console
- **Take action on your behalf**
- **Understand your domain** through natural conversation

---

## **Quickstart**

```bash
# 1. Add the gem
bundle add llama_bot_rails

# 2. Install the routes & chat interface
rails generate llama_bot_rails:install

# 3. Clone & run the LangGraph backend
git clone https://github.com/kodykendall/llamabot
cd llamabot
export OPENAI_API_KEY=your_key   # export so uvicorn below can read it
cd backend && uvicorn app:app --reload

# 4. Start your Rails server
rails server

# 5. Visit the chat interface and start chatting.
open http://localhost:3000/llama_bot/agent/chat
```

**That's it.**
You can now chat with your Rails app like a new assistant.
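
The install generator wires up the routes for you; under the hood, mounting the engine by hand would look roughly like this (a minimal sketch, and the mount path is an assumption inferred from the chat URL above, not the generator's literal output):

```ruby
# config/routes.rb -- sketch; the generator's actual output may differ.
Rails.application.routes.draw do
  # Makes /llama_bot/agent/chat resolve to the gem's chat interface.
  mount LlamaBotRails::Engine => "/llama_bot"
end
```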

### Try asking:

- "What models do I have in this app?"
- "Show me the User model structure"
- "Create a test user"
- "What are my routes?"

### Prerequisites

- Rails 7.0+
- Ruby 2.7+
- Redis (for ActionCable)
- OpenAI API key

---

## **Power & Responsibility**

### ⚠️ **This gem gives the agent access to your Rails console.**

This is **incredibly powerful** -- and also potentially dangerous in production.
*Treat it like giving shell access to a developer.*

**Do not deploy this tool to production** without understanding the risks to your production data & application.

**Production safety features coming soon**

## **Architecture**

```
┌────────────────────┐      WebSocket      ┌────────────────────┐
│     Rails App      │ ◄─────────────────► │     LangGraph      │
│     (Your App)     │                     │  FastAPI (Python)  │
├────────────────────┤                     ├────────────────────┤
│   LlamaBotRails    │                     │   Agents & Tools   │
│        Gem         │                     │    (LangGraph)     │
└────────────────────┘                     └────────────────────┘
```

**What happens:**
1. **Rails frontend** provides a chat interface
2. **ActionCable WebSocket** handles real-time communication to LangGraph
3. **LangGraph backend** runs the AI agent with access to tools
4. **Agent executes** a sequence of Rails console commands, reasoning throughout the process
5. **Results stream back** to the chat interface in real time (payload sketch below)
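
To make steps 2 and 5 concrete: for each user message, the channel builds a state hash and ships it to LangGraph as JSON. A sketch based on `ChatChannel#receive` and the default `AgentStateBuilder` in this release (the literal values are illustrative):

```ruby
# What the gem assembles before writing to the external WebSocket.
builder = LlamaBotRails::AgentStateBuilder.new(
  params:  { message: "Create a test user" },                       # illustrative
  context: { thread_id: "demo-thread", api_token: "signed-token" }  # illustrative
)
state_payload = builder.build # => Hash, sent over the socket as state_payload.to_json
```

Results come back over the same socket and are re-broadcast to the browser via ActionCable as `{ message: { type: "ai", content: ... } }` payloads.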

## **Customization**

### Custom State Builder

Control what data your agent sees:

```ruby
# config/initializers/llama_bot_rails.rb
class CustomAgentStateBuilder < LlamaBotRails::AgentStateBuilder
  def build
    super.merge({
      current_user: @context[:user]&.to_json,
      app_version: Rails.version, # the Rails framework version; Rails.application has no #version method by default
      custom_context: gather_app_context
    })
  end

  private

  def gather_app_context
    {
      model_count: ActiveRecord::Base.subclasses.count,
      route_count: Rails.application.routes.routes.count,
      environment: Rails.env,
      database_name: ActiveRecord::Base.connection_db_config.database
    }
  end
end

# Configure the gem to use your builder
Rails.application.configure do
  config.llama_bot_rails.state_builder_class = "CustomAgentStateBuilder"
end
```
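
You can sanity-check which builder the channel will instantiate from `rails console`. This assumes the engine exposes the setting as `LlamaBotRails.config`, which is how `ChatChannel#state_builder_class` resolves it in this release:

```ruby
# In `rails console`:
LlamaBotRails.config.state_builder_class              # => "CustomAgentStateBuilder"
LlamaBotRails.config.state_builder_class.constantize  # the class ChatChannel will instantiate
```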

### Environment Configuration

```ruby
# config/environments/development.rb
Rails.application.configure do
  config.llama_bot_rails.enable_console_tool = true
end

# config/environments/production.rb
Rails.application.configure do
  config.llama_bot_rails.enable_console_tool = false # Disable in production
end
```

## **What You Can Build**

### Developer Assistant
- **Code exploration**: "Show me how authentication works"
- **Data analysis**: "How many users signed up this month?"
- **Quick prototyping**: "Create a basic blog post model"

## **Under the Hood**

### Real-Time Communication
- **ActionCable WebSocket**: real-time Rails <-> agent communication
- **LangGraph backend**: FastAPI + OpenAI tool orchestration

### Security
- **Secure channel separation**: per-session isolation
- **Token expiration** and automatic refresh mechanisms (see the sketch below)
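
The token side of this is visible in `ChatChannel#subscribed`: a signed, expiring token is minted with Rails' message verifier and handed to the agent. The minting call below mirrors the channel source in this release; the verification half is a sketch of how a consumer would check such a token:

```ruby
# Minting (as in ChatChannel#subscribed):
api_token = Rails.application.message_verifier(:llamabot_ws).generate(
  { session_id: SecureRandom.uuid },
  expires_in: 30.minutes
)

# Verifying (sketch of the consuming side):
begin
  payload = Rails.application.message_verifier(:llamabot_ws).verify(api_token)
  payload[:session_id] # the session the token was minted for
rescue ActiveSupport::MessageVerifier::InvalidSignature
  # tampered with, or past its 30-minute expiry
end
```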

### Command Streaming
- **`run_rails_console_command`**: execute Ruby code in the Rails context (rough sketch below)
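
The tool itself lives at `lib/llama_bot_rails/tools/rails_console_tool.rb`; its body isn't shown in this diff, so the following is only a rough mental model of what a console tool of this shape does, not the gem's actual code:

```ruby
# Hypothetical sketch -- every name here is illustrative, not the gem's API.
module LlamaBotRails
  module Tools
    class RailsConsoleTool
      # Evaluates agent-supplied Ruby inside the app process and returns the
      # result as a string. This is exactly why the README says to treat the
      # agent like a developer with shell access.
      def run(code)
        eval(code).inspect # rubocop:disable Security/Eval
      rescue StandardError => e
        "Error: #{e.class}: #{e.message}"
      end
    end
  end
end
```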

## **Requirements**

### Rails Application
- **Rails 7.0+** - modern Rails version with ActionCable support
- **Ruby 2.7+** - compatible Ruby version
- **ActionCable configured** - for real-time WebSocket communication
- **Redis** - recommended for the production ActionCable backend

### LangGraph Backend
- **Python 3.11+** - Python runtime environment
- **FastAPI application** - web framework for the agent backend
- **OpenAI API access** - for LLM capabilities
- **WebSocket support** - for real-time bidirectional communication

## Troubleshooting
- Agent not responding? Check that the backend is running and the OpenAI key is set.
- WebSocket issues? Confirm `LLAMABOT_WEBSOCKET_URL` matches the backend address (see the check below).
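
A quick check of the WebSocket setting from the Rails side (the env var name comes from `ChatChannel#setup_external_websocket` in this release):

```ruby
# bin/rails runner 'puts ENV["LLAMABOT_WEBSOCKET_URL"].inspect'
# If this prints nil or "", the channel logs a warning and skips the
# external WebSocket setup entirely.
puts ENV['LLAMABOT_WEBSOCKET_URL'].inspect
```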

## **Contributing**

We'd love your help making LlamaBotRails better!

### How to Contribute

1. **Fork the repo**
2. **Create a feature branch**: `git checkout -b my-new-feature`
3. **Make your changes** and add tests
4. **Run the test suite**: `bundle exec rspec`
5. **Submit a pull request**

### Development Setup

```bash
# Clone the repo
git clone https://github.com/kodykendall/llama_bot_rails
cd llama_bot_rails

# Install dependencies
bundle install

# Run tests
bundle exec rspec

# Test in a real Rails app
cd example_app
bundle exec rails server
```

---

## **What's Next?**

We're just getting started. Coming soon:

- **Enhanced security controls** for production deployments
- **More built-in tools** (scaffolding, API calls, database queries)
- **Customizable chat themes** and branding
- **Analytics and monitoring** for agent interactions
- **Plugin system** for custom tool development
- **Multi-agent support** for complex workflows
- **Background job integration** for long-running tasks

---

## **License**

[MIT](https://opensource.org/licenses/MIT) -- free for commercial and personal use.

---

## **Support the Project!**

If LlamaBotRails helped you, **give us a star** and **share it** with other Rails developers.

This is just the beginning. Let's build the Rails agentic future -- together.

**[Star on GitHub](https://github.com/kodykendall/llama_bot_rails)** • **[Fork the repo](https://github.com/kodykendall/llama_bot_rails/fork)** • **[Join discussions](https://github.com/kodykendall/llama_bot_rails/discussions)**

---

*Built with ❤️ by [Kody Kendall](https://kodykendall.com)*
data/app/assets/config/llama_bot_rails_manifest.js
ADDED
@@ -0,0 +1 @@
//= link_directory ../stylesheets/llama_bot_rails .css
data/app/assets/javascripts/llama_bot_rails/chat.js
ADDED
@@ -0,0 +1,13 @@
// llama_bot_rails/app/assets/javascripts/llama_bot_rails/chat.js

const chatChannel = ActionCable.createConsumer().subscriptions.create(
  { channel: "LlamaBotRails::ChatChannel" },
  {
    received(data) {
      console.log("Received:", data.message);
    },
    connected() {
      console.log("Connected to llama_bot_rails_chat");
    }
  }
);
data/app/assets/stylesheets/llama_bot_rails/application.css
ADDED
@@ -0,0 +1,15 @@
/*
 * This is a manifest file that'll be compiled into application.css, which will include all the files
 * listed below.
 *
 * Any CSS and SCSS file within this directory, lib/assets/stylesheets, vendor/assets/stylesheets,
 * or any plugin's vendor/assets/stylesheets directory can be referenced here using a relative path.
 *
 * You're free to add application-wide styles to this file and they'll appear at the bottom of the
 * compiled file so the styles you add here take precedence over styles defined in any other CSS/SCSS
 * files in this directory. Styles in this file should be added after the last require_* statement.
 * It is generally better to create a new file per style scope.
 *
 *= require_tree .
 *= require_self
 */
data/app/channels/llama_bot_rails/application_cable/connection.rb
ADDED
@@ -0,0 +1,13 @@
# llama_bot_rails/app/channels/llama_bot_rails/application_cable/connection.rb
module LlamaBotRails
  module ApplicationCable
    class Connection < ActionCable::Connection::Base
      identified_by :uuid

      def connect
        self.uuid = SecureRandom.uuid
      end
    end
  end
end
data/app/channels/llama_bot_rails/chat_channel.rb
ADDED
@@ -0,0 +1,306 @@
require 'json' # Ensure JSON is required if not already

module LlamaBotRails
  class ChatChannel < ApplicationCable::Channel
    # The _chat.html.erb front end subscribes to this channel in _websocket.html.erb.
    def subscribed
      begin
        stream_from "chat_channel_#{params[:session_id]}" # Public stream for session-based messages; this is the channel _websocket.html.erb subscribes to
        Rails.logger.info "[LlamaBot] Subscribed to chat channel with session ID: #{params[:session_id]}"

        @connection_id = SecureRandom.uuid
        Rails.logger.info "[LlamaBot] Created new connection with ID: #{@connection_id}"
        Rails.logger.info "[LlamaBot] Secure API token generated."

        # Use a begin/rescue block to catch thread creation errors
        begin
          @api_token = Rails.application.message_verifier(:llamabot_ws).generate(
            { session_id: SecureRandom.uuid },
            expires_in: 30.minutes
          )

          @worker = Thread.new do
            Thread.current[:connection_id] = @connection_id
            Thread.current.abort_on_exception = true # This will help surface errors
            setup_external_websocket(@connection_id)
          end
        rescue => e
          Rails.logger.error "[LlamaBot] Error in WebSocket subscription: #{e.message}"
          Rails.logger.error e.backtrace.join("\n")

          # Send error message to frontend before rejecting
          begin
            send_message_to_frontend("error", "Failed to establish chat connection: #{e.message}")
          rescue => send_error
            Rails.logger.error "[LlamaBot] Could not send error to frontend: #{send_error.message}"
          end

          reject # Reject the connection if there's an error
        end
      rescue ThreadError => e
        Rails.logger.error "[LlamaBot] Failed to allocate thread: #{e.message}"
        # Handle the error gracefully - potentially notify the client
        send_message_to_frontend("error", "Failed to establish connection: #{e.message}")
      end
    end

    def unsubscribed
      connection_id = @connection_id
      Rails.logger.info "[LlamaBot] Unsubscribing connection: #{connection_id}"

      begin
        # Only kill the worker if it belongs to this connection
        if @worker && @worker[:connection_id] == connection_id
          begin
            @worker.kill
            @worker = nil
            Rails.logger.info "[LlamaBot] Killed worker thread for connection: #{connection_id}"
          rescue => e
            Rails.logger.error "[LlamaBot] Error killing worker thread: #{e.message}"
          end
        end

        # Clean up async tasks with better error handling
        begin
          @listener_task&.stop rescue nil
          @keepalive_task&.stop rescue nil
          @external_ws_task&.stop rescue nil
        rescue => e
          Rails.logger.error "[LlamaBot] Error stopping async tasks: #{e.message}"
        end

        # Clean up the connection
        if @external_ws_connection
          begin
            @external_ws_connection.close
            Rails.logger.info "[LlamaBot] Closed external WebSocket connection for: #{connection_id}"
          rescue => e
            Rails.logger.warn "[LlamaBot] Could not close WebSocket connection: #{e.message}"
          end
        end

        # Force garbage collection in development/test environments to help clean up
        if !Rails.env.production?
          GC.start
        end
      rescue => e
        Rails.logger.error "[LlamaBot] Fatal error during channel unsubscription: #{e.message}"
        Rails.logger.error e.backtrace.join("\n")
      end
    end

    # Receives messages from the _chat.html.erb frontend and forwards them to the
    # llamabot FastAPI/Python backend over the external WebSocket.
    def receive(data)
      begin
        # Validate the message before it's sent to the backend. This is a
        # placeholder for now (and an example of how hooks & filters might be
        # implemented in the future); we currently use it to mock errors being thrown.
        validate_message(data)

        # Forward the processed data to the LlamaBot backend socket
        message = data["message"]

        # 1. Instantiate the builder
        builder = state_builder_class.new(
          params: { message: data["message"] },
          context: { thread_id: data["thread_id"], api_token: @api_token }
        )

        # 2. Construct the LangGraph-ready state
        state_payload = builder.build

        # 3. Ship it over the existing WebSocket
        send_to_external_application(state_payload)

        # Log the incoming WebSocket data
        Rails.logger.info "[LlamaBot] Got message from Javascript LlamaBot Frontend: #{data.inspect}"
      rescue => e
        Rails.logger.error "[LlamaBot] Error in receive method: #{e.message}"
        send_message_to_frontend("error", e.message)
      end
    end

    def send_message_to_frontend(type, message, trace_info = nil)
      # Log trace info for debugging
      Rails.logger.info "[LlamaBot] TRACE INFO DEBUG: Type: #{type}, Has trace info: #{trace_info.present?}"

      message_data = {
        type: type,
        content: message
      }

      formatted_message = { message: message_data.to_json }.to_json

      ActionCable.server.broadcast "chat_channel_#{params[:session_id]}", formatted_message
    end

    private

    def state_builder_class
      LlamaBotRails.config.state_builder_class.constantize
    end

    def setup_external_websocket(connection_id)
      Thread.current[:connection_id] = connection_id
      Rails.logger.info "[LlamaBot] Setting up external websocket for connection: #{connection_id}"

      # Check if the WebSocket URL is configured
      websocket_url = ENV['LLAMABOT_WEBSOCKET_URL']
      if websocket_url.blank?
        Rails.logger.warn "[LlamaBot] LLAMABOT_WEBSOCKET_URL not configured, skipping external WebSocket setup"
        return
      end

      # endpoint = Async::HTTP::Endpoint.parse(ENV['LLAMABOT_WEBSOCKET_URL'])
      uri = URI(websocket_url)

      uri.scheme = 'wss'
      uri.scheme = 'ws' if ENV['DEVELOPMENT_ENVIRONMENT'] == 'true'

      endpoint = Async::HTTP::Endpoint.new(
        uri,
        ssl_context: OpenSSL::SSL::SSLContext.new.tap do |ctx|
          ctx.verify_mode = OpenSSL::SSL::VERIFY_PEER
          if ENV["STAGING_ENVIRONMENT"] == 'true'
            ctx.ca_file = '/usr/local/etc/ca-certificates/cert.pem'
            # M2 Air: ctx.ca_file = '/etc/ssl/cert.pem'
            ctx.cert = OpenSSL::X509::Certificate.new(File.read(File.expand_path('~/.ssl/llamapress/cert.pem')))
            ctx.key = OpenSSL::PKey::RSA.new(File.read(File.expand_path('~/.ssl/llamapress/key.pem')))
          elsif ENV['DEVELOPMENT_ENVIRONMENT'] == 'true'
            # No extra context configuration needed in development
            ctx.verify_mode = OpenSSL::SSL::VERIFY_NONE
          else # production
            ctx.ca_file = '/etc/ssl/certs/ca-certificates.crt'
          end
        end
      )

      # Initialize the connection and store it in an instance variable
      @external_ws_task = Async do |task|
        begin
          @external_ws_connection = Async::WebSocket::Client.connect(endpoint)
          Rails.logger.info "[LlamaBot] Connected to external WebSocket for connection: #{connection_id}"

          # Tell the llamabot frontend that we've connected to the backend
          formatted_message = { message: { type: "external_ws_pong" } }.to_json
          ActionCable.server.broadcast "chat_channel_#{params[:session_id]}", formatted_message

          # Store tasks in instance variables so we can clean them up later
          @listener_task = task.async do
            listen_to_external_websocket(@external_ws_connection)
          end

          @keepalive_task = task.async do
            send_keep_alive_pings(@external_ws_connection)
          end

          # Wait for tasks to complete or connection to close
          [@listener_task, @keepalive_task].each(&:wait)
        rescue => e
          Rails.logger.error "[LlamaBot] Failed to connect to external WebSocket for connection #{connection_id}: #{e.message}"
        ensure
          # Clean up tasks if they exist
          @listener_task&.stop
          @keepalive_task&.stop
          @external_ws_connection&.close
        end
      end
    end

    # Listen for messages from the LlamaBot backend
    def listen_to_external_websocket(connection)
      while message = connection.read

        # TODO: fix the ping/pong keepalive issue
        # if message.type == :ping
        #   # respond with :pong
        #   connection.write(Async::WebSocket::Messages::ControlFrame.new(:pong, frame.data))
        #   connection.flush
        #   next
        # end

        # Extract the actual message content
        if message.buffer
          message_content = message.buffer # Use .buffer to get the message content
        else
          message_content = message.content
        end

        Rails.logger.info "[LlamaBot] Received from external WebSocket: #{message_content}"

        begin
          parsed_message = JSON.parse(message_content)

          if parsed_message["type"] != "pong"
            # byebug (debugging hook left in place)
          end

          case parsed_message["type"]
          when "ai"
            # Add any additional handling for write_code messages here
            formatted_message = { message: { type: "ai", content: parsed_message['content'], base_message: parsed_message["base_message"] } }.to_json
          when "tool"
            # Add any additional handling for tool messages here
            formatted_message = { message: { type: "tool", content: parsed_message['content'], base_message: parsed_message["base_message"] } }.to_json
          when "error"
            Rails.logger.error "[LlamaBot] ---------Received error message!----------"
            response = parsed_message['content']
            formatted_message = { message: message_content }.to_json
            Rails.logger.error "[LlamaBot] ---------------------> Response: #{response}"
            Rails.logger.error "[LlamaBot] ---------Completed error message!----------"
          when "pong"
            # Tell the llamabot frontend that we've received a pong response and are still connected
            formatted_message = { message: { type: "pong" } }.to_json
          end
        rescue JSON::ParserError => e
          Rails.logger.error "[LlamaBot] Failed to parse message as JSON: #{e.message}"
        end
        ActionCable.server.broadcast "chat_channel_#{params[:session_id]}", formatted_message
      end
    end

    # Send periodic keep-alive pings to the LlamaBot backend
    def send_keep_alive_pings(connection)
      loop do
        ping_message = {
          type: 'ping',
          connection_id: @connection_id,
          connection_state: !connection.closed? ? 'connected' : 'disconnected',
          connection_class: connection.class.name
        }.to_json
        connection.write(ping_message)
        connection.flush
        Rails.logger.debug "[LlamaBot] Sent keep-alive ping: #{ping_message}"
        Async::Task.current.sleep(30)
      end
    rescue => e
      Rails.logger.error "[LlamaBot] Error in keep-alive ping: #{e.message} | Connection type: #{connection.class.name}"
    end

    # Send messages from the user to the LlamaBot backend socket
    def send_to_external_application(message)
      # ChatMessage.create(content: message_content, user: current_user, chat_conversation: ChatConversation.last, ai_chat_message: true, created_at: Time.now)

      payload = message.to_json
      if @external_ws_connection
        begin
          @external_ws_connection.write(payload)
          @external_ws_connection.flush
          Rails.logger.info "[LlamaBot] Sent message to external WebSocket: #{payload}"
        rescue => e
          Rails.logger.error "[LlamaBot] Error sending message to external WebSocket: #{e.message}"
        end
      else
        Rails.logger.error "[LlamaBot] External WebSocket connection not established"
        # Optionally, you might want to attempt to reconnect here
      end
    end

    def validate_message(data)
      # This is a simple method that can be easily mocked
      true
    end
  end # Single end statement to close the ChatChannel class
end
|