llama_bot_rails 0.1.7 → 0.1.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +22 -22
- data/app/channels/llama_bot_rails/chat_channel.rb +51 -4
- data/app/controllers/llama_bot_rails/agent_controller.rb +78 -5
- data/app/views/llama_bot_rails/agent/chat.html.erb +250 -127
- data/app/views/llama_bot_rails/agent/chat_ws.html.erb +1178 -0
- data/config/routes.rb +3 -0
- data/lib/generators/llama_bot_rails/install/install_generator.rb +35 -5
- data/lib/generators/llama_bot_rails/install/templates/agent_state_builder.rb.erb +22 -0
- data/lib/llama_bot_rails/agent_state_builder.rb +12 -7
- data/lib/llama_bot_rails/engine.rb +1 -1
- data/lib/llama_bot_rails/llama_bot.rb +47 -0
- data/lib/llama_bot_rails/railtie.rb +19 -0
- data/lib/llama_bot_rails/version.rb +1 -1
- data/lib/llama_bot_rails.rb +7 -1
- metadata +4 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: '0850d756a8d86153485d9fdbcb9a0154834e646050da9169b3269d7937d6760f'
+  data.tar.gz: c5276496549f2b055ec08dbfdf2615b47d431437f1d62845ff8b6c5d8baa0f7c
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 13c32b136629c642fb149a85197b445495e4c40f80a68402e5b7ab5f76d255b514a5584059df1e9bbe883f68a912c12d9de6811aed4de7f2d55d4ace6f23e432
+  data.tar.gz: b0d26dad8ee336083c702301c7ba36a6aa5926c65220ea205af6dec0abc941366c4af5de0126ed580251673fc935c693de717dc779ffbe0ba1f0f1d8d82e0dab
data/README.md
CHANGED
@@ -11,9 +11,10 @@ Chat with a powerful agent that has access to your models, your application cont
 ---
 
 ## 🎥 **See it in action** (30-Second Demo)
+
+
+
 
-👉
-“Welcome to the future of Rails + AI.”
 
 ### The agent can:
 
@@ -35,26 +36,11 @@ bundle add llama_bot_rails
 # 2. Install the routes & chat interface
 rails generate llama_bot_rails:install
 
-# 3.
-
-
-
-
-# 4. Set up your environment
-python3 -m venv venv
-
-source venv/bin/activate
-
-pip install -r requirements.txt
-
-echo "OPENAI_API_KEY=your_openai_api_key_here" > .env
-
-# 5. Run the agent
-cd backend
-uvicorn app:app --reload
-
-# 6. Confirm our agent is running properly. You should see: Hello, World! 🦙💬
-curl http://localhost:8000/hello
+# 3. Run the LlamaBot backend easily with Docker
+docker run \
+  -e OPENAI_API_KEY=(your-key) \
+  -p 8000:8000 \
+  kody06/llamabot-backend
 
 # 7. Start your Rails server.
 rails server
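A quick way to confirm the container is up before starting Rails, carried over from the deleted step 6 above (the /hello endpoint and the expected greeting come from the removed instructions):

```ruby
require 'net/http'

# Expect "Hello, World! 🦙💬" if the LlamaBot backend container is running
# and published on localhost:8000 as in the docker run command above.
puts Net::HTTP.get(URI('http://localhost:8000/hello'))
```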
@@ -80,6 +66,20 @@ open http://localhost:3000/llama_bot/agent/chat
 
 ---
 
+## ⚙️ Rails Integration Note (for the LlamaBot Rails Gem)
+
+If you're using the llama_bot_rails gem with Docker, your Rails app must allow the Docker agent to connect back to it.
+
+Add this to your config/environments/development.rb (if it wasn't added automatically by the gem installer):
+
+```ruby
+Rails.application.configure do
+  config.hosts << /host\.docker\.internal/ # Allow Docker agent to connect to Rails
+end
+```
+
+This allows the Docker container to reach http://host.docker.internal:3000, which maps to your Rails app on the host machine.
+
 ## 🧨 **Power & Responsibility**
 
 ### ⚠️ **This gem gives the agent access to your Rails console.**
data/app/channels/llama_bot_rails/chat_channel.rb
CHANGED
@@ -4,6 +4,34 @@ require 'async/websocket'
 
 require 'json' # Ensure JSON is required if not already
 
+# Why support both a websocket connection (chat_channel.rb) and a non-websocket SSE connection?
+# Rails 6 wasn't working with our ActionCable websocket connection, so I wanted to implement SSE as well.
+
+# We also want to support a generic HTML interface that isn't dependent on Rails (in case the Rails server goes down for whatever reason, we don't lose access to LlamaBot).
+# Why have chat_channel.rb at all?
+
+# Because Ruby on Rails lacks good tooling for real-time interaction that isn't through ActionCable.
+# For "cancel" requests: a websocket is a two-way connection, so we can send a 'cancel' in.
+# To support legacy LlamaPress functionality.
+# We chose to implement it with ActionCable plus Async WebSockets.
+# But it's Ruby on Rails specific, and is best for UI/UX experiences.
+
+# SSE is better for other clients that aren't Ruby on Rails specific, and for when you want just a simple SSE approach.
+# This does add some complexity, though.
+
+# We now have 2 different paradigms of front-end JavaScript consuming from LlamaBot:
+#   1. ActionCable consumption
+#   2. StreamedResponse consumption
+
+# We also have 2 new middleware layers:
+#   ActionCable <-> chat_channel.rb <-> /ws <-> request_handler.py
+#   HTTPS <-> agent_controller.rb <-> LlamaBot.rb <-> FastAPI HTTPS
+
+# So this increases the overall surface area of the application.
+
+# This (chat_channel.rb) is deprecated and will be removed over time, in favor of the simpler SSE approach.
+
+
 module LlamaBotRails
   class ChatChannel < ApplicationCable::Channel
     # _chat.html.erb front-end subscribes to this channel in _websocket.html.erb.
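To make the "StreamedResponse consumption" paradigm named in these new comments concrete, here is a minimal sketch of consuming the new SSE endpoint from plain Ruby rather than front-end JavaScript. The /llama_bot mount point and the `message` param are assumptions based on the README and the send_message action shown further below, not a documented client API:

```ruby
require 'net/http'
require 'json'

# Hypothetical SSE consumer for the send_message endpoint.
uri = URI('http://localhost:3000/llama_bot/agent/send-message')
request = Net::HTTP::Post.new(uri)
request['Accept'] = 'text/event-stream'
request.set_form_data('message' => 'Hello, LlamaBot!')

Net::HTTP.start(uri.host, uri.port) do |http|
  http.request(request) do |response|
    buffer = +''
    response.read_body do |fragment|
      buffer << fragment
      # Each SSE frame is a "data: {...}" line terminated by a blank line.
      while (frame = buffer.slice!(/.*?\n\n/m))
        payload = frame[/^data: (.*)$/, 1]
        puts JSON.parse(payload)['content'] if payload
      end
    end
  end
end
```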
@@ -105,10 +133,9 @@ module LlamaBotRails
       # Forward the processed data to the LlamaBot Backend Socket
       message = data["message"]
 
-      # 1. Instantiate the builder
       builder = state_builder_class.new(
-        params:
-        context: {
+        params: data,
+        context: { api_token: @api_token }
       )
 
       # 2. Construct the LangGraph-ready state
@@ -143,7 +170,27 @@ module LlamaBotRails
     private
 
     def state_builder_class
-      LlamaBotRails.config.state_builder_class
+      builder_class_name = LlamaBotRails.config.state_builder_class || 'LlamaBotRails::AgentStateBuilder'
+
+      begin
+        builder_class_name.constantize
+      rescue NameError => e
+        # If it's not the default class, try to manually load from app/llama_bot
+        if builder_class_name != 'LlamaBotRails::AgentStateBuilder'
+          llama_bot_file = Rails.root.join("app", "llama_bot", "agent_state_builder.rb")
+          if llama_bot_file.exist?
+            Rails.logger.info "[LlamaBot] Autoload failed, attempting to manually load #{llama_bot_file}"
+            begin
+              load llama_bot_file.to_s
+              return builder_class_name.constantize
+            rescue => load_error
+              Rails.logger.error "[LlamaBot] Manual load failed: #{load_error.message}"
+            end
+          end
+        end
+
+        raise NameError, "Could not load state builder class '#{builder_class_name}'. Make sure it's defined in app/llama_bot/agent_state_builder.rb or is available in your autoload paths. Original error: #{e.message}"
+      end
     end
 
     def setup_external_websocket(connection_id)
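Since this fallback keys off the configured class name, a configuration sketch may help. The initializer path, the to_prepare wrapper, and the class name below are assumptions; what is grounded in the diff is that the setting holds a String that gets constantized lazily:

```ruby
# config/initializers/llama_bot_rails.rb -- a hypothetical sketch.
# Storing the name as a String and resolving it via constantize (as the
# fallback above does) keeps code reloading working in development.
Rails.application.config.to_prepare do
  LlamaBotRails.config.state_builder_class = 'LlamaBot::AgentStateBuilder'
end
```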
data/app/controllers/llama_bot_rails/agent_controller.rb
CHANGED
@@ -1,7 +1,8 @@
 require 'llama_bot_rails/llama_bot'
 module LlamaBotRails
   class AgentController < ActionController::Base
-
+    include ActionController::Live
+    skip_before_action :verify_authenticity_token, only: [:command, :send_message]
     before_action :authenticate_agent!, only: [:command]
 
     # POST /agent/command
@@ -38,6 +39,11 @@ module LlamaBotRails
       # Render chat.html.erb
     end
 
+    # GET /agent/chat_ws
+    def chat_ws
+      # render chat_ws.html.erb
+    end
+
     def threads
       begin
         threads = LlamaBotRails::LlamaBot.get_threads
@@ -66,13 +72,75 @@ module LlamaBotRails
       end
     end
 
+    # POST /agent/send-message
+    def send_message
+      response.headers['Content-Type'] = 'text/event-stream'
+      response.headers['Cache-Control'] = 'no-cache'
+      response.headers['Connection'] = 'keep-alive'
+
+      @api_token = Rails.application.message_verifier(:llamabot_ws).generate(
+        { session_id: SecureRandom.uuid },
+        expires_in: 30.minutes
+      )
+
+      # 1. Instantiate the builder
+      builder = state_builder_class.new(
+        params: params,
+        context: { api_token: @api_token }
+      )
+
+      # 2. Construct the LangGraph-ready state
+      state_payload = builder.build
+      # sse = SSE.new(response.stream)
+
+      begin
+        LlamaBotRails::LlamaBot.send_agent_message(state_payload) do |chunk|
+          Rails.logger.info "[[LlamaBot]] Received chunk in agent_controller.rb: #{chunk}"
+          # sse.write(chunk)
+          response.stream.write "data: #{chunk.to_json}\n\n"
+
+        end
+      rescue => e
+        Rails.logger.error "Error in send_message action: #{e.message}"
+        response.stream.write "data: #{ { type: 'error', content: e.message }.to_json }\n\n"
+
+        # sse.write({ type: 'error', content: e.message })
+      ensure
+        response.stream.close
+
+        # sse.close
+      end
+    end
+
+    def test_streaming
+      response.headers['Content-Type'] = 'text/event-stream'
+      response.headers['Cache-Control'] = 'no-cache'
+      response.headers['Connection'] = 'keep-alive'
+      sse = SSE.new(response.stream)
+      sse.write({ type: 'start', content: 'Starting streaming' })
+      sleep 1
+      sse.write({ type: 'ai', content: 'This is an AI message' })
+      sleep 1
+      sse.write({ type: 'ai', content: 'This is an AI message' })
+      sleep 1
+      sse.write({ type: 'ai', content: 'This is an AI message' })
+      sleep 1
+      sse.write({ type: 'ai', content: 'This is an AI message' })
+    end
+
     private
 
     def safety_eval(input)
-
-
-
-
+      begin
+        # Change to Rails root directory for file operations
+        Dir.chdir(Rails.root) do
+          # Create a safer evaluation context
+          Rails.logger.info "[[LlamaBot]] Evaluating input: #{input}"
+          binding.eval(input)
+        end
+      rescue => exception
+        Rails.logger.error "Error in safety_eval: #{exception.message}"
+        return exception.message
       end
     end
 
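The send_message action above delegates the backend call to LlamaBotRails::LlamaBot.send_agent_message, which this release adds in lib/llama_bot_rails/llama_bot.rb (+47 lines, not shown in this diff). A minimal sketch of the contract it appears to satisfy; the endpoint path and the newline-delimited JSON framing below are pure assumptions, only the "build a state, yield parsed chunks" shape is grounded in the call site:

```ruby
require 'net/http'
require 'json'

module LlamaBotRails
  class LlamaBot
    # Hypothetical shape of send_agent_message: POST the LangGraph-ready
    # state to the FastAPI backend and yield each streamed line as a
    # parsed chunk, matching the block form used in send_message above.
    def self.send_agent_message(state_payload)
      uri = URI('http://localhost:8000/llamabot-chat-message') # assumed URL
      request = Net::HTTP::Post.new(uri, 'Content-Type' => 'application/json')
      request.body = state_payload.to_json

      Net::HTTP.start(uri.host, uri.port) do |http|
        http.request(request) do |response|
          buffer = +''
          response.read_body do |fragment|
            buffer << fragment
            # Yield one parsed chunk per complete line received so far.
            while (line = buffer.slice!(/.*\n/))
              yield JSON.parse(line) unless line.strip.empty?
            end
          end
        end
      end
    end
  end
end
```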
@@ -83,5 +151,10 @@ module LlamaBotRails
     rescue ActiveSupport::MessageVerifier::InvalidSignature
       head :unauthorized
     end
+
+    def state_builder_class
+      # The user is responsible for creating a custom AgentStateBuilder if they want to use a custom agent. Otherwise, we default to LlamaBotRails::AgentStateBuilder.
+      LlamaBotRails.config.state_builder_class.constantize
+    end
   end
 end
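For the custom-builder path this method mentions, here is a minimal sketch of what app/llama_bot/agent_state_builder.rb could look like. The class name and the state keys are assumptions that depend on your agent; only the keyword-argument initialize and the build-returns-a-state-hash contract are grounded in the call sites above:

```ruby
# app/llama_bot/agent_state_builder.rb (hypothetical)
class AgentStateBuilder
  def initialize(params:, context:)
    @params = params
    @context = context
  end

  # Return the LangGraph-ready state hash your backend agent expects.
  def build
    {
      message: @params['message'],      # the user's chat input
      api_token: @context[:api_token],  # short-lived token minted per request
      agent_name: 'my_custom_agent'     # assumed routing hint for the backend
    }
  end
end
```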