llama_bot_rails 0.1.7 → 0.1.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 22dd1ec3cd8056d6ad567a987afb3aee4590816f37477eeefa81e975d07b59a6
-  data.tar.gz: f38dfc3262e9794f7371e6d16c9be4f382f638eac84657850c725b0f85264731
+  metadata.gz: 8a6895de16432a4a2200c1623a25c68c09897b2a6fb3bf55977a2824f3578310
+  data.tar.gz: c39ee31cf9fa2166de1d48434ab3228a6db960328a6565826242e57e5a22a678
 SHA512:
-  metadata.gz: fd7d9a5386b68dcee1d2dd672df9a8ea6fa6c5860d2f6a829d7de73d9c28cfec0810addd344c3f341351c743a350afd8018bf6aff4d687611807bb9da37a4cef
-  data.tar.gz: b3dbc4afc368553e968b587ccda61905912c9190ba6bf05d2f59003f556f45a4613595bb8e2938661b30e10de3a9c0ada1b1c1541777b7d577f5910e71121c07
+  metadata.gz: 9711a2a8cfe8e485210b84314860eb098113de321ccfd1a93be15608240312d45ff54d852b965eb60f0d84b530620e856d837d28dc7a8b8a3f958e470859b6a2
+  data.tar.gz: e175b5d7df84b42fff07dec9a0daed0384bf77891fc42611ab83ba087131b879a97aec66b77d154ccd9c5f89ed3db2df3e53052d0a937f5e80c8b84cc778c34e
data/README.md CHANGED
@@ -11,9 +11,10 @@ Chat with a powerful agent that has access to your models, your application cont
 ---
 
 ## 🎥 **See it in action** (30-Second Demo)
+![Demo](./assets/LlamaBotRailsGifSmaller.gif)
+
+
 
-👉 ![LlamaBot Demo](https://llamapress-ai-image-uploads.s3.us-west-2.amazonaws.com/pp1s3l4iskwabnq0gi5tx8mi9mue)
-“Welcome to the future of Rails + AI.”
 
 ### The agent can:
 
@@ -35,26 +36,11 @@ bundle add llama_bot_rails
 # 2. Install the routes & chat interface
 rails generate llama_bot_rails:install
 
-# 3. Clone & run the LangGraph backend
-git clone https://github.com/kodykendall/llamabot
-
-cd llamabot
-
-# 4. Set up your environment
-python3 -m venv venv
-
-source venv/bin/activate
-
-pip install -r requirements.txt
-
-echo "OPENAI_API_KEY=your_openai_api_key_here" > .env
-
-# 5. Run the agent
-cd backend
-uvicorn app:app --reload
-
-# 6. Confirm our agent is running properly. You should see: Hello, World! 🦙💬
-curl http://localhost:8000/hello
+# 3. Run the LlamaBot backend easily with Docker
+docker run \
+  -e OPENAI_API_KEY=(your-key) \
+  -p 8000:8000 \
+  kody06/llamabot-backend
 
 # 7. Start your Rails server.
 rails server
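
The pre-Docker instructions removed above ended with a `curl http://localhost:8000/hello` sanity check. If you want the same check against the Dockerized backend, here is a minimal Ruby equivalent; it is a sketch that assumes the `/hello` route from those old instructions is still served on port 8000:

```ruby
# Sanity check for the Dockerized backend -- a sketch; assumes the /hello
# route from the pre-Docker instructions still exists on port 8000.
require 'net/http'

puts Net::HTTP.get(URI('http://localhost:8000/hello')) # Expect: Hello, World! 🦙💬
```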
@@ -80,6 +66,20 @@ open http://localhost:3000/llama_bot/agent/chat
 
 ---
 
+## ⚙️ Rails Integration Note (for the LlamaBot Rails gem)
+
+If you're using the llama_bot_rails gem with Docker, your Rails app must allow the Docker agent to connect back to it.
+
+Add this to your config/environments/development.rb (if it wasn't added automatically by the gem installer):
+
+```ruby
+Rails.application.configure do
+  config.hosts << /host\.docker\.internal/ # Allow the Docker agent to connect to Rails
+end
+```
+
+This allows the Docker container to reach http://host.docker.internal:3000, which maps to your Rails app on the host machine.
+
 ## 🧨 **Power & Responsibility**
 
 ### ⚠️ **This gem gives the agent access to your Rails console.**
data/app/channels/llama_bot_rails/chat_channel.rb CHANGED
@@ -4,6 +4,34 @@ require 'async/websocket'
 
 require 'json' # Ensure JSON is required if not already
 
+# Why support both a websocket connection (chat_channel.rb) and a non-websocket SSE connection?
+# Rails 6 wasn't working with our ActionCable websocket connection, so I wanted to implement SSE as well.
+# We also want to support a generic HTML interface that isn't dependent on Rails (if the Rails server goes down for whatever reason, we don't lose access to LlamaBot).
+
+# Why have chat_channel.rb at all?
+# - Because Ruby on Rails lacks good tooling for real-time interaction that isn't through ActionCable.
+# - For "cancel" requests: a websocket is a two-way connection, so we can send a 'cancel' in.
+# - To support legacy LlamaPress stuff.
+# We chose to implement it with ActionCable plus Async WebSockets.
+# But it's Ruby on Rails specific, and is best for UI/UX experiences.
+
+# SSE is better for clients that aren't Rails specific, and for cases where you just want a simple SSE approach.
+# This does add some complexity, though.
+
+# We now have 2 different paradigms of front-end JavaScript consuming from LlamaBot:
+# - ActionCable consumption
+# - StreamedResponse consumption
+
+# We also have 2 new middleware layers:
+#   ActionCable <-> chat_channel.rb <-> /ws <-> request_handler.py
+#   HTTPS <-> agent_controller.rb <-> LlamaBot.rb <-> FastAPI HTTPS
+
+# So this increases the overall surface area of the application.
+
+# This channel is deprecated and will be removed over time, in favor of a simple SSE approach.
+
+
 module LlamaBotRails
   class ChatChannel < ApplicationCable::Channel
     # _chat.html.erb front-end subscribes to this channel in _websocket.html.erb.
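The comments above contrast ActionCable consumption with plain streamed-response consumption. For a feel of the second paradigm, here is a minimal non-Rails client sketch in Ruby against the SSE endpoint this release adds (the path is taken from the chat view's fetch call later in this diff; chunk-boundary buffering is omitted for brevity, so this is illustrative rather than production-ready):

```ruby
# Minimal SSE consumer sketch for POST /llama_bot/agent/send_message.
# Assumes a Rails server on localhost:3000; not part of the released gem.
require 'net/http'
require 'json'

uri = URI('http://localhost:3000/llama_bot/agent/send_message')
request = Net::HTTP::Post.new(uri, 'Content-Type' => 'application/json')
request.body = { message: 'Hello, LlamaBot!', thread_id: nil }.to_json

Net::HTTP.start(uri.host, uri.port) do |http|
  http.request(request) do |response|
    response.read_body do |fragment|
      # Each event is written by the controller as "data: {...}\n\n"
      fragment.split("\n\n").each do |event|
        next unless event.start_with?('data: ')
        chunk = JSON.parse(event.delete_prefix('data: '))
        puts "#{chunk['type']}: #{chunk['content']}"
      end
    end
  end
end
```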
data/app/controllers/llama_bot_rails/agent_controller.rb CHANGED
@@ -1,7 +1,8 @@
 require 'llama_bot_rails/llama_bot'
 module LlamaBotRails
   class AgentController < ActionController::Base
-    skip_before_action :verify_authenticity_token, only: [:command]
+    include ActionController::Live
+    skip_before_action :verify_authenticity_token, only: [:command, :send_message]
     before_action :authenticate_agent!, only: [:command]
 
     # POST /agent/command
@@ -38,6 +39,11 @@ module LlamaBotRails
       # Render chat.html.erb
     end
 
+    # GET /agent/chat_ws
+    def chat_ws
+      # render chat_ws.html.erb
+    end
+
     def threads
       begin
         threads = LlamaBotRails::LlamaBot.get_threads
@@ -66,13 +72,75 @@ module LlamaBotRails
       end
     end
 
+    # POST /agent/send_message
+    def send_message
+      response.headers['Content-Type'] = 'text/event-stream'
+      response.headers['Cache-Control'] = 'no-cache'
+      response.headers['Connection'] = 'keep-alive'
+
+      @api_token = Rails.application.message_verifier(:llamabot_ws).generate(
+        { session_id: SecureRandom.uuid },
+        expires_in: 30.minutes
+      )
+
+      # 1. Instantiate the builder
+      builder = state_builder_class.new(
+        params: { message: params[:message] },
+        context: { thread_id: params[:thread_id], api_token: @api_token }
+      )
+
+      # 2. Construct the LangGraph-ready state
+      state_payload = builder.build
+      # sse = SSE.new(response.stream)
+
+      begin
+        LlamaBotRails::LlamaBot.send_agent_message(state_payload) do |chunk|
+          Rails.logger.info "[[LlamaBot]] Received chunk in agent_controller.rb: #{chunk}"
+          # sse.write(chunk)
+          response.stream.write "data: #{chunk.to_json}\n\n"
+        end
+      rescue => e
+        Rails.logger.error "Error in send_message action: #{e.message}"
+        response.stream.write "data: #{ { type: 'error', content: e.message }.to_json }\n\n"
+        # sse.write({ type: 'error', content: e.message })
+      ensure
+        response.stream.close
+        # sse.close
+      end
+    end
+
+    def test_streaming
+      response.headers['Content-Type'] = 'text/event-stream'
+      response.headers['Cache-Control'] = 'no-cache'
+      response.headers['Connection'] = 'keep-alive'
+      sse = SSE.new(response.stream)
+      sse.write({ type: 'start', content: 'Starting streaming' })
+      sleep 1
+      sse.write({ type: 'ai', content: 'This is an AI message' })
+      sleep 1
+      sse.write({ type: 'ai', content: 'This is an AI message' })
+      sleep 1
+      sse.write({ type: 'ai', content: 'This is an AI message' })
+      sleep 1
+      sse.write({ type: 'ai', content: 'This is an AI message' })
+    end
+
     private
 
     def safety_eval(input)
-      # Change to Rails root directory for file operations
-      Dir.chdir(Rails.root) do
-        # Create a safer evaluation context
-        binding.eval(input)
+      begin
+        # Change to Rails root directory for file operations
+        Dir.chdir(Rails.root) do
+          # Create a safer evaluation context
+          Rails.logger.info "[[LlamaBot]] Evaluating input: #{input}"
+          binding.eval(input)
+        end
+      rescue => exception
+        Rails.logger.error "Error in safety_eval: #{exception.message}"
+        return exception.message
       end
     end
 
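The send_message action above signs a short-lived token with Rails.application.message_verifier(:llamabot_ws) and hands it to the agent so the agent can authenticate its callbacks to Rails. A round-trip sketch of that mechanism (standard Rails API; the payload shape mirrors the action above):

```ruby
# Token round-trip sketch using the :llamabot_ws verifier from send_message.
verifier = Rails.application.message_verifier(:llamabot_ws)

token = verifier.generate({ session_id: SecureRandom.uuid }, expires_in: 30.minutes)

payload = verifier.verify(token)   # Raises ActiveSupport::MessageVerifier::InvalidSignature
payload[:session_id]               # if the token was tampered with or has expired.
```

That InvalidSignature exception is exactly what the authenticate_agent! rescue in the next hunk turns into a 401.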
@@ -83,5 +151,10 @@ module LlamaBotRails
     rescue ActiveSupport::MessageVerifier::InvalidSignature
       head :unauthorized
     end
+
+    def state_builder_class
+      # The user is responsible for creating a custom AgentStateBuilder if they want to use a custom agent.
+      # Otherwise, we default to LlamaBotRails::AgentStateBuilder.
+      LlamaBotRails.config.state_builder_class.constantize
+    end
   end
 end
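state_builder_class is resolved from configuration, so an app can supply its own builder. Judging from how send_message instantiates it (new(params:, context:) followed by #build), a custom builder would look something like this sketch; MyAgentStateBuilder and the config assignment are hypothetical illustrations, not part of this diff:

```ruby
# Hypothetical custom state builder matching the interface used in send_message.
class MyAgentStateBuilder
  def initialize(params:, context:)
    @params  = params
    @context = context
  end

  # Must return the LangGraph-ready state hash passed to send_agent_message.
  def build
    {
      message:   @params[:message],
      thread_id: @context[:thread_id],
      api_token: @context[:api_token]
    }
  end
end

# e.g. in an initializer (assumed setter, mirroring the constantize call above):
# LlamaBotRails.config.state_builder_class = "MyAgentStateBuilder"
```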
data/app/views/llama_bot_rails/agent/chat.html.erb CHANGED
@@ -145,30 +145,7 @@
      display: block;
    }
 
-    .connection-status {
-      position: absolute;
-      bottom: -2px;
-      right: -2px;
-      width: 12px;
-      height: 12px;
-      border-radius: 50%;
-      border: 2px solid var(--bg-primary);
-      transition: background-color 0.3s ease;
-      z-index: 10;
-      pointer-events: none;
-    }
-
-    .status-green {
-      background-color: #22c55e !important;
-    }
 
-    .status-yellow {
-      background-color: #eab308 !important;
-    }
-
-    .status-red {
-      background-color: #ef4444 !important;
-    }
 
    .error-modal {
      display: none;
@@ -406,7 +383,7 @@
      color: var(--text-secondary);
    }
 
-    /* Clean loading indicator - just animated text */
+    /* Enhanced loading indicator with status updates */
    .loading-indicator {
      display: none;
      align-items: center;
@@ -417,6 +394,8 @@
      background: rgba(255, 255, 255, 0.02);
      border-radius: 8px;
      border: 1px solid rgba(255, 255, 255, 0.08);
+      transition: all 0.3s ease;
+      min-height: 50px; /* Prevent layout shift during status updates */
    }
 
    .loading-indicator.visible {
@@ -425,11 +404,15 @@
 
    .loading-text {
      font-style: italic;
+      flex: 1;
+      transition: color 0.3s ease;
+      line-height: 1.4;
    }
 
    .loading-dots::after {
      content: '';
      animation: dots 1.5s steps(4, end) infinite;
+      opacity: 0.7;
    }
 
    @keyframes dots {
@@ -439,6 +422,22 @@
      80%, 100% { content: '...'; }
    }
 
+    /* Status-specific styling (note: :contains() is not standard CSS, so these rules will not match in browsers; a class toggled from JavaScript would be needed instead) */
+    .loading-indicator:has(.loading-text:contains("Error")) {
+      border-color: rgba(244, 67, 54, 0.3);
+      background: rgba(244, 67, 54, 0.05);
+    }
+
+    .loading-indicator:has(.loading-text:contains("Complete")) {
+      border-color: rgba(76, 175, 80, 0.3);
+      background: rgba(76, 175, 80, 0.05);
+    }
+
+    .loading-indicator:has(.loading-text:contains("Connected")) {
+      border-color: rgba(33, 150, 243, 0.3);
+      background: rgba(33, 150, 243, 0.05);
+    }
+
    /* Suggested Prompts Styling - Always visible above input */
    .suggested-prompts {
      margin-bottom: 16px;
@@ -510,16 +509,6 @@
    }
  </style>
 
- <% if defined?(javascript_importmap_tags) %> <!-- Rails 7+ -->
-   <%= javascript_importmap_tags %>
- <% else %> <!-- Rails 6 -->
-   <%= javascript_include_tag "application" %>
- <% end %>
-
- <%= javascript_include_tag "llama_bot_rails/application" %>
- <% if defined?(action_cable_meta_tag) %>
-   <%= action_cable_meta_tag %>
- <% end %>
  <!-- Add Snarkdown CDN -->
  <script src="https://unpkg.com/snarkdown/dist/snarkdown.umd.js"></script>
 </head>
@@ -542,7 +531,6 @@
      </button>
      <div class="logo-container">
        <img src="https://service-jobs-images.s3.us-east-2.amazonaws.com/7rl98t1weu387r43il97h6ipk1l7" alt="LlamaBot Logo" class="logo">
-        <div id="connectionStatusIconForLlamaBot" class="connection-status status-yellow"></div>
      </div>
      <h1>LlamaBot Chat</h1>
    </div>
@@ -597,95 +585,19 @@
  <script>
    let currentThreadId = null;
    let isSidebarCollapsed = false;
-    let lastPongTime = Date.now();
-    let redStatusStartTime = null;
-    let errorModalShown = false;
-    let connectionCheckInterval;
-    let subscription = null;
-
-    function waitForCableConnection(callback) {
-      const interval = setInterval(() => {
-        if (window.LlamaBotRails && LlamaBotRails.cable) {
-          clearInterval(interval);
-          callback(LlamaBotRails.cable);
-        }
-      }, 50);
-    }
-
-    waitForCableConnection((consumer) => {
-      const sessionId = crypto.randomUUID();
-
-      subscription = consumer.subscriptions.create({channel: 'LlamaBotRails::ChatChannel', session_id: sessionId}, {
-        connected() {
-          console.log('Connected to chat channel');
-          lastPongTime = Date.now();
-          loadThreads();
-          startConnectionCheck();
-        },
-        disconnected() {
-          console.log('Disconnected from chat channel');
-          updateStatusIcon('status-red');
-        },
-        received(data) {
-          const parsedData = JSON.parse(data).message;
-          switch (parsedData.type) {
-            case "ai":
-              addMessage(parsedData.content, parsedData.type, parsedData.base_message);
-              break;
-            case "tool":
-              addMessage(parsedData.content, parsedData.type, parsedData.base_message);
-              break;
-            case "error":
-              addMessage(parsedData.content, parsedData.type, parsedData.base_message);
-              break;
-            case "pong":
-              lastPongTime = Date.now();
-              break;
-          }
-        }
-      });
-    });
+    let streamingTimeout = null;
+    const STREAMING_TIMEOUT_MS = 30000; // 30 seconds timeout
 
-    function startConnectionCheck() {
-      if (connectionCheckInterval) {
-        clearInterval(connectionCheckInterval);
-      }
-      connectionCheckInterval = setInterval(updateConnectionStatus, 1000);
-    }
-
-    function updateConnectionStatus() {
-      const timeSinceLastPong = Date.now() - lastPongTime;
-
-      if (timeSinceLastPong < 30000) { // Less than 30 seconds
-        updateStatusIcon('status-green');
-        redStatusStartTime = null;
-        errorModalShown = false;
-      } else if (timeSinceLastPong < 50000) { // Between 30-50 seconds
-        updateStatusIcon('status-yellow');
-        redStatusStartTime = null;
-        errorModalShown = false;
-      } else { // More than 50 seconds
-        updateStatusIcon('status-red');
-        if (!redStatusStartTime) {
-          redStatusStartTime = Date.now();
-        } else if (Date.now() - redStatusStartTime > 5000 && !errorModalShown) { // 5 seconds in red status
-          showErrorModal();
-        }
-      }
-    }
-
-    function updateStatusIcon(statusClass) {
-      const statusIndicator = document.getElementById('connectionStatusIconForLlamaBot');
-      statusIndicator.classList.remove('status-green', 'status-yellow', 'status-red');
-      statusIndicator.classList.add(statusClass);
-    }
+    // Initialize the app
+    document.addEventListener('DOMContentLoaded', function() {
+      loadThreads();
+    });
 
    function showErrorModal() {
      const modal = document.getElementById('errorModal');
      const overlay = document.getElementById('modalOverlay');
      modal.classList.add('visible');
      overlay.classList.add('visible');
-      errorModalShown = true;
    }
 
    function closeErrorModal() {
@@ -852,6 +764,54 @@
      loadingIndicator.classList.remove('visible');
    }
 
+
+
+    function setupStreamingTimeout() {
+      // Clear any existing timeout
+      if (streamingTimeout) {
+        clearTimeout(streamingTimeout);
+      }
+
+      // Set up new timeout
+      streamingTimeout = setTimeout(() => {
+        console.warn('Streaming timeout reached');
+        hideLoadingIndicator();
+        addMessage('Request timed out. LlamaBot may be processing a complex request. Please try again.', 'error');
+      }, STREAMING_TIMEOUT_MS);
+    }
+
+    function clearStreamingTimeout() {
+      if (streamingTimeout) {
+        clearTimeout(streamingTimeout);
+        streamingTimeout = null;
+      }
+    }
+
+    // console.log('🤖 Testing streaming');
+    // testStreaming();
+
+    async function testStreaming() {
+      const response = await fetch('/llama_bot/agent/test_streaming_2');
+      const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+      let infinite_loop_protector = 0;
+      let in_infinite_loop = false;
+      while (!in_infinite_loop) { // infinite_loop_protector guards against an infinite loop
+        infinite_loop_protector++;
+        if (infinite_loop_protector > 10000) { // we shouldn't have 10000 loops, but just in case
+          in_infinite_loop = true;
+          console.error('∞ ∞ ∞ Infinite loop prevented! ∞ ∞ ∞');
+        }
+        const { done, value } = await reader.read();
+        console.log('🤖 SSE message:', value);
+        if (done) {
+          break;
+        }
+      }
+    }
+
    function selectPrompt(buttonElement) {
      const promptText = buttonElement.textContent;
      const messageInput = document.getElementById('message-input');
@@ -869,18 +829,11 @@
      }, 150);
    }
 
-    function sendMessage() {
+    async function sendMessage() {
      const input = document.getElementById('message-input');
      const message = input.value.trim();
 
      if (message) {
-        // Check if subscription is available
-        if (!subscription) {
-          console.error('WebSocket connection not established yet');
-          addMessage('Connection not ready. Please wait...', 'error');
-          return;
-        }
-
        // Clear welcome message if it exists
        const welcomeMessage = document.querySelector('.welcome-message');
        if (welcomeMessage) {
@@ -912,8 +865,178 @@
          thread_id: threadId
        };
 
-        console.log('Sending message with data:', messageData); // Debug log
-        subscription.send(messageData);
+        console.log('Sending message with data:', messageData);
+
+        try {
+          // Set up fetch for streaming
+          const response = await fetch('/llama_bot/agent/send_message', {
+            method: 'POST',
+            headers: {
+              'Content-Type': 'application/json',
+            },
+            body: JSON.stringify(messageData)
+          });
+
+          if (!response.ok) {
+            throw new Error(`HTTP error! status: ${response.status}`);
+          }
+
+          // Set up streaming timeout
+          setupStreamingTimeout();
+
+          // Set up the reader for the stream
+          const reader = response.body.getReader();
+          const decoder = new TextDecoder();
+          let buffer = '';
+
+          // const response_2 = await fetch('/llama_bot/agent/test_streaming', {
+          //   method: 'GET',
+          //   headers: {
+          //     'Content-Type': 'text/event-stream'
+          //   },
+          //   // body: JSON.stringify({
+          //   //   "message": message,
+          //   //   "thread_id": threadId
+          //   // })
+          // });
+
+          // const reader_2 = response_2.body.pipeThrough(new TextDecoderStream()).getReader();
+
+          // while (true) {
+          //   const { done, value } = await reader_2.read();
+          //   if (done) {
+          //     break;
+          //   }
+          //   // Process each SSE message (value)
+          //   console.log('🤖 SSE message:', value);
+          // }
+
+          try {
+            while (true) {
+              const { done, value } = await reader.read();
+              console.log('Got a value from the stream 🧠 Stream value:', value);
+
+              if (done) {
+                console.log('Stream completed');
+                clearStreamingTimeout();
+                break;
+              }
+
+              // Decode the chunk and add to buffer
+              buffer += decoder.decode(value, { stream: true });
+
+              // Process complete SSE events. The \n\n separators are written by the Rails controller after each event.
+              while (buffer.includes('\n\n')) {
+                const eventEnd = buffer.indexOf('\n\n');
+                const eventBlock = buffer.slice(0, eventEnd);
+                buffer = buffer.slice(eventEnd + 2);
+
+                // Parse SSE event
+                const dataLines = eventBlock.split('\n').filter(line => line.startsWith('data:'));
+                if (dataLines.length > 0) {
+
+                  // Remove the 'data:' prefix from each line
+                  const jsonData = dataLines.map(line => line.substring(5).trim()).join('');
+
+                  try {
+                    const chunk = JSON.parse(jsonData);
+                    console.log('Processing chunk:', chunk);
+
+                    if (chunk.type === 'start') {
+                      console.log('Stream started:', chunk.request_id || 'unknown');
+                    } else if (chunk.type === 'final') {
+                      console.log('Stream completed');
+                      hideLoadingIndicator();
+                      clearStreamingTimeout();
+                    } else if (chunk.type === 'error') {
+                      console.error('Server error:', chunk);
+                      hideLoadingIndicator();
+                      clearStreamingTimeout();
+                      addMessage(`Error: ${chunk.content || 'Unknown error occurred'}`, 'error');
+                    } else if (chunk.type === 'ai') {
+                      addMessage(chunk.content, 'ai', chunk);
+                      hideLoadingIndicator();
+                      clearStreamingTimeout();
+                    } else if (chunk.type === 'tool') {
+                      addMessage(chunk.content, 'tool', chunk);
+                    } else {
+                      if (chunk.content) {
+                        addMessage(chunk.content, chunk.type || 'unknown', chunk);
+                      }
+                      console.log('Other chunk type:', chunk.type, chunk);
+                    }
+
+                  } catch (parseError) {
+                    console.error('Error parsing SSE data:', parseError, 'Data:', jsonData);
+                  }
+                }
+              }
+            }
+          } finally {
+            reader.releaseLock();
+          }
+
+        } catch (error) {
+          console.error('Error in sendMessage:', error);
+          hideLoadingIndicator();
+          clearStreamingTimeout();
+
+          // Show specific error message based on error type
+          let errorMessage = 'Error sending message. Please try again.';
+          if (error.name === 'TypeError' && error.message.includes('fetch')) {
+            errorMessage = 'Connection error. Please check if LlamaBot is running.';
+          } else if (error.message.includes('HTTP error')) {
+            errorMessage = `Server error: ${error.message}`;
+          }
+
+          addMessage(errorMessage, 'error');
+        }
+      }
+    }
+
+    function processChunk(chunk) {
+      console.log('Processing chunk in fallback handler:', chunk);
+
+      // Handle specific chunk types from the Python backend
+      switch (chunk.type) {
+        case 'start':
+          console.log('Stream started:', chunk.request_id || 'unknown');
+          // Loading indicator already showing
+          break;
+
+        case 'final':
+          console.log('Stream completed');
+          hideLoadingIndicator();
+          clearStreamingTimeout();
+          break;
+
+        case 'error':
+          console.error('Server error:', chunk);
+          hideLoadingIndicator();
+          clearStreamingTimeout();
+          addMessage(`Error: ${chunk.content || 'Unknown error occurred'}`, 'error');
+          break;
+
+        case 'ai':
+          // AI message from LangGraph
+          addMessage(chunk.content, 'ai', chunk);
+          hideLoadingIndicator();
+          clearStreamingTimeout();
+          break;
+
+        case 'tool':
+          // Tool message from LangGraph
+          addMessage(chunk.content, 'tool', chunk);
+          break;
+
+        default:
+          // Handle any other message types
+          if (chunk.content) {
+            addMessage(chunk.content, chunk.type || 'unknown', chunk);
+          }
+          console.log('Other chunk type:', chunk.type, chunk);
+          break;
      }
    }