khoj 1.21.5__py3-none-any.whl → 1.21.7.dev7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. khoj/database/adapters/__init__.py +12 -0
  2. khoj/database/migrations/0056_chatmodeloptions_vision_enabled.py +17 -0
  3. khoj/database/migrations/0057_merge_20240816_1409.py +13 -0
  4. khoj/database/migrations/0060_merge_20240905_1828.py +14 -0
  5. khoj/database/models/__init__.py +1 -0
  6. khoj/interface/compiled/404/index.html +1 -1
  7. khoj/interface/compiled/_next/static/chunks/{3062-a42d847c919a9ea4.js → 3062-9be9a4e34f82ed3a.js} +1 -1
  8. khoj/interface/compiled/_next/static/chunks/3678-0732dd9d2f472171.js +25 -0
  9. khoj/interface/compiled/_next/static/chunks/8423-b6a61d82233d1a82.js +1 -0
  10. khoj/interface/compiled/_next/static/chunks/9001-3b27af6d5f21df44.js +21 -0
  11. khoj/interface/compiled/_next/static/chunks/9162-0be016519a18568b.js +11 -0
  12. khoj/interface/compiled/_next/static/chunks/{9178-d23cb0dbee40a775.js → 9178-3a0baad1c172d515.js} +1 -1
  13. khoj/interface/compiled/_next/static/chunks/{9693-91b03052c5cabded.js → 9984-e410179c6fac7cf1.js} +1 -1
  14. khoj/interface/compiled/_next/static/chunks/app/agents/{page-922694b75f1fb67b.js → page-462502107217be82.js} +1 -1
  15. khoj/interface/compiled/_next/static/chunks/app/automations/page-e30a75db8719f439.js +1 -0
  16. khoj/interface/compiled/_next/static/chunks/app/chat/page-4bb4f2422f5ec5f2.js +1 -0
  17. khoj/interface/compiled/_next/static/chunks/app/factchecker/page-693fe53982bf33e1.js +1 -0
  18. khoj/interface/compiled/_next/static/chunks/app/page-c26f689e39b400ba.js +1 -0
  19. khoj/interface/compiled/_next/static/chunks/app/search/{page-dcd385f03255ef36.js → page-0798bb43c2e368bf.js} +1 -1
  20. khoj/interface/compiled/_next/static/chunks/app/settings/{page-ddcd51147d18c694.js → page-f518555f8e2fd794.js} +1 -1
  21. khoj/interface/compiled/_next/static/chunks/app/share/chat/page-1a639fb3f120fee6.js +1 -0
  22. khoj/interface/compiled/_next/static/chunks/{webpack-95cfd7a1948cfeed.js → webpack-40d9ecfe7efa5386.js} +1 -1
  23. khoj/interface/compiled/_next/static/css/2a860030cf7c384b.css +1 -0
  24. khoj/interface/compiled/_next/static/css/3e49e5ee49c6bda1.css +25 -0
  25. khoj/interface/compiled/_next/static/css/592ca99f5122e75a.css +1 -0
  26. khoj/interface/compiled/_next/static/css/5a400c87d295e68a.css +1 -0
  27. khoj/interface/compiled/_next/static/css/c808691c459e3887.css +1 -0
  28. khoj/interface/compiled/agents/index.html +1 -1
  29. khoj/interface/compiled/agents/index.txt +2 -2
  30. khoj/interface/compiled/automations/index.html +1 -1
  31. khoj/interface/compiled/automations/index.txt +3 -3
  32. khoj/interface/compiled/chat/index.html +1 -1
  33. khoj/interface/compiled/chat/index.txt +2 -2
  34. khoj/interface/compiled/factchecker/index.html +1 -1
  35. khoj/interface/compiled/factchecker/index.txt +2 -2
  36. khoj/interface/compiled/index.html +1 -1
  37. khoj/interface/compiled/index.txt +2 -2
  38. khoj/interface/compiled/search/index.html +1 -1
  39. khoj/interface/compiled/search/index.txt +2 -2
  40. khoj/interface/compiled/settings/index.html +1 -1
  41. khoj/interface/compiled/settings/index.txt +3 -3
  42. khoj/interface/compiled/share/chat/index.html +1 -1
  43. khoj/interface/compiled/share/chat/index.txt +2 -2
  44. khoj/interface/email/welcome.html +1 -1
  45. khoj/processor/conversation/anthropic/anthropic_chat.py +2 -1
  46. khoj/processor/conversation/offline/chat_model.py +7 -2
  47. khoj/processor/conversation/openai/gpt.py +21 -12
  48. khoj/processor/conversation/utils.py +39 -14
  49. khoj/processor/tools/online_search.py +6 -2
  50. khoj/routers/api.py +4 -0
  51. khoj/routers/api_chat.py +65 -18
  52. khoj/routers/helpers.py +79 -21
  53. khoj/routers/storage.py +28 -0
  54. khoj/utils/helpers.py +15 -0
  55. {khoj-1.21.5.dist-info → khoj-1.21.7.dev7.dist-info}/METADATA +4 -4
  56. {khoj-1.21.5.dist-info → khoj-1.21.7.dev7.dist-info}/RECORD +61 -58
  57. khoj/interface/compiled/_next/static/chunks/3678-8c0e55c3b5d83a22.js +0 -25
  58. khoj/interface/compiled/_next/static/chunks/8423-132ea64eac83fd43.js +0 -1
  59. khoj/interface/compiled/_next/static/chunks/9001-acbca3e19b1a5ddf.js +0 -21
  60. khoj/interface/compiled/_next/static/chunks/9162-4a6d0d0dc5e27618.js +0 -11
  61. khoj/interface/compiled/_next/static/chunks/app/automations/page-fa3163653d2a72ac.js +0 -1
  62. khoj/interface/compiled/_next/static/chunks/app/chat/page-8c9b92236d4daf4b.js +0 -1
  63. khoj/interface/compiled/_next/static/chunks/app/factchecker/page-60be5e3295e2c0bc.js +0 -1
  64. khoj/interface/compiled/_next/static/chunks/app/page-ef4e7248d37fae41.js +0 -1
  65. khoj/interface/compiled/_next/static/chunks/app/share/chat/page-699b364dc6fbf139.js +0 -1
  66. khoj/interface/compiled/_next/static/css/9d5b867ec04494a6.css +0 -25
  67. khoj/interface/compiled/_next/static/css/a22d83f18a32957e.css +0 -1
  68. khoj/interface/compiled/_next/static/css/a3530ec58b0b660f.css +0 -1
  69. khoj/interface/compiled/_next/static/css/b81e909d403fb2df.css +0 -1
  70. khoj/interface/compiled/_next/static/css/df6f4c34ec280d53.css +0 -1
  71. /khoj/interface/compiled/_next/static/{c94a08w_ZKOpws32Cwk3G → r8hsIMReT-pry3sQxOVuA}/_buildManifest.js +0 -0
  72. /khoj/interface/compiled/_next/static/{c94a08w_ZKOpws32Cwk3G → r8hsIMReT-pry3sQxOVuA}/_ssgManifest.js +0 -0
  73. {khoj-1.21.5.dist-info → khoj-1.21.7.dev7.dist-info}/WHEEL +0 -0
  74. {khoj-1.21.5.dist-info → khoj-1.21.7.dev7.dist-info}/entry_points.txt +0 -0
  75. {khoj-1.21.5.dist-info → khoj-1.21.7.dev7.dist-info}/licenses/LICENSE +0 -0
khoj/processor/conversation/offline/chat_model.py CHANGED
@@ -7,7 +7,7 @@ from typing import Any, Iterator, List, Union
 from langchain.schema import ChatMessage
 from llama_cpp import Llama
 
-from khoj.database.models import Agent, KhojUser
+from khoj.database.models import Agent, ChatModelOptions, KhojUser
 from khoj.processor.conversation import prompts
 from khoj.processor.conversation.offline.utils import download_model
 from khoj.processor.conversation.utils import (
@@ -76,7 +76,11 @@ def extract_questions_offline(
     )
 
     messages = generate_chatml_messages_with_context(
-        example_questions, model_name=model, loaded_model=offline_chat_model, max_prompt_size=max_prompt_size
+        example_questions,
+        model_name=model,
+        loaded_model=offline_chat_model,
+        max_prompt_size=max_prompt_size,
+        model_type=ChatModelOptions.ModelType.OFFLINE,
     )
 
     state.chat_lock.acquire()
@@ -201,6 +205,7 @@ def converse_offline(
         loaded_model=offline_chat_model,
         max_prompt_size=max_prompt_size,
         tokenizer_name=tokenizer_name,
+        model_type=ChatModelOptions.ModelType.OFFLINE,
     )
 
     truncated_messages = "\n".join({f"{message.content[:70]}..." for message in messages})
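
Note on the model_type argument threaded through above: the construct_structured_message helper added to khoj/processor/conversation/utils.py (see the hunks further down) only builds an image-bearing payload for ModelType.OPENAI, so offline prompts remain plain strings. A minimal sketch of that behaviour (prompt text and image URL are illustrative):

    from khoj.database.models import ChatModelOptions
    from khoj.processor.conversation.utils import construct_structured_message

    # For offline models the helper ignores the image and returns the prompt unchanged.
    message = construct_structured_message(
        message="Summarize my notes on solar panels",
        image_url="https://example.com/panel-sketch.webp",  # illustrative URL
        model_type=ChatModelOptions.ModelType.OFFLINE,
        vision_enabled=True,
    )
    assert message == "Summarize my notes on solar panels"
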
khoj/processor/conversation/openai/gpt.py CHANGED
@@ -5,13 +5,16 @@ from typing import Dict, Optional
 
 from langchain.schema import ChatMessage
 
-from khoj.database.models import Agent, KhojUser
+from khoj.database.models import Agent, ChatModelOptions, KhojUser
 from khoj.processor.conversation import prompts
 from khoj.processor.conversation.openai.utils import (
     chat_completion_with_backoff,
     completion_with_backoff,
 )
-from khoj.processor.conversation.utils import generate_chatml_messages_with_context
+from khoj.processor.conversation.utils import (
+    construct_structured_message,
+    generate_chatml_messages_with_context,
+)
 from khoj.utils.helpers import ConversationCommand, is_none_or_empty
 from khoj.utils.rawconfig import LocationData
 
@@ -24,9 +27,10 @@ def extract_questions(
     conversation_log={},
     api_key=None,
     api_base_url=None,
-    temperature=0.7,
     location_data: LocationData = None,
     user: KhojUser = None,
+    uploaded_image_url: Optional[str] = None,
+    vision_enabled: bool = False,
 ):
     """
     Infer search queries to retrieve relevant notes to answer user query
@@ -63,18 +67,18 @@ def extract_questions(
         location=location,
         username=username,
     )
-    messages = [ChatMessage(content=prompt, role="user")]
 
-    # Get Response from GPT
-    response = completion_with_backoff(
-        messages=messages,
-        model=model,
-        temperature=temperature,
-        api_base_url=api_base_url,
-        model_kwargs={"response_format": {"type": "json_object"}},
-        openai_api_key=api_key,
+    prompt = construct_structured_message(
+        message=prompt,
+        image_url=uploaded_image_url,
+        model_type=ChatModelOptions.ModelType.OPENAI,
+        vision_enabled=vision_enabled,
     )
 
+    messages = [ChatMessage(content=prompt, role="user")]
+
+    response = send_message_to_model(messages, api_key, model, response_type="json_object", api_base_url=api_base_url)
+
     # Extract, Clean Message from GPT's Response
     try:
         response = response.strip()
@@ -123,6 +127,8 @@ def converse(
     location_data: LocationData = None,
     user_name: str = None,
     agent: Agent = None,
+    image_url: Optional[str] = None,
+    vision_available: bool = False,
 ):
     """
     Converse with user using OpenAI's ChatGPT
@@ -178,6 +184,9 @@ def converse(
         model_name=model,
         max_prompt_size=max_prompt_size,
         tokenizer_name=tokenizer_name,
+        uploaded_image_url=image_url,
+        vision_enabled=vision_available,
+        model_type=ChatModelOptions.ModelType.OPENAI,
     )
     truncated_messages = "\n".join({f"{message.content[:70]}..." for message in messages})
     logger.debug(f"Conversation Context for GPT: {truncated_messages}")
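
With vision enabled and an uploaded image URL present, the prompt sent through extract_questions and converse is no longer a plain string but a list of content parts in the OpenAI vision format produced by construct_structured_message. A rough sketch of the resulting user message (question text and URL are illustrative):

    from langchain.schema import ChatMessage

    content = [
        {"type": "text", "text": "What breed is the dog in this photo?"},
        {"type": "image_url", "image_url": {"url": "https://bucket.example.com/user-1/photo.webp"}},  # illustrative URL
    ]
    messages = [ChatMessage(content=content, role="user")]
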
khoj/processor/conversation/utils.py CHANGED
@@ -12,7 +12,7 @@ from llama_cpp.llama import Llama
 from transformers import AutoTokenizer
 
 from khoj.database.adapters import ConversationAdapters
-from khoj.database.models import ClientApplication, KhojUser
+from khoj.database.models import ChatModelOptions, ClientApplication, KhojUser
 from khoj.processor.conversation.offline.utils import download_model, infer_max_tokens
 from khoj.utils import state
 from khoj.utils.helpers import is_none_or_empty, merge_dicts
@@ -101,12 +101,16 @@ def save_to_conversation_log(
     client_application: ClientApplication = None,
     conversation_id: int = None,
     automation_id: str = None,
+    uploaded_image_url: str = None,
 ):
     user_message_time = user_message_time or datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     updated_conversation = message_to_log(
         user_message=q,
         chat_response=chat_response,
-        user_message_metadata={"created": user_message_time},
+        user_message_metadata={
+            "created": user_message_time,
+            "uploadedImageData": uploaded_image_url,
+        },
         khoj_message_metadata={
             "context": compiled_references,
             "intent": {"inferred-queries": inferred_queries, "type": intent_type},
@@ -133,6 +137,13 @@ Khoj: "{inferred_queries if ("text-to-image" in intent_type) else chat_response}
     )
 
 
+# Format user and system messages to chatml format
+def construct_structured_message(message, image_url, model_type, vision_enabled):
+    if image_url and vision_enabled and model_type == ChatModelOptions.ModelType.OPENAI:
+        return [{"type": "text", "text": message}, {"type": "image_url", "image_url": {"url": image_url}}]
+    return message
+
+
 def generate_chatml_messages_with_context(
     user_message,
     system_message=None,
@@ -141,6 +152,9 @@ def generate_chatml_messages_with_context(
     loaded_model: Optional[Llama] = None,
     max_prompt_size=None,
     tokenizer_name=None,
+    uploaded_image_url=None,
+    vision_enabled=False,
+    model_type="",
 ):
     """Generate messages for ChatGPT with context from previous conversation"""
     # Set max prompt size from user config or based on pre-configured for model and machine specs
@@ -154,24 +168,35 @@ def generate_chatml_messages_with_context(
     lookback_turns = max_prompt_size // 750
 
     # Extract Chat History for Context
-    chat_logs = []
+    chatml_messages: List[ChatMessage] = []
     for chat in conversation_log.get("chat", []):
-        chat_notes = f'\n\n Notes:\n{chat.get("context")}' if chat.get("context") else "\n"
-        chat_logs += [chat["message"] + chat_notes]
+        message_notes = f'\n\n Notes:\n{chat.get("context")}' if chat.get("context") else "\n"
+        role = "user" if chat["by"] == "you" else "assistant"
+
+        message_content = chat["message"] + message_notes
 
-    rest_backnforths: List[ChatMessage] = []
-    # Extract in reverse chronological order
-    for user_msg, assistant_msg in zip(chat_logs[-2::-2], chat_logs[::-2]):
-        if len(rest_backnforths) >= 2 * lookback_turns:
+        if chat.get("uploadedImageData") and vision_enabled:
+            message_content = construct_structured_message(
+                message_content, chat.get("uploadedImageData"), model_type, vision_enabled
+            )
+
+        reconstructed_message = ChatMessage(content=message_content, role=role)
+
+        chatml_messages.insert(0, reconstructed_message)
+
+        if len(chatml_messages) >= 2 * lookback_turns:
             break
-        rest_backnforths += reciprocal_conversation_to_chatml([user_msg, assistant_msg])[::-1]
 
-    # Format user and system messages to chatml format
     messages = []
     if not is_none_or_empty(user_message):
-        messages.append(ChatMessage(content=user_message, role="user"))
-    if len(rest_backnforths) > 0:
-        messages += rest_backnforths
+        messages.append(
+            ChatMessage(
+                content=construct_structured_message(user_message, uploaded_image_url, model_type, vision_enabled),
+                role="user",
+            )
+        )
+    if len(chatml_messages) > 0:
+        messages += chatml_messages
     if not is_none_or_empty(system_message):
         messages.append(ChatMessage(content=system_message, role="system"))
 
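
Taken together, generate_chatml_messages_with_context now rebuilds history turn by turn, re-attaching any uploadedImageData from prior turns when the configured model supports vision. A hedged usage sketch (the conversation log entries, model name, and prompt size are illustrative; exact defaults depend on the installed khoj version):

    from khoj.database.models import ChatModelOptions
    from khoj.processor.conversation.utils import generate_chatml_messages_with_context

    conversation_log = {
        "chat": [
            {"by": "you", "message": "What is in this image?", "uploadedImageData": "https://bucket.example.com/diagram.webp"},
            {"by": "khoj", "message": "A hand-drawn architecture diagram."},
        ]
    }

    messages = generate_chatml_messages_with_context(
        user_message="Turn that diagram into a task list",
        conversation_log=conversation_log,
        model_name="gpt-4o",          # illustrative model name
        max_prompt_size=8000,         # passed explicitly so lookback turns can be computed
        vision_enabled=True,
        model_type=ChatModelOptions.ModelType.OPENAI,
    )
    # messages[0] is the new user turn; older turns follow, with the image turn
    # expanded into [{"type": "text", ...}, {"type": "image_url", ...}] content.
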
khoj/processor/tools/online_search.py CHANGED
@@ -56,6 +56,7 @@ async def search_online(
     subscribed: bool = False,
     send_status_func: Optional[Callable] = None,
     custom_filters: List[str] = [],
+    uploaded_image_url: str = None,
 ):
     query += " ".join(custom_filters)
     if not is_internet_connected():
@@ -64,7 +65,9 @@ async def search_online(
         return
 
     # Breakdown the query into subqueries to get the correct answer
-    subqueries = await generate_online_subqueries(query, conversation_history, location, user)
+    subqueries = await generate_online_subqueries(
+        query, conversation_history, location, user, uploaded_image_url=uploaded_image_url
+    )
     response_dict = {}
 
     if subqueries:
@@ -138,13 +141,14 @@ async def read_webpages(
     user: KhojUser,
     subscribed: bool = False,
     send_status_func: Optional[Callable] = None,
+    uploaded_image_url: str = None,
 ):
     "Infer web pages to read from the query and extract relevant information from them"
     logger.info(f"Inferring web pages to read")
     if send_status_func:
         async for event in send_status_func(f"**Inferring web pages to read**"):
             yield {ChatEvent.STATUS: event}
-    urls = await infer_webpage_urls(query, conversation_history, location, user)
+    urls = await infer_webpage_urls(query, conversation_history, location, user, uploaded_image_url)
 
     logger.info(f"Reading web pages at: {urls}")
     if send_status_func:
khoj/routers/api.py CHANGED
@@ -331,6 +331,7 @@ async def extract_references_and_questions(
     conversation_commands: List[ConversationCommand] = [ConversationCommand.Default],
     location_data: LocationData = None,
     send_status_func: Optional[Callable] = None,
+    uploaded_image_url: Optional[str] = None,
 ):
     user = request.user.object if request.user.is_authenticated else None
 
@@ -370,6 +371,7 @@ async def extract_references_and_questions(
     with timer("Extracting search queries took", logger):
         # If we've reached here, either the user has enabled offline chat or the openai model is enabled.
        conversation_config = await ConversationAdapters.aget_default_conversation_config()
+        vision_enabled = conversation_config.vision_enabled
 
        if conversation_config.model_type == ChatModelOptions.ModelType.OFFLINE:
            using_offline_chat = True
@@ -403,6 +405,8 @@ async def extract_references_and_questions(
                 conversation_log=meta_log,
                 location_data=location_data,
                 user=user,
+                uploaded_image_url=uploaded_image_url,
+                vision_enabled=vision_enabled,
             )
         elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
             api_key = conversation_config.openai_config.api_key
khoj/routers/api_chat.py CHANGED
@@ -1,4 +1,5 @@
 import asyncio
+import base64
 import json
 import logging
 import time
@@ -46,11 +47,13 @@ from khoj.routers.helpers import (
     update_telemetry_state,
     validate_conversation_config,
 )
+from khoj.routers.storage import upload_image_to_bucket
 from khoj.utils import state
 from khoj.utils.helpers import (
     AsyncIteratorWrapper,
     ConversationCommand,
     command_descriptions,
+    convert_image_to_webp,
     get_device,
     is_none_or_empty,
 )
@@ -517,21 +520,26 @@ async def set_conversation_title(
     )
 
 
-@api_chat.get("")
+class ChatRequestBody(BaseModel):
+    q: str
+    n: Optional[int] = 7
+    d: Optional[float] = None
+    stream: Optional[bool] = False
+    title: Optional[str] = None
+    conversation_id: Optional[int] = None
+    city: Optional[str] = None
+    region: Optional[str] = None
+    country: Optional[str] = None
+    timezone: Optional[str] = None
+    image: Optional[str] = None
+
+
+@api_chat.post("")
 @requires(["authenticated"])
 async def chat(
     request: Request,
     common: CommonQueryParams,
-    q: str,
-    n: int = 7,
-    d: float = None,
-    stream: Optional[bool] = False,
-    title: Optional[str] = None,
-    conversation_id: Optional[int] = None,
-    city: Optional[str] = None,
-    region: Optional[str] = None,
-    country: Optional[str] = None,
-    timezone: Optional[str] = None,
+    body: ChatRequestBody,
     rate_limiter_per_minute=Depends(
         ApiUserRateLimiter(requests=60, subscribed_requests=60, window=60, slug="chat_minute")
     ),
@@ -539,7 +547,20 @@ async def chat(
         ApiUserRateLimiter(requests=600, subscribed_requests=600, window=60 * 60 * 24, slug="chat_day")
     ),
 ):
-    async def event_generator(q: str):
+    # Access the parameters from the body
+    q = body.q
+    n = body.n
+    d = body.d
+    stream = body.stream
+    title = body.title
+    conversation_id = body.conversation_id
+    city = body.city
+    region = body.region
+    country = body.country
+    timezone = body.timezone
+    image = body.image
+
+    async def event_generator(q: str, image: str):
         start_time = time.perf_counter()
         ttft = None
         chat_metadata: dict = {}
@@ -550,6 +571,17 @@ async def chat(
         q = unquote(q)
         nonlocal conversation_id
 
+        uploaded_image_url = None
+        if image:
+            decoded_string = unquote(image)
+            base64_data = decoded_string.split(",", 1)[1]
+            image_bytes = base64.b64decode(base64_data)
+            webp_image_bytes = convert_image_to_webp(image_bytes)
+            try:
+                uploaded_image_url = upload_image_to_bucket(webp_image_bytes, request.user.object.id)
+            except:
+                uploaded_image_url = None
+
         async def send_event(event_type: ChatEvent, data: str | dict):
             nonlocal connection_alive, ttft
             if not connection_alive or await request.is_disconnected():
@@ -637,7 +669,7 @@ async def chat(
 
         if conversation_commands == [ConversationCommand.Default] or is_automated_task:
             conversation_commands = await aget_relevant_information_sources(
-                q, meta_log, is_automated_task, subscribed=subscribed
+                q, meta_log, is_automated_task, subscribed=subscribed, uploaded_image_url=uploaded_image_url
             )
             conversation_commands_str = ", ".join([cmd.value for cmd in conversation_commands])
             async for result in send_event(
@@ -645,7 +677,7 @@ async def chat(
             ):
                 yield result
 
-            mode = await aget_relevant_output_modes(q, meta_log, is_automated_task)
+            mode = await aget_relevant_output_modes(q, meta_log, is_automated_task, uploaded_image_url)
             async for result in send_event(ChatEvent.STATUS, f"**Decided Response Mode:** {mode.value}"):
                 yield result
             if mode not in conversation_commands:
@@ -693,7 +725,9 @@ async def chat(
             ):
                 yield result
 
-            response = await extract_relevant_summary(q, contextual_data, subscribed=subscribed)
+            response = await extract_relevant_summary(
+                q, contextual_data, subscribed=subscribed, uploaded_image_url=uploaded_image_url
+            )
             response_log = str(response)
             async for result in send_llm_response(response_log):
                 yield result
@@ -711,6 +745,7 @@ async def chat(
                 intent_type="summarize",
                 client_application=request.user.client_app,
                 conversation_id=conversation_id,
+                uploaded_image_url=uploaded_image_url,
             )
             return
 
@@ -753,6 +788,7 @@ async def chat(
                 conversation_id=conversation_id,
                 inferred_queries=[query_to_run],
                 automation_id=automation.id,
+                uploaded_image_url=uploaded_image_url,
             )
             async for result in send_llm_response(llm_response):
                 yield result
@@ -771,6 +807,7 @@ async def chat(
             conversation_commands,
             location,
             partial(send_event, ChatEvent.STATUS),
+            uploaded_image_url=uploaded_image_url,
         ):
             if isinstance(result, dict) and ChatEvent.STATUS in result:
                 yield result[ChatEvent.STATUS]
@@ -807,6 +844,7 @@ async def chat(
                     subscribed,
                     partial(send_event, ChatEvent.STATUS),
                     custom_filters,
+                    uploaded_image_url=uploaded_image_url,
                 ):
                     if isinstance(result, dict) and ChatEvent.STATUS in result:
                         yield result[ChatEvent.STATUS]
@@ -823,7 +861,13 @@ async def chat(
         if ConversationCommand.Webpage in conversation_commands:
             try:
                 async for result in read_webpages(
-                    defiltered_query, meta_log, location, user, subscribed, partial(send_event, ChatEvent.STATUS)
+                    defiltered_query,
+                    meta_log,
+                    location,
+                    user,
+                    subscribed,
+                    partial(send_event, ChatEvent.STATUS),
+                    uploaded_image_url=uploaded_image_url,
                 ):
                     if isinstance(result, dict) and ChatEvent.STATUS in result:
                         yield result[ChatEvent.STATUS]
@@ -869,6 +913,7 @@ async def chat(
                 online_results=online_results,
                 subscribed=subscribed,
                 send_status_func=partial(send_event, ChatEvent.STATUS),
+                uploaded_image_url=uploaded_image_url,
             ):
                 if isinstance(result, dict) and ChatEvent.STATUS in result:
                     yield result[ChatEvent.STATUS]
@@ -898,6 +943,7 @@ async def chat(
                     conversation_id=conversation_id,
                     compiled_references=compiled_references,
                     online_results=online_results,
+                    uploaded_image_url=uploaded_image_url,
                 )
                 content_obj = {
                     "intentType": intent_type,
@@ -924,6 +970,7 @@ async def chat(
                 conversation_id,
                 location,
                 user_name,
+                uploaded_image_url,
             )
 
             # Send Response
@@ -949,9 +996,9 @@ async def chat(
 
     ## Stream Text Response
     if stream:
-        return StreamingResponse(event_generator(q), media_type="text/plain")
+        return StreamingResponse(event_generator(q, image=image), media_type="text/plain")
     ## Non-Streaming Text Response
     else:
-        response_iterator = event_generator(q)
+        response_iterator = event_generator(q, image=image)
         response_data = await read_chat_stream(response_iterator)
         return Response(content=json.dumps(response_data), media_type="application/json", status_code=200)
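
Since the chat endpoint now accepts a POST body instead of GET query parameters, clients send a JSON payload matching ChatRequestBody; the optional image field is expected to be a data URL, since the handler splits on the first comma before base64-decoding. A hedged client-side sketch (host, token, conversation id, and file path are placeholders, not values from this diff):

    import base64
    import requests

    with open("photo.png", "rb") as f:
        image_b64 = base64.b64encode(f.read()).decode()

    payload = {
        "q": "What is happening in this photo?",
        "stream": False,
        "conversation_id": 42,  # placeholder conversation id
        "image": f"data:image/png;base64,{image_b64}",
    }
    response = requests.post(
        "https://app.khoj.dev/api/chat",  # placeholder host; path assumes the api_chat router is mounted at /api/chat
        json=payload,
        headers={"Authorization": "Bearer <token>"},  # placeholder API token
    )
    print(response.json())
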