khoj 1.27.2.dev29__py3-none-any.whl → 1.28.1.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. khoj/configure.py +1 -1
  2. khoj/database/adapters/__init__.py +50 -12
  3. khoj/interface/compiled/404/index.html +1 -1
  4. khoj/interface/compiled/_next/static/chunks/1034-da58b679fcbb79c1.js +1 -0
  5. khoj/interface/compiled/_next/static/chunks/1467-b331e469fe411347.js +1 -0
  6. khoj/interface/compiled/_next/static/chunks/1603-c1568f45947e9f2c.js +1 -0
  7. khoj/interface/compiled/_next/static/chunks/3423-ff7402ae1dd66592.js +1 -0
  8. khoj/interface/compiled/_next/static/chunks/8423-e80647edf6c92c27.js +1 -0
  9. khoj/interface/compiled/_next/static/chunks/app/agents/{page-5ae1e540bb5be8a9.js → page-2beaba7c9bb750bd.js} +1 -1
  10. khoj/interface/compiled/_next/static/chunks/app/automations/{page-774ae3e033f938cd.js → page-9b5c77e0b0dd772c.js} +1 -1
  11. khoj/interface/compiled/_next/static/chunks/app/chat/page-bfc70b16ba5e51b4.js +1 -0
  12. khoj/interface/compiled/_next/static/chunks/app/factchecker/page-340bcf53abf6a2cc.js +1 -0
  13. khoj/interface/compiled/_next/static/chunks/app/{page-4dc472cf6d674004.js → page-f249666a0cbdaa0d.js} +1 -1
  14. khoj/interface/compiled/_next/static/chunks/app/search/{page-9b64f61caa5bd7f9.js → page-ab2995529ece3140.js} +1 -1
  15. khoj/interface/compiled/_next/static/chunks/app/settings/{page-7a8c382af2a7e870.js → page-89e6737b2cc9fb3a.js} +1 -1
  16. khoj/interface/compiled/_next/static/chunks/app/share/chat/{page-eb9e282691858f2e.js → page-505b07bce608b34e.js} +1 -1
  17. khoj/interface/compiled/_next/static/chunks/{webpack-2b720658ccc746f2.js → webpack-878569182b3af4c6.js} +1 -1
  18. khoj/interface/compiled/_next/static/css/{2272c73fc7a3b571.css → 26c1c33d0423a7d8.css} +1 -1
  19. khoj/interface/compiled/_next/static/css/592ca99f5122e75a.css +1 -0
  20. khoj/interface/compiled/_next/static/css/a795ee88875f4853.css +25 -0
  21. khoj/interface/compiled/_next/static/css/d738728883c68af8.css +1 -0
  22. khoj/interface/compiled/agents/index.html +1 -1
  23. khoj/interface/compiled/agents/index.txt +2 -2
  24. khoj/interface/compiled/automations/index.html +1 -1
  25. khoj/interface/compiled/automations/index.txt +2 -2
  26. khoj/interface/compiled/chat/index.html +1 -1
  27. khoj/interface/compiled/chat/index.txt +2 -2
  28. khoj/interface/compiled/factchecker/index.html +1 -1
  29. khoj/interface/compiled/factchecker/index.txt +2 -2
  30. khoj/interface/compiled/index.html +1 -1
  31. khoj/interface/compiled/index.txt +2 -2
  32. khoj/interface/compiled/search/index.html +1 -1
  33. khoj/interface/compiled/search/index.txt +2 -2
  34. khoj/interface/compiled/settings/index.html +1 -1
  35. khoj/interface/compiled/settings/index.txt +2 -2
  36. khoj/interface/compiled/share/chat/index.html +1 -1
  37. khoj/interface/compiled/share/chat/index.txt +2 -2
  38. khoj/processor/conversation/anthropic/anthropic_chat.py +14 -10
  39. khoj/processor/conversation/anthropic/utils.py +13 -2
  40. khoj/processor/conversation/google/gemini_chat.py +15 -11
  41. khoj/processor/conversation/offline/chat_model.py +18 -10
  42. khoj/processor/conversation/openai/gpt.py +11 -8
  43. khoj/processor/conversation/openai/utils.py +7 -0
  44. khoj/processor/conversation/prompts.py +156 -49
  45. khoj/processor/conversation/utils.py +146 -13
  46. khoj/processor/embeddings.py +4 -4
  47. khoj/processor/tools/online_search.py +13 -7
  48. khoj/processor/tools/run_code.py +144 -0
  49. khoj/routers/api.py +6 -6
  50. khoj/routers/api_chat.py +193 -112
  51. khoj/routers/helpers.py +107 -48
  52. khoj/routers/research.py +320 -0
  53. khoj/search_filter/date_filter.py +1 -3
  54. khoj/search_filter/file_filter.py +1 -2
  55. khoj/search_type/text_search.py +3 -3
  56. khoj/utils/helpers.py +24 -2
  57. khoj/utils/yaml.py +4 -0
  58. {khoj-1.27.2.dev29.dist-info → khoj-1.28.1.dev1.dist-info}/METADATA +3 -3
  59. {khoj-1.27.2.dev29.dist-info → khoj-1.28.1.dev1.dist-info}/RECORD +66 -63
  60. khoj/interface/compiled/_next/static/chunks/1603-5138bb7c8035d9a6.js +0 -1
  61. khoj/interface/compiled/_next/static/chunks/2697-61fcba89fd87eab4.js +0 -1
  62. khoj/interface/compiled/_next/static/chunks/3423-0b533af8bf6ac218.js +0 -1
  63. khoj/interface/compiled/_next/static/chunks/9479-ff7d8c4dae2014d1.js +0 -1
  64. khoj/interface/compiled/_next/static/chunks/app/chat/page-97f5b61aaf46d364.js +0 -1
  65. khoj/interface/compiled/_next/static/chunks/app/factchecker/page-d82403db2866bad8.js +0 -1
  66. khoj/interface/compiled/_next/static/css/4cae6c0e5c72fb2d.css +0 -1
  67. khoj/interface/compiled/_next/static/css/76d55eb435962b19.css +0 -25
  68. khoj/interface/compiled/_next/static/css/ddcc0cf73e062476.css +0 -1
  69. /khoj/interface/compiled/_next/static/{atzIseFarmC7TIwq2BgHC → K7ZigmRDrBfpIN7jxKQsA}/_buildManifest.js +0 -0
  70. /khoj/interface/compiled/_next/static/{atzIseFarmC7TIwq2BgHC → K7ZigmRDrBfpIN7jxKQsA}/_ssgManifest.js +0 -0
  71. /khoj/interface/compiled/_next/static/chunks/{1970-60c96aed937a4928.js → 1970-90dd510762d820ba.js} +0 -0
  72. /khoj/interface/compiled/_next/static/chunks/{9417-2ca87207387fc790.js → 9417-951f46451a8dd6d7.js} +0 -0
  73. {khoj-1.27.2.dev29.dist-info → khoj-1.28.1.dev1.dist-info}/WHEEL +0 -0
  74. {khoj-1.27.2.dev29.dist-info → khoj-1.28.1.dev1.dist-info}/entry_points.txt +0 -0
  75. {khoj-1.27.2.dev29.dist-info → khoj-1.28.1.dev1.dist-info}/licenses/LICENSE +0 -0
khoj/routers/api_chat.py CHANGED
@@ -6,7 +6,7 @@ import time
6
6
  import uuid
7
7
  from datetime import datetime
8
8
  from functools import partial
9
- from typing import Dict, Optional
9
+ from typing import Any, Dict, List, Optional
10
10
  from urllib.parse import unquote
11
11
 
12
12
  from asgiref.sync import sync_to_async
@@ -25,11 +25,13 @@ from khoj.database.adapters import (
25
25
  )
26
26
  from khoj.database.models import Agent, KhojUser
27
27
  from khoj.processor.conversation.prompts import help_message, no_entries_found
28
- from khoj.processor.conversation.utils import save_to_conversation_log
28
+ from khoj.processor.conversation.utils import defilter_query, save_to_conversation_log
29
29
  from khoj.processor.image.generate import text_to_image
30
30
  from khoj.processor.speech.text_to_speech import generate_text_to_speech
31
31
  from khoj.processor.tools.online_search import read_webpages, search_online
32
+ from khoj.processor.tools.run_code import run_code
32
33
  from khoj.routers.api import extract_references_and_questions
34
+ from khoj.routers.email import send_query_feedback
33
35
  from khoj.routers.helpers import (
34
36
  ApiImageRateLimiter,
35
37
  ApiUserRateLimiter,
@@ -37,13 +39,16 @@ from khoj.routers.helpers import (
37
39
  ChatRequestBody,
38
40
  CommonQueryParams,
39
41
  ConversationCommandRateLimiter,
42
+ DeleteMessageRequestBody,
43
+ FeedbackData,
40
44
  agenerate_chat_response,
41
45
  aget_relevant_information_sources,
42
46
  aget_relevant_output_modes,
43
47
  construct_automation_created_message,
44
48
  create_automation,
45
- extract_relevant_summary,
49
+ extract_relevant_info,
46
50
  generate_excalidraw_diagram,
51
+ generate_summary_from_files,
47
52
  get_conversation_command,
48
53
  is_query_empty,
49
54
  is_ready_to_chat,
@@ -51,6 +56,10 @@ from khoj.routers.helpers import (
51
56
  update_telemetry_state,
52
57
  validate_conversation_config,
53
58
  )
59
+ from khoj.routers.research import (
60
+ InformationCollectionIteration,
61
+ execute_information_collection,
62
+ )
54
63
  from khoj.routers.storage import upload_image_to_bucket
55
64
  from khoj.utils import state
56
65
  from khoj.utils.helpers import (
@@ -68,16 +77,12 @@ from khoj.utils.rawconfig import FileFilterRequest, FilesFilterRequest, Location
68
77
  # Initialize Router
69
78
  logger = logging.getLogger(__name__)
70
79
  conversation_command_rate_limiter = ConversationCommandRateLimiter(
71
- trial_rate_limit=100, subscribed_rate_limit=6000, slug="command"
80
+ trial_rate_limit=20, subscribed_rate_limit=75, slug="command"
72
81
  )
73
82
 
74
83
 
75
84
  api_chat = APIRouter()
76
85
 
77
- from pydantic import BaseModel
78
-
79
- from khoj.routers.email import send_query_feedback
80
-
81
86
 
82
87
  @api_chat.get("/conversation/file-filters/{conversation_id}", response_class=Response)
83
88
  @requires(["authenticated"])
@@ -139,12 +144,6 @@ def remove_file_filter(request: Request, filter: FileFilterRequest) -> Response:
139
144
  return Response(content=json.dumps(file_filters), media_type="application/json", status_code=200)
140
145
 
141
146
 
142
- class FeedbackData(BaseModel):
143
- uquery: str
144
- kquery: str
145
- sentiment: str
146
-
147
-
148
147
  @api_chat.post("/feedback")
149
148
  @requires(["authenticated"])
150
149
  async def sendfeedback(request: Request, data: FeedbackData):
@@ -159,10 +158,10 @@ async def text_to_speech(
159
158
  common: CommonQueryParams,
160
159
  text: str,
161
160
  rate_limiter_per_minute=Depends(
162
- ApiUserRateLimiter(requests=20, subscribed_requests=20, window=60, slug="chat_minute")
161
+ ApiUserRateLimiter(requests=30, subscribed_requests=30, window=60, slug="chat_minute")
163
162
  ),
164
163
  rate_limiter_per_day=Depends(
165
- ApiUserRateLimiter(requests=50, subscribed_requests=300, window=60 * 60 * 24, slug="chat_day")
164
+ ApiUserRateLimiter(requests=100, subscribed_requests=600, window=60 * 60 * 24, slug="chat_day")
166
165
  ),
167
166
  ) -> Response:
168
167
  voice_model = await ConversationAdapters.aget_voice_model_config(request.user.object)
@@ -527,6 +526,19 @@ async def set_conversation_title(
527
526
  )
528
527
 
529
528
 
529
+ @api_chat.delete("/conversation/message", response_class=Response)
530
+ @requires(["authenticated"])
531
+ def delete_message(request: Request, delete_request: DeleteMessageRequestBody) -> Response:
532
+ user = request.user.object
533
+ success = ConversationAdapters.delete_message_by_turn_id(
534
+ user, delete_request.conversation_id, delete_request.turn_id
535
+ )
536
+ if success:
537
+ return Response(content=json.dumps({"status": "ok"}), media_type="application/json", status_code=200)
538
+ else:
539
+ return Response(content=json.dumps({"status": "error", "message": "Message not found"}), status_code=404)
540
+
541
+
530
542
  @api_chat.post("")
531
543
  @requires(["authenticated"])
532
544
  async def chat(
@@ -534,10 +546,10 @@ async def chat(
534
546
  common: CommonQueryParams,
535
547
  body: ChatRequestBody,
536
548
  rate_limiter_per_minute=Depends(
537
- ApiUserRateLimiter(requests=60, subscribed_requests=200, window=60, slug="chat_minute")
549
+ ApiUserRateLimiter(requests=20, subscribed_requests=20, window=60, slug="chat_minute")
538
550
  ),
539
551
  rate_limiter_per_day=Depends(
540
- ApiUserRateLimiter(requests=600, subscribed_requests=6000, window=60 * 60 * 24, slug="chat_day")
552
+ ApiUserRateLimiter(requests=100, subscribed_requests=600, window=60 * 60 * 24, slug="chat_day")
541
553
  ),
542
554
  image_rate_limiter=Depends(ApiImageRateLimiter(max_images=10, max_combined_size_mb=20)),
543
555
  ):
@@ -548,6 +560,7 @@ async def chat(
548
560
  stream = body.stream
549
561
  title = body.title
550
562
  conversation_id = body.conversation_id
563
+ turn_id = str(body.turn_id or uuid.uuid4())
551
564
  city = body.city
552
565
  region = body.region
553
566
  country = body.country or get_country_name_from_timezone(body.timezone)
@@ -563,9 +576,11 @@ async def chat(
563
576
  user: KhojUser = request.user.object
564
577
  event_delimiter = "␃🔚␗"
565
578
  q = unquote(q)
579
+ train_of_thought = []
566
580
  nonlocal conversation_id
581
+
567
582
  tracer: dict = {
568
- "mid": f"{uuid.uuid4()}",
583
+ "mid": turn_id,
569
584
  "cid": conversation_id,
570
585
  "uid": user.id,
571
586
  "khoj_version": state.khoj_version,
@@ -583,7 +598,7 @@ async def chat(
583
598
  uploaded_images.append(uploaded_image)
584
599
 
585
600
  async def send_event(event_type: ChatEvent, data: str | dict):
586
- nonlocal connection_alive, ttft
601
+ nonlocal connection_alive, ttft, train_of_thought
587
602
  if not connection_alive or await request.is_disconnected():
588
603
  connection_alive = False
589
604
  logger.warning(f"User {user} disconnected from {common.client} client")
@@ -591,11 +606,14 @@ async def chat(
591
606
  try:
592
607
  if event_type == ChatEvent.END_LLM_RESPONSE:
593
608
  collect_telemetry()
594
- if event_type == ChatEvent.START_LLM_RESPONSE:
609
+ elif event_type == ChatEvent.START_LLM_RESPONSE:
595
610
  ttft = time.perf_counter() - start_time
611
+ elif event_type == ChatEvent.STATUS:
612
+ train_of_thought.append({"type": event_type.value, "data": data})
613
+
596
614
  if event_type == ChatEvent.MESSAGE:
597
615
  yield data
598
- elif event_type == ChatEvent.REFERENCES or stream:
616
+ elif event_type == ChatEvent.REFERENCES or ChatEvent.METADATA or stream:
599
617
  yield json.dumps({"type": event_type.value, "data": data}, ensure_ascii=False)
600
618
  except asyncio.CancelledError as e:
601
619
  connection_alive = False
@@ -639,6 +657,11 @@ async def chat(
639
657
  metadata=chat_metadata,
640
658
  )
641
659
 
660
+ if is_query_empty(q):
661
+ async for result in send_llm_response("Please ask your query to get started."):
662
+ yield result
663
+ return
664
+
642
665
  conversation_commands = [get_conversation_command(query=q, any_references=True)]
643
666
 
644
667
  conversation = await ConversationAdapters.aget_conversation_by_user(
@@ -654,6 +677,9 @@ async def chat(
654
677
  return
655
678
  conversation_id = conversation.id
656
679
 
680
+ async for event in send_event(ChatEvent.METADATA, {"conversationId": str(conversation_id), "turnId": turn_id}):
681
+ yield event
682
+
657
683
  agent: Agent | None = None
658
684
  default_agent = await AgentAdapters.aget_default_agent()
659
685
  if conversation.agent and conversation.agent != default_agent:
@@ -665,22 +691,23 @@ async def chat(
665
691
  agent = default_agent
666
692
 
667
693
  await is_ready_to_chat(user)
668
-
669
694
  user_name = await aget_user_name(user)
670
695
  location = None
671
696
  if city or region or country or country_code:
672
697
  location = LocationData(city=city, region=region, country=country, country_code=country_code)
673
698
 
674
- if is_query_empty(q):
675
- async for result in send_llm_response("Please ask your query to get started."):
676
- yield result
677
- return
678
-
679
699
  user_message_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
680
700
 
681
701
  meta_log = conversation.conversation_log
682
702
  is_automated_task = conversation_commands == [ConversationCommand.AutomatedTask]
683
703
 
704
+ researched_results = ""
705
+ online_results: Dict = dict()
706
+ code_results: Dict = dict()
707
+ ## Extract Document References
708
+ compiled_references: List[Any] = []
709
+ inferred_queries: List[Any] = []
710
+
684
711
  if conversation_commands == [ConversationCommand.Default] or is_automated_task:
685
712
  conversation_commands = await aget_relevant_information_sources(
686
713
  q,
@@ -691,6 +718,11 @@ async def chat(
691
718
  agent=agent,
692
719
  tracer=tracer,
693
720
  )
721
+
722
+ # If we're doing research, we don't want to do anything else
723
+ if ConversationCommand.Research in conversation_commands:
724
+ conversation_commands = [ConversationCommand.Research]
725
+
694
726
  conversation_commands_str = ", ".join([cmd.value for cmd in conversation_commands])
695
727
  async for result in send_event(
696
728
  ChatEvent.STATUS, f"**Chose Data Sources to Search:** {conversation_commands_str}"
@@ -709,6 +741,44 @@ async def chat(
709
741
  await conversation_command_rate_limiter.update_and_check_if_valid(request, cmd)
710
742
  q = q.replace(f"/{cmd.value}", "").strip()
711
743
 
744
+ defiltered_query = defilter_query(q)
745
+
746
+ if conversation_commands == [ConversationCommand.Research]:
747
+ async for research_result in execute_information_collection(
748
+ request=request,
749
+ user=user,
750
+ query=defiltered_query,
751
+ conversation_id=conversation_id,
752
+ conversation_history=meta_log,
753
+ query_images=uploaded_images,
754
+ agent=agent,
755
+ send_status_func=partial(send_event, ChatEvent.STATUS),
756
+ user_name=user_name,
757
+ location=location,
758
+ file_filters=conversation.file_filters if conversation else [],
759
+ tracer=tracer,
760
+ ):
761
+ if isinstance(research_result, InformationCollectionIteration):
762
+ if research_result.summarizedResult:
763
+ if research_result.onlineContext:
764
+ online_results.update(research_result.onlineContext)
765
+ if research_result.codeContext:
766
+ code_results.update(research_result.codeContext)
767
+ if research_result.context:
768
+ compiled_references.extend(research_result.context)
769
+
770
+ researched_results += research_result.summarizedResult
771
+
772
+ else:
773
+ yield research_result
774
+
775
+ # researched_results = await extract_relevant_info(q, researched_results, agent)
776
+ logger.info(f"Researched Results: {researched_results}")
777
+
778
+ for cmd in conversation_commands:
779
+ await conversation_command_rate_limiter.update_and_check_if_valid(request, cmd)
780
+ q = q.replace(f"/{cmd.value}", "").strip()
781
+
712
782
  used_slash_summarize = conversation_commands == [ConversationCommand.Summarize]
713
783
  file_filters = conversation.file_filters if conversation else []
714
784
  # Skip trying to summarize if
@@ -733,48 +803,24 @@ async def chat(
733
803
  async for result in send_llm_response(response_log):
734
804
  yield result
735
805
  else:
736
- try:
737
- file_object = None
738
- if await EntryAdapters.aagent_has_entries(agent):
739
- file_names = await EntryAdapters.aget_agent_entry_filepaths(agent)
740
- if len(file_names) > 0:
741
- file_object = await FileObjectAdapters.async_get_file_objects_by_name(
742
- None, file_names[0], agent
743
- )
744
-
745
- if len(file_filters) > 0:
746
- file_object = await FileObjectAdapters.async_get_file_objects_by_name(user, file_filters[0])
747
-
748
- if len(file_object) == 0:
749
- response_log = "Sorry, I couldn't find the full text of this file. Please re-upload the document and try again."
750
- async for result in send_llm_response(response_log):
751
- yield result
752
- return
753
- contextual_data = " ".join([file.raw_text for file in file_object])
754
- if not q:
755
- q = "Create a general summary of the file"
756
- async for result in send_event(
757
- ChatEvent.STATUS, f"**Constructing Summary Using:** {file_object[0].file_name}"
758
- ):
759
- yield result
760
-
761
- response = await extract_relevant_summary(
762
- q,
763
- contextual_data,
764
- conversation_history=meta_log,
765
- query_images=uploaded_images,
766
- user=user,
767
- agent=agent,
768
- tracer=tracer,
769
- )
770
- response_log = str(response)
771
- async for result in send_llm_response(response_log):
772
- yield result
773
- except Exception as e:
774
- response_log = "Error summarizing file. Please try again, or contact support."
775
- logger.error(f"Error summarizing file for {user.email}: {e}", exc_info=True)
776
- async for result in send_llm_response(response_log):
777
- yield result
806
+ async for response in generate_summary_from_files(
807
+ q=q,
808
+ user=user,
809
+ file_filters=file_filters,
810
+ meta_log=meta_log,
811
+ query_images=uploaded_images,
812
+ agent=agent,
813
+ send_status_func=partial(send_event, ChatEvent.STATUS),
814
+ tracer=tracer,
815
+ ):
816
+ if isinstance(response, dict) and ChatEvent.STATUS in response:
817
+ yield response[ChatEvent.STATUS]
818
+ else:
819
+ if isinstance(response, str):
820
+ response_log = response
821
+ async for result in send_llm_response(response):
822
+ yield result
823
+
778
824
  await sync_to_async(save_to_conversation_log)(
779
825
  q,
780
826
  response_log,
@@ -786,6 +832,7 @@ async def chat(
786
832
  conversation_id=conversation_id,
787
833
  query_images=uploaded_images,
788
834
  tracer=tracer,
835
+ train_of_thought=train_of_thought,
789
836
  )
790
837
  return
791
838
 
@@ -794,7 +841,7 @@ async def chat(
794
841
  if not q:
795
842
  conversation_config = await ConversationAdapters.aget_user_conversation_config(user)
796
843
  if conversation_config == None:
797
- conversation_config = await ConversationAdapters.aget_default_conversation_config()
844
+ conversation_config = await ConversationAdapters.aget_default_conversation_config(user)
798
845
  model_type = conversation_config.model_type
799
846
  formatted_help = help_message.format(model=model_type, version=state.khoj_version, device=get_device())
800
847
  async for result in send_llm_response(formatted_help):
@@ -830,6 +877,7 @@ async def chat(
830
877
  automation_id=automation.id,
831
878
  query_images=uploaded_images,
832
879
  tracer=tracer,
880
+ train_of_thought=train_of_thought,
833
881
  )
834
882
  async for result in send_llm_response(llm_response):
835
883
  yield result
@@ -837,49 +885,49 @@ async def chat(
837
885
 
838
886
  # Gather Context
839
887
  ## Extract Document References
840
- compiled_references, inferred_queries, defiltered_query = [], [], q
841
- try:
842
- async for result in extract_references_and_questions(
843
- request,
844
- meta_log,
845
- q,
846
- (n or 7),
847
- d,
848
- conversation_id,
849
- conversation_commands,
850
- location,
851
- partial(send_event, ChatEvent.STATUS),
852
- query_images=uploaded_images,
853
- agent=agent,
854
- tracer=tracer,
855
- ):
856
- if isinstance(result, dict) and ChatEvent.STATUS in result:
857
- yield result[ChatEvent.STATUS]
858
- else:
859
- compiled_references.extend(result[0])
860
- inferred_queries.extend(result[1])
861
- defiltered_query = result[2]
862
- except Exception as e:
863
- error_message = f"Error searching knowledge base: {e}. Attempting to respond without document references."
864
- logger.error(error_message, exc_info=True)
865
- async for result in send_event(
866
- ChatEvent.STATUS, "Document search failed. I'll try respond without document references"
867
- ):
868
- yield result
869
-
870
- if not is_none_or_empty(compiled_references):
871
- headings = "\n- " + "\n- ".join(set([c.get("compiled", c).split("\n")[0] for c in compiled_references]))
872
- # Strip only leading # from headings
873
- headings = headings.replace("#", "")
874
- async for result in send_event(ChatEvent.STATUS, f"**Found Relevant Notes**: {headings}"):
875
- yield result
888
+ if not ConversationCommand.Research in conversation_commands:
889
+ try:
890
+ async for result in extract_references_and_questions(
891
+ request,
892
+ meta_log,
893
+ q,
894
+ (n or 7),
895
+ d,
896
+ conversation_id,
897
+ conversation_commands,
898
+ location,
899
+ partial(send_event, ChatEvent.STATUS),
900
+ query_images=uploaded_images,
901
+ agent=agent,
902
+ tracer=tracer,
903
+ ):
904
+ if isinstance(result, dict) and ChatEvent.STATUS in result:
905
+ yield result[ChatEvent.STATUS]
906
+ else:
907
+ compiled_references.extend(result[0])
908
+ inferred_queries.extend(result[1])
909
+ defiltered_query = result[2]
910
+ except Exception as e:
911
+ error_message = (
912
+ f"Error searching knowledge base: {e}. Attempting to respond without document references."
913
+ )
914
+ logger.error(error_message, exc_info=True)
915
+ async for result in send_event(
916
+ ChatEvent.STATUS, "Document search failed. I'll try respond without document references"
917
+ ):
918
+ yield result
876
919
 
877
- online_results: Dict = dict()
920
+ if not is_none_or_empty(compiled_references):
921
+ headings = "\n- " + "\n- ".join(set([c.get("compiled", c).split("\n")[0] for c in compiled_references]))
922
+ # Strip only leading # from headings
923
+ headings = headings.replace("#", "")
924
+ async for result in send_event(ChatEvent.STATUS, f"**Found Relevant Notes**: {headings}"):
925
+ yield result
878
926
 
879
- if conversation_commands == [ConversationCommand.Notes] and not await EntryAdapters.auser_has_entries(user):
880
- async for result in send_llm_response(f"{no_entries_found.format()}"):
881
- yield result
882
- return
927
+ if conversation_commands == [ConversationCommand.Notes] and not await EntryAdapters.auser_has_entries(user):
928
+ async for result in send_llm_response(f"{no_entries_found.format()}"):
929
+ yield result
930
+ return
883
931
 
884
932
  if ConversationCommand.Notes in conversation_commands and is_none_or_empty(compiled_references):
885
933
  conversation_commands.remove(ConversationCommand.Notes)
@@ -948,6 +996,33 @@ async def chat(
948
996
  ):
949
997
  yield result
950
998
 
999
+ ## Gather Code Results
1000
+ if ConversationCommand.Code in conversation_commands:
1001
+ try:
1002
+ context = f"# Iteration 1:\n#---\nNotes:\n{compiled_references}\n\nOnline Results:{online_results}"
1003
+ async for result in run_code(
1004
+ defiltered_query,
1005
+ meta_log,
1006
+ context,
1007
+ location,
1008
+ user,
1009
+ partial(send_event, ChatEvent.STATUS),
1010
+ query_images=uploaded_images,
1011
+ agent=agent,
1012
+ tracer=tracer,
1013
+ ):
1014
+ if isinstance(result, dict) and ChatEvent.STATUS in result:
1015
+ yield result[ChatEvent.STATUS]
1016
+ else:
1017
+ code_results = result
1018
+ async for result in send_event(ChatEvent.STATUS, f"**Ran code snippets**: {len(code_results)}"):
1019
+ yield result
1020
+ except ValueError as e:
1021
+ logger.warning(
1022
+ f"Failed to use code tool: {e}. Attempting to respond without code results",
1023
+ exc_info=True,
1024
+ )
1025
+
951
1026
  ## Send Gathered References
952
1027
  async for result in send_event(
953
1028
  ChatEvent.REFERENCES,
@@ -955,6 +1030,7 @@ async def chat(
955
1030
  "inferredQueries": inferred_queries,
956
1031
  "context": compiled_references,
957
1032
  "onlineContext": online_results,
1033
+ "codeContext": code_results,
958
1034
  },
959
1035
  ):
960
1036
  yield result
@@ -1004,6 +1080,7 @@ async def chat(
1004
1080
  online_results=online_results,
1005
1081
  query_images=uploaded_images,
1006
1082
  tracer=tracer,
1083
+ train_of_thought=train_of_thought,
1007
1084
  )
1008
1085
  content_obj = {
1009
1086
  "intentType": intent_type,
@@ -1061,6 +1138,7 @@ async def chat(
1061
1138
  online_results=online_results,
1062
1139
  query_images=uploaded_images,
1063
1140
  tracer=tracer,
1141
+ train_of_thought=train_of_thought,
1064
1142
  )
1065
1143
 
1066
1144
  async for result in send_llm_response(json.dumps(content_obj)):
@@ -1076,6 +1154,7 @@ async def chat(
1076
1154
  conversation,
1077
1155
  compiled_references,
1078
1156
  online_results,
1157
+ code_results,
1079
1158
  inferred_queries,
1080
1159
  conversation_commands,
1081
1160
  user,
@@ -1083,8 +1162,10 @@ async def chat(
1083
1162
  conversation_id,
1084
1163
  location,
1085
1164
  user_name,
1165
+ researched_results,
1086
1166
  uploaded_images,
1087
1167
  tracer,
1168
+ train_of_thought,
1088
1169
  )
1089
1170
 
1090
1171
  # Send Response