khoj 1.27.2.dev18__py3-none-any.whl → 1.27.2.dev130__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- khoj/database/adapters/__init__.py +34 -10
- khoj/interface/compiled/404/index.html +1 -1
- khoj/interface/compiled/_next/static/chunks/1034-da58b679fcbb79c1.js +1 -0
- khoj/interface/compiled/_next/static/chunks/1467-5a191c1cd5bf0b83.js +1 -0
- khoj/interface/compiled/_next/static/chunks/1603-5d70d9dfcdcb1f10.js +1 -0
- khoj/interface/compiled/_next/static/chunks/3423-fa918f4e5365a35e.js +1 -0
- khoj/interface/compiled/_next/static/chunks/8423-3ad0bfb299801220.js +1 -0
- khoj/interface/compiled/_next/static/chunks/app/chat/page-7dc98df9c88828f0.js +1 -0
- khoj/interface/compiled/_next/static/chunks/app/factchecker/page-d887f55fe6d4f35d.js +1 -0
- khoj/interface/compiled/_next/static/chunks/app/{page-8f22b790e50dd722.js → page-d46244282af16509.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/share/chat/{page-6a01e07fb244c10c.js → page-505b07bce608b34e.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/{webpack-31239d193815e49e.js → webpack-8ae5ce45161bd98e.js} +1 -1
- khoj/interface/compiled/_next/static/css/{2272c73fc7a3b571.css → 26c1c33d0423a7d8.css} +1 -1
- khoj/interface/compiled/_next/static/css/e9c5fe555dd3050b.css +25 -0
- khoj/interface/compiled/agents/index.html +1 -1
- khoj/interface/compiled/agents/index.txt +2 -2
- khoj/interface/compiled/automations/index.html +1 -1
- khoj/interface/compiled/automations/index.txt +2 -2
- khoj/interface/compiled/chat/index.html +1 -1
- khoj/interface/compiled/chat/index.txt +2 -2
- khoj/interface/compiled/factchecker/index.html +1 -1
- khoj/interface/compiled/factchecker/index.txt +2 -2
- khoj/interface/compiled/index.html +1 -1
- khoj/interface/compiled/index.txt +2 -2
- khoj/interface/compiled/search/index.html +1 -1
- khoj/interface/compiled/search/index.txt +2 -2
- khoj/interface/compiled/settings/index.html +1 -1
- khoj/interface/compiled/settings/index.txt +2 -2
- khoj/interface/compiled/share/chat/index.html +1 -1
- khoj/interface/compiled/share/chat/index.txt +2 -2
- khoj/processor/conversation/anthropic/anthropic_chat.py +19 -10
- khoj/processor/conversation/anthropic/utils.py +37 -6
- khoj/processor/conversation/google/gemini_chat.py +23 -13
- khoj/processor/conversation/google/utils.py +34 -10
- khoj/processor/conversation/offline/chat_model.py +40 -15
- khoj/processor/conversation/openai/gpt.py +25 -10
- khoj/processor/conversation/openai/utils.py +43 -9
- khoj/processor/conversation/prompts.py +131 -22
- khoj/processor/conversation/utils.py +299 -6
- khoj/processor/image/generate.py +2 -0
- khoj/processor/tools/online_search.py +19 -8
- khoj/processor/tools/run_code.py +144 -0
- khoj/routers/api.py +11 -6
- khoj/routers/api_chat.py +177 -88
- khoj/routers/helpers.py +155 -59
- khoj/routers/research.py +321 -0
- khoj/search_filter/date_filter.py +1 -3
- khoj/search_filter/file_filter.py +1 -2
- khoj/search_type/text_search.py +3 -3
- khoj/utils/helpers.py +15 -2
- khoj/utils/yaml.py +4 -0
- {khoj-1.27.2.dev18.dist-info → khoj-1.27.2.dev130.dist-info}/METADATA +2 -1
- {khoj-1.27.2.dev18.dist-info → khoj-1.27.2.dev130.dist-info}/RECORD +61 -58
- khoj/interface/compiled/_next/static/chunks/1603-5138bb7c8035d9a6.js +0 -1
- khoj/interface/compiled/_next/static/chunks/2697-61fcba89fd87eab4.js +0 -1
- khoj/interface/compiled/_next/static/chunks/3423-8e9c420574a9fbe3.js +0 -1
- khoj/interface/compiled/_next/static/chunks/9479-a5e7ff4c7d1d7ee7.js +0 -1
- khoj/interface/compiled/_next/static/chunks/app/chat/page-151232d8417a1ea1.js +0 -1
- khoj/interface/compiled/_next/static/chunks/app/factchecker/page-798904432c2417c4.js +0 -1
- khoj/interface/compiled/_next/static/css/76d55eb435962b19.css +0 -25
- /khoj/interface/compiled/_next/static/{_gBBcNbs4wMKxKXhQs5E4 → N19uqHAJYqRAVxvuVwHfE}/_buildManifest.js +0 -0
- /khoj/interface/compiled/_next/static/{_gBBcNbs4wMKxKXhQs5E4 → N19uqHAJYqRAVxvuVwHfE}/_ssgManifest.js +0 -0
- /khoj/interface/compiled/_next/static/chunks/{1970-1d6d0c1b00b4f343.js → 1970-444843bea1d17d61.js} +0 -0
- /khoj/interface/compiled/_next/static/chunks/{9417-759984ad62caa3dc.js → 9417-19cfd1a9cb758e71.js} +0 -0
- /khoj/interface/compiled/_next/static/chunks/app/settings/{page-7946cabb9c54e22d.js → page-89e6737b2cc9fb3a.js} +0 -0
- {khoj-1.27.2.dev18.dist-info → khoj-1.27.2.dev130.dist-info}/WHEEL +0 -0
- {khoj-1.27.2.dev18.dist-info → khoj-1.27.2.dev130.dist-info}/entry_points.txt +0 -0
- {khoj-1.27.2.dev18.dist-info → khoj-1.27.2.dev130.dist-info}/licenses/LICENSE +0 -0
khoj/routers/api_chat.py
CHANGED
@@ -3,9 +3,10 @@ import base64
 import json
 import logging
 import time
+import uuid
 from datetime import datetime
 from functools import partial
-from typing import Dict, Optional
+from typing import Any, Dict, List, Optional
 from urllib.parse import unquote
 
 from asgiref.sync import sync_to_async
@@ -24,10 +25,11 @@ from khoj.database.adapters import (
 )
 from khoj.database.models import Agent, KhojUser
 from khoj.processor.conversation.prompts import help_message, no_entries_found
-from khoj.processor.conversation.utils import save_to_conversation_log
+from khoj.processor.conversation.utils import defilter_query, save_to_conversation_log
 from khoj.processor.image.generate import text_to_image
 from khoj.processor.speech.text_to_speech import generate_text_to_speech
 from khoj.processor.tools.online_search import read_webpages, search_online
+from khoj.processor.tools.run_code import run_code
 from khoj.routers.api import extract_references_and_questions
 from khoj.routers.helpers import (
     ApiImageRateLimiter,
@@ -41,8 +43,10 @@ from khoj.routers.helpers import (
     aget_relevant_output_modes,
     construct_automation_created_message,
     create_automation,
+    extract_relevant_info,
     extract_relevant_summary,
     generate_excalidraw_diagram,
+    generate_summary_from_files,
     get_conversation_command,
     is_query_empty,
     is_ready_to_chat,
@@ -50,6 +54,10 @@ from khoj.routers.helpers import (
     update_telemetry_state,
     validate_conversation_config,
 )
+from khoj.routers.research import (
+    InformationCollectionIteration,
+    execute_information_collection,
+)
 from khoj.routers.storage import upload_image_to_bucket
 from khoj.utils import state
 from khoj.utils.helpers import (
@@ -562,8 +570,16 @@ async def chat(
         user: KhojUser = request.user.object
         event_delimiter = "␃🔚␗"
         q = unquote(q)
+        train_of_thought = []
         nonlocal conversation_id
 
+        tracer: dict = {
+            "mid": f"{uuid.uuid4()}",
+            "cid": conversation_id,
+            "uid": user.id,
+            "khoj_version": state.khoj_version,
+        }
+
         uploaded_images: list[str] = []
         if images:
             for image in images:
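
The hunk above builds a per-message `tracer` dict (message id, conversation id, user id, Khoj version) that later hunks pass to nearly every helper as `tracer=tracer`. A minimal sketch of the pattern, assuming a hypothetical `log_event` consumer that is not a khoj API:

```python
import uuid


def make_tracer(conversation_id: str, user_id: int, khoj_version: str) -> dict:
    """Build a per-message trace context with the same fields the diff adds."""
    return {
        "mid": str(uuid.uuid4()),  # unique id for this message exchange
        "cid": conversation_id,    # conversation the message belongs to
        "uid": user_id,            # user issuing the request
        "khoj_version": khoj_version,
    }


def log_event(name: str, payload: dict, tracer: dict) -> dict:
    """Hypothetical consumer: attach the trace context to an event record."""
    return {"event": name, **payload, "trace": tracer}


tracer = make_tracer(conversation_id="c-123", user_id=1, khoj_version="1.27.2")
print(log_event("chose_data_sources", {"sources": ["notes", "online"]}, tracer=tracer))
```
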
@@ -576,7 +592,7 @@ async def chat(
                 uploaded_images.append(uploaded_image)
 
         async def send_event(event_type: ChatEvent, data: str | dict):
-            nonlocal connection_alive, ttft
+            nonlocal connection_alive, ttft, train_of_thought
             if not connection_alive or await request.is_disconnected():
                 connection_alive = False
                 logger.warning(f"User {user} disconnected from {common.client} client")
@@ -584,8 +600,11 @@ async def chat(
             try:
                 if event_type == ChatEvent.END_LLM_RESPONSE:
                     collect_telemetry()
-                if event_type == ChatEvent.START_LLM_RESPONSE:
+                elif event_type == ChatEvent.START_LLM_RESPONSE:
                     ttft = time.perf_counter() - start_time
+                elif event_type == ChatEvent.STATUS:
+                    train_of_thought.append({"type": event_type.value, "data": data})
+
                 if event_type == ChatEvent.MESSAGE:
                     yield data
                 elif event_type == ChatEvent.REFERENCES or stream:
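
The `send_event` change records every STATUS event into `train_of_thought` so the reasoning trail can be saved with the conversation log. A self-contained sketch of that accumulation pattern; the `ChatEvent` enum here is a simplified stand-in, not khoj's class:

```python
import asyncio
from enum import Enum


class ChatEvent(Enum):
    STATUS = "status"
    MESSAGE = "message"
    END_LLM_RESPONSE = "end_llm_response"


async def event_stream(events, train_of_thought: list):
    # Forward events to the client while recording STATUS events for the log.
    for event_type, data in events:
        if event_type == ChatEvent.STATUS:
            train_of_thought.append({"type": event_type.value, "data": data})
        yield event_type, data


async def main():
    train_of_thought: list = []
    events = [
        (ChatEvent.STATUS, "**Chose Data Sources to Search:** online"),
        (ChatEvent.MESSAGE, "Here is what I found..."),
        (ChatEvent.END_LLM_RESPONSE, ""),
    ]
    async for _ in event_stream(events, train_of_thought):
        pass
    print(train_of_thought)  # contains only the STATUS entry


asyncio.run(main())
```
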
@@ -674,6 +693,14 @@ async def chat(
         meta_log = conversation.conversation_log
         is_automated_task = conversation_commands == [ConversationCommand.AutomatedTask]
 
+        researched_results = ""
+        online_results: Dict = dict()
+        code_results: Dict = dict()
+        ## Extract Document References
+        compiled_references: List[Any] = []
+        inferred_queries: List[Any] = []
+        defiltered_query = defilter_query(q)
+
         if conversation_commands == [ConversationCommand.Default] or is_automated_task:
             conversation_commands = await aget_relevant_information_sources(
                 q,
@@ -682,19 +709,59 @@ async def chat(
                 user=user,
                 query_images=uploaded_images,
                 agent=agent,
+                tracer=tracer,
             )
+
+            # If we're doing research, we don't want to do anything else
+            if ConversationCommand.Research in conversation_commands:
+                conversation_commands = [ConversationCommand.Research]
+
             conversation_commands_str = ", ".join([cmd.value for cmd in conversation_commands])
             async for result in send_event(
                 ChatEvent.STATUS, f"**Chose Data Sources to Search:** {conversation_commands_str}"
             ):
                 yield result
 
-            mode = await aget_relevant_output_modes(
+            mode = await aget_relevant_output_modes(
+                q, meta_log, is_automated_task, user, uploaded_images, agent, tracer=tracer
+            )
             async for result in send_event(ChatEvent.STATUS, f"**Decided Response Mode:** {mode.value}"):
                 yield result
             if mode not in conversation_commands:
                 conversation_commands.append(mode)
 
+        if conversation_commands == [ConversationCommand.Research]:
+            async for research_result in execute_information_collection(
+                request=request,
+                user=user,
+                query=defiltered_query,
+                conversation_id=conversation_id,
+                conversation_history=meta_log,
+                query_images=uploaded_images,
+                agent=agent,
+                send_status_func=partial(send_event, ChatEvent.STATUS),
+                user_name=user_name,
+                location=location,
+                file_filters=conversation.file_filters if conversation else [],
+                tracer=tracer,
+            ):
+                if isinstance(research_result, InformationCollectionIteration):
+                    if research_result.summarizedResult:
+                        if research_result.onlineContext:
+                            online_results.update(research_result.onlineContext)
+                        if research_result.codeContext:
+                            code_results.update(research_result.codeContext)
+                        if research_result.context:
+                            compiled_references.extend(research_result.context)
+
+                        researched_results += research_result.summarizedResult
+
+                else:
+                    yield research_result
+
+            # researched_results = await extract_relevant_info(q, researched_results, agent)
+            logger.info(f"Researched Results: {researched_results}")
+
         for cmd in conversation_commands:
             await conversation_command_rate_limiter.update_and_check_if_valid(request, cmd)
             q = q.replace(f"/{cmd.value}", "").strip()
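
When research mode is selected, the handler drains `execute_information_collection`, forwarding other yielded events to the client and merging each iteration's online, code, and document context into the running state. A simplified sketch of that consumption pattern, with `ResearchIteration` standing in for khoj's `InformationCollectionIteration`:

```python
import asyncio
from dataclasses import dataclass, field
from typing import AsyncIterator, Optional


@dataclass
class ResearchIteration:
    """Simplified stand-in for khoj's InformationCollectionIteration."""
    summarizedResult: Optional[str] = None
    onlineContext: dict = field(default_factory=dict)
    codeContext: dict = field(default_factory=dict)
    context: list = field(default_factory=list)


async def run_research(query: str) -> AsyncIterator[object]:
    # Yields status strings for the client and iteration objects for the caller to merge.
    yield f"**Researching:** {query}"
    yield ResearchIteration(summarizedResult="Found two relevant notes.", context=["note A", "note B"])


async def main():
    online_results, code_results, compiled_references = {}, {}, []
    researched_results = ""
    async for item in run_research("compare the project plans"):
        if isinstance(item, ResearchIteration):
            if item.summarizedResult:
                online_results.update(item.onlineContext)
                code_results.update(item.codeContext)
                compiled_references.extend(item.context)
                researched_results += item.summarizedResult
        else:
            print(item)  # stream the status line to the client
    print(researched_results, compiled_references)


asyncio.run(main())
```
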
@@ -723,47 +790,24 @@ async def chat(
                 async for result in send_llm_response(response_log):
                     yield result
             else:
-                [… 18 removed lines not captured in the source diff view …]
-                    if not q:
-                        q = "Create a general summary of the file"
-                    async for result in send_event(
-                        ChatEvent.STATUS, f"**Constructing Summary Using:** {file_object[0].file_name}"
-                    ):
-                        yield result
-
-                    response = await extract_relevant_summary(
-                        q,
-                        contextual_data,
-                        conversation_history=meta_log,
-                        query_images=uploaded_images,
-                        user=user,
-                        agent=agent,
-                    )
-                    response_log = str(response)
-                    async for result in send_llm_response(response_log):
-                        yield result
-                except Exception as e:
-                    response_log = "Error summarizing file. Please try again, or contact support."
-                    logger.error(f"Error summarizing file for {user.email}: {e}", exc_info=True)
-                    async for result in send_llm_response(response_log):
-                        yield result
+                async for response in generate_summary_from_files(
+                    q=q,
+                    user=user,
+                    file_filters=file_filters,
+                    meta_log=meta_log,
+                    query_images=uploaded_images,
+                    agent=agent,
+                    send_status_func=partial(send_event, ChatEvent.STATUS),
+                    tracer=tracer,
+                ):
+                    if isinstance(response, dict) and ChatEvent.STATUS in response:
+                        yield response[ChatEvent.STATUS]
+                    else:
+                        if isinstance(response, str):
+                            response_log = response
+                            async for result in send_llm_response(response):
+                                yield result
+
             await sync_to_async(save_to_conversation_log)(
                 q,
                 response_log,
@@ -774,6 +818,8 @@ async def chat(
                 client_application=request.user.client_app,
                 conversation_id=conversation_id,
                 query_images=uploaded_images,
+                tracer=tracer,
+                train_of_thought=train_of_thought,
             )
             return
 
@@ -782,7 +828,7 @@ async def chat(
         if not q:
             conversation_config = await ConversationAdapters.aget_user_conversation_config(user)
             if conversation_config == None:
-                conversation_config = await ConversationAdapters.aget_default_conversation_config()
+                conversation_config = await ConversationAdapters.aget_default_conversation_config(user)
             model_type = conversation_config.model_type
             formatted_help = help_message.format(model=model_type, version=state.khoj_version, device=get_device())
             async for result in send_llm_response(formatted_help):
@@ -795,7 +841,7 @@ async def chat(
         if ConversationCommand.Automation in conversation_commands:
             try:
                 automation, crontime, query_to_run, subject = await create_automation(
-                    q, timezone, user, request.url, meta_log
+                    q, timezone, user, request.url, meta_log, tracer=tracer
                 )
             except Exception as e:
                 logger.error(f"Error scheduling task {q} for {user.email}: {e}")
@@ -817,6 +863,8 @@ async def chat(
                 inferred_queries=[query_to_run],
                 automation_id=automation.id,
                 query_images=uploaded_images,
+                tracer=tracer,
+                train_of_thought=train_of_thought,
             )
             async for result in send_llm_response(llm_response):
                 yield result
@@ -824,48 +872,49 @@ async def chat(
 
         # Gather Context
         ## Extract Document References
-        [… 31 removed lines not captured in the source diff view …]
-            # Strip only leading # from headings
-            headings = headings.replace("#", "")
-            async for result in send_event(ChatEvent.STATUS, f"**Found Relevant Notes**: {headings}"):
-                yield result
+        if not ConversationCommand.Research in conversation_commands:
+            try:
+                async for result in extract_references_and_questions(
+                    request,
+                    meta_log,
+                    q,
+                    (n or 7),
+                    d,
+                    conversation_id,
+                    conversation_commands,
+                    location,
+                    partial(send_event, ChatEvent.STATUS),
+                    query_images=uploaded_images,
+                    agent=agent,
+                    tracer=tracer,
+                ):
+                    if isinstance(result, dict) and ChatEvent.STATUS in result:
+                        yield result[ChatEvent.STATUS]
+                    else:
+                        compiled_references.extend(result[0])
+                        inferred_queries.extend(result[1])
+                        defiltered_query = result[2]
+            except Exception as e:
+                error_message = (
+                    f"Error searching knowledge base: {e}. Attempting to respond without document references."
+                )
+                logger.error(error_message, exc_info=True)
+                async for result in send_event(
+                    ChatEvent.STATUS, "Document search failed. I'll try respond without document references"
+                ):
+                    yield result
 
-
+        if not is_none_or_empty(compiled_references):
+            headings = "\n- " + "\n- ".join(set([c.get("compiled", c).split("\n")[0] for c in compiled_references]))
+            # Strip only leading # from headings
+            headings = headings.replace("#", "")
+            async for result in send_event(ChatEvent.STATUS, f"**Found Relevant Notes**: {headings}"):
+                yield result
 
-
-
-
-
+        if conversation_commands == [ConversationCommand.Notes] and not await EntryAdapters.auser_has_entries(user):
+            async for result in send_llm_response(f"{no_entries_found.format()}"):
+                yield result
+            return
 
         if ConversationCommand.Notes in conversation_commands and is_none_or_empty(compiled_references):
             conversation_commands.remove(ConversationCommand.Notes)
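
The added `**Found Relevant Notes**` status derives headings from the first line of each compiled reference, de-duplicates them, and strips `#` characters. The same expression in isolation, with toy data:

```python
compiled_references = [
    {"compiled": "# Project Plan\nKick-off is in March."},
    {"compiled": "## Budget\nQ2 numbers are final."},
    {"compiled": "# Project Plan\nDuplicate heading, removed by set()."},
]
# The first line of each reference is treated as its heading; set() de-duplicates.
headings = "\n- " + "\n- ".join(set([c.get("compiled", c).split("\n")[0] for c in compiled_references]))
# As in the source, replace("#", "") drops every "#" character, not just leading ones.
headings = headings.replace("#", "")
print(f"**Found Relevant Notes**: {headings}")
```
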
@@ -882,6 +931,7 @@ async def chat(
                     custom_filters,
                     query_images=uploaded_images,
                     agent=agent,
+                    tracer=tracer,
                 ):
                     if isinstance(result, dict) and ChatEvent.STATUS in result:
                         yield result[ChatEvent.STATUS]
@@ -906,6 +956,7 @@ async def chat(
                     partial(send_event, ChatEvent.STATUS),
                     query_images=uploaded_images,
                     agent=agent,
+                    tracer=tracer,
                 ):
                     if isinstance(result, dict) and ChatEvent.STATUS in result:
                         yield result[ChatEvent.STATUS]
@@ -932,6 +983,33 @@ async def chat(
                 ):
                     yield result
 
+        ## Gather Code Results
+        if ConversationCommand.Code in conversation_commands:
+            try:
+                context = f"# Iteration 1:\n#---\nNotes:\n{compiled_references}\n\nOnline Results:{online_results}"
+                async for result in run_code(
+                    defiltered_query,
+                    meta_log,
+                    context,
+                    location,
+                    user,
+                    partial(send_event, ChatEvent.STATUS),
+                    query_images=uploaded_images,
+                    agent=agent,
+                    tracer=tracer,
+                ):
+                    if isinstance(result, dict) and ChatEvent.STATUS in result:
+                        yield result[ChatEvent.STATUS]
+                    else:
+                        code_results = result
+                async for result in send_event(ChatEvent.STATUS, f"**Ran code snippets**: {len(code_results)}"):
+                    yield result
+            except ValueError as e:
+                logger.warning(
+                    f"Failed to use code tool: {e}. Attempting to respond without code results",
+                    exc_info=True,
+                )
+
         ## Send Gathered References
         async for result in send_event(
             ChatEvent.REFERENCES,
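
The code-tool block follows the convention used by the other tool calls in this file: the generator yields status dicts while it works and a final results payload at the end, and the caller dispatches on whether the `ChatEvent.STATUS` key is present. A minimal standalone sketch, with a plain `STATUS` string standing in for `ChatEvent.STATUS`:

```python
import asyncio
from typing import AsyncIterator

STATUS = "status"  # stand-in for ChatEvent.STATUS


async def run_tool(query: str) -> AsyncIterator[object]:
    # Tools in this diff yield status dicts while working, then the final payload.
    yield {STATUS: f"**Running code for**: {query}"}
    yield {"snippet_1": {"code": "print(40 + 2)", "results": {"std_out": "42\n"}}}


async def main():
    code_results: dict = {}
    async for result in run_tool("add the numbers"):
        if isinstance(result, dict) and STATUS in result:
            print(result[STATUS])  # forward progress updates to the client
        else:
            code_results = result  # keep the final payload for the response
    print(f"**Ran code snippets**: {len(code_results)}")


asyncio.run(main())
```
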
@@ -939,6 +1017,7 @@ async def chat(
                 "inferredQueries": inferred_queries,
                 "context": compiled_references,
                 "onlineContext": online_results,
+                "codeContext": code_results,
             },
         ):
             yield result
@@ -956,6 +1035,7 @@ async def chat(
                 send_status_func=partial(send_event, ChatEvent.STATUS),
                 query_images=uploaded_images,
                 agent=agent,
+                tracer=tracer,
             ):
                 if isinstance(result, dict) and ChatEvent.STATUS in result:
                     yield result[ChatEvent.STATUS]
@@ -986,6 +1066,8 @@ async def chat(
                 compiled_references=compiled_references,
                 online_results=online_results,
                 query_images=uploaded_images,
+                tracer=tracer,
+                train_of_thought=train_of_thought,
             )
             content_obj = {
                 "intentType": intent_type,
@@ -1014,6 +1096,7 @@ async def chat(
                 user=user,
                 agent=agent,
                 send_status_func=partial(send_event, ChatEvent.STATUS),
+                tracer=tracer,
             ):
                 if isinstance(result, dict) and ChatEvent.STATUS in result:
                     yield result[ChatEvent.STATUS]
@@ -1041,6 +1124,8 @@ async def chat(
                 compiled_references=compiled_references,
                 online_results=online_results,
                 query_images=uploaded_images,
+                tracer=tracer,
+                train_of_thought=train_of_thought,
             )
 
             async for result in send_llm_response(json.dumps(content_obj)):
@@ -1056,6 +1141,7 @@ async def chat(
             conversation,
             compiled_references,
             online_results,
+            code_results,
             inferred_queries,
             conversation_commands,
             user,
@@ -1063,7 +1149,10 @@ async def chat(
             conversation_id,
             location,
             user_name,
+            researched_results,
             uploaded_images,
+            tracer,
+            train_of_thought,
         )
 
         # Send Response