khoj 1.25.1.dev9__py3-none-any.whl → 1.26.1.dev3__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their public registries.
- khoj/database/adapters/__init__.py +81 -11
- khoj/database/admin.py +18 -1
- khoj/database/migrations/0068_alter_agent_output_modes.py +24 -0
- khoj/database/migrations/0069_webscraper_serverchatsettings_web_scraper.py +89 -0
- khoj/database/models/__init__.py +78 -2
- khoj/interface/compiled/404/index.html +1 -1
- khoj/interface/compiled/_next/static/chunks/1603-fa3ee48860b9dc5c.js +1 -0
- khoj/interface/compiled/_next/static/chunks/{9417-1d158bf46d3a0dc9.js → 9417-1ad504db22331388.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/{9479-563e4d61f91d5a7c.js → 9479-adede27bb126b5d0.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/agents/{layout-e71c8e913cccf792.js → layout-75636ab3a413fa8e.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/agents/{page-b406d166301c4c7d.js → page-e9eee31dbdb4658c.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/chat/{layout-8102549127db3067.js → layout-96fcf62857bf8f30.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/{layout-f3e40d346da53112.js → layout-d0f0a9067427fb20.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/settings/{layout-6f9314b0d7a26046.js → layout-a8f33dfe92f997fb.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/share/chat/{layout-39f03f9e32399f0f.js → layout-2df56074e42adaa0.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/{webpack-462819dcfa6a1e2b.js → webpack-ba79408024891b00.js} +1 -1
- khoj/interface/compiled/_next/static/css/467a524c75e7d7c0.css +1 -0
- khoj/interface/compiled/_next/static/css/{b1094827d745306b.css → f768dddada62459d.css} +1 -1
- khoj/interface/compiled/agents/index.html +1 -1
- khoj/interface/compiled/agents/index.txt +2 -2
- khoj/interface/compiled/automations/index.html +1 -1
- khoj/interface/compiled/automations/index.txt +2 -2
- khoj/interface/compiled/chat/index.html +1 -1
- khoj/interface/compiled/chat/index.txt +2 -2
- khoj/interface/compiled/factchecker/index.html +1 -1
- khoj/interface/compiled/factchecker/index.txt +2 -2
- khoj/interface/compiled/index.html +1 -1
- khoj/interface/compiled/index.txt +2 -2
- khoj/interface/compiled/search/index.html +1 -1
- khoj/interface/compiled/search/index.txt +2 -2
- khoj/interface/compiled/settings/index.html +1 -1
- khoj/interface/compiled/settings/index.txt +3 -3
- khoj/interface/compiled/share/chat/index.html +1 -1
- khoj/interface/compiled/share/chat/index.txt +2 -2
- khoj/interface/web/assets/icons/agents.svg +1 -0
- khoj/interface/web/assets/icons/automation.svg +1 -0
- khoj/interface/web/assets/icons/chat.svg +24 -0
- khoj/interface/web/login.html +11 -22
- khoj/processor/content/images/image_to_entries.py +2 -0
- khoj/processor/conversation/google/utils.py +4 -0
- khoj/processor/conversation/prompts.py +1 -1
- khoj/processor/embeddings.py +1 -0
- khoj/processor/tools/online_search.py +135 -42
- khoj/routers/api_chat.py +34 -505
- khoj/routers/helpers.py +13 -11
- khoj/search_type/text_search.py +7 -2
- khoj/utils/helpers.py +50 -5
- {khoj-1.25.1.dev9.dist-info → khoj-1.26.1.dev3.dist-info}/METADATA +4 -4
- {khoj-1.25.1.dev9.dist-info → khoj-1.26.1.dev3.dist-info}/RECORD +54 -51
- khoj/interface/compiled/_next/static/chunks/1603-67a89278e2c5dbe6.js +0 -1
- khoj/interface/compiled/_next/static/css/1538cedb321e3a97.css +0 -1
- /khoj/interface/compiled/_next/static/{jRL5xyceUdI0nvEyCkgqF → 0KX2AuxAEK1Jhb97imej7}/_buildManifest.js +0 -0
- /khoj/interface/compiled/_next/static/{jRL5xyceUdI0nvEyCkgqF → 0KX2AuxAEK1Jhb97imej7}/_ssgManifest.js +0 -0
- {khoj-1.25.1.dev9.dist-info → khoj-1.26.1.dev3.dist-info}/WHEEL +0 -0
- {khoj-1.25.1.dev9.dist-info → khoj-1.26.1.dev3.dist-info}/entry_points.txt +0 -0
- {khoj-1.25.1.dev9.dist-info → khoj-1.26.1.dev3.dist-info}/licenses/LICENSE +0 -0
khoj/routers/api_chat.py
CHANGED
@@ -3,7 +3,6 @@ import base64
 import json
 import logging
 import time
-import warnings
 from datetime import datetime
 from functools import partial
 from typing import Dict, Optional
@@ -11,9 +10,8 @@ from urllib.parse import unquote
 
 from asgiref.sync import sync_to_async
 from fastapi import APIRouter, Depends, HTTPException, Request
-from fastapi.requests import Request
 from fastapi.responses import Response, StreamingResponse
-from starlette.authentication import has_required_scope, requires
+from starlette.authentication import requires
 
 from khoj.app.settings import ALLOWED_HOSTS
 from khoj.database.adapters import (
@@ -574,7 +572,6 @@ async def chat(
 chat_metadata: dict = {}
 connection_alive = True
 user: KhojUser = request.user.object
-subscribed: bool = has_required_scope(request, ["premium"])
 event_delimiter = "␃🔚␗"
 q = unquote(q)
 nonlocal conversation_id
@@ -641,7 +638,7 @@ async def chat(
 request=request,
 telemetry_type="api",
 api="chat",
-client=request.user.client_app,
+client=common.client,
 user_agent=request.headers.get("user-agent"),
 host=request.headers.get("host"),
 metadata=chat_metadata,
@@ -839,509 +836,35 @@ async def chat(
 
 # Gather Context
 ## Extract Document References
-compiled_references, inferred_queries, defiltered_query = [], [], None
-async for result in extract_references_and_questions(
-request,
-meta_log,
-q,
-(n or 7),
-d,
-conversation_id,
-conversation_commands,
-location,
-partial(send_event, ChatEvent.STATUS),
-uploaded_image_url=uploaded_image_url,
-agent=agent,
-):
-if isinstance(result, dict) and ChatEvent.STATUS in result:
-yield result[ChatEvent.STATUS]
-else:
-compiled_references.extend(result[0])
-inferred_queries.extend(result[1])
-defiltered_query = result[2]
-
-if not is_none_or_empty(compiled_references):
-headings = "\n- " + "\n- ".join(set([c.get("compiled", c).split("\n")[0] for c in compiled_references]))
-# Strip only leading # from headings
-headings = headings.replace("#", "")
-async for result in send_event(ChatEvent.STATUS, f"**Found Relevant Notes**: {headings}"):
-yield result
-
-online_results: Dict = dict()
-
-if conversation_commands == [ConversationCommand.Notes] and not await EntryAdapters.auser_has_entries(user):
-async for result in send_llm_response(f"{no_entries_found.format()}"):
-yield result
-return
-
-if ConversationCommand.Notes in conversation_commands and is_none_or_empty(compiled_references):
-conversation_commands.remove(ConversationCommand.Notes)
-
-## Gather Online References
-if ConversationCommand.Online in conversation_commands:
-try:
-async for result in search_online(
-defiltered_query,
-meta_log,
-location,
-user,
-subscribed,
-partial(send_event, ChatEvent.STATUS),
-custom_filters,
-uploaded_image_url=uploaded_image_url,
-agent=agent,
-):
-if isinstance(result, dict) and ChatEvent.STATUS in result:
-yield result[ChatEvent.STATUS]
-else:
-online_results = result
-except ValueError as e:
-error_message = f"Error searching online: {e}. Attempting to respond without online results"
-logger.warning(error_message)
-async for result in send_llm_response(error_message):
-yield result
-return
-
-## Gather Webpage References
-if ConversationCommand.Webpage in conversation_commands:
-try:
-async for result in read_webpages(
-defiltered_query,
-meta_log,
-location,
-user,
-subscribed,
-partial(send_event, ChatEvent.STATUS),
-uploaded_image_url=uploaded_image_url,
-agent=agent,
-):
-if isinstance(result, dict) and ChatEvent.STATUS in result:
-yield result[ChatEvent.STATUS]
-else:
-direct_web_pages = result
-webpages = []
-for query in direct_web_pages:
-if online_results.get(query):
-online_results[query]["webpages"] = direct_web_pages[query]["webpages"]
-else:
-online_results[query] = {"webpages": direct_web_pages[query]["webpages"]}
-
-for webpage in direct_web_pages[query]["webpages"]:
-webpages.append(webpage["link"])
-async for result in send_event(ChatEvent.STATUS, f"**Read web pages**: {webpages}"):
-yield result
-except ValueError as e:
-logger.warning(
-f"Error directly reading webpages: {e}. Attempting to respond without online results",
-exc_info=True,
-)
-
-## Send Gathered References
-async for result in send_event(
-ChatEvent.REFERENCES,
-{
-"inferredQueries": inferred_queries,
-"context": compiled_references,
-"onlineContext": online_results,
-},
-):
-yield result
-
-# Generate Output
-## Generate Image Output
-if ConversationCommand.Image in conversation_commands:
-async for result in text_to_image(
-q,
-user,
+compiled_references, inferred_queries, defiltered_query = [], [], q
+try:
+async for result in extract_references_and_questions(
+request,
 meta_log,
-location_data=location,
-
-online_results=online_results,
-send_status_func=partial(send_event, ChatEvent.STATUS),
+q,
+(n or 7),
+d,
+conversation_id,
+conversation_commands,
+location,
+partial(send_event, ChatEvent.STATUS),
 uploaded_image_url=uploaded_image_url,
 agent=agent,
 ):
 if isinstance(result, dict) and ChatEvent.STATUS in result:
 yield result[ChatEvent.STATUS]
 else:
-
-
-
-
-
-
-"detail": improved_image_prompt,
-"image": image,
-}
-async for result in send_llm_response(json.dumps(content_obj)):
-yield result
-return
-
-await sync_to_async(save_to_conversation_log)(
-q,
-image,
-user,
-meta_log,
-user_message_time,
-intent_type=intent_type,
-inferred_queries=[improved_image_prompt],
-client_application=request.user.client_app,
-conversation_id=conversation_id,
-compiled_references=compiled_references,
-online_results=online_results,
-uploaded_image_url=uploaded_image_url,
-)
-content_obj = {
-"intentType": intent_type,
-"inferredQueries": [improved_image_prompt],
-"image": image,
-}
-async for result in send_llm_response(json.dumps(content_obj)):
-yield result
-return
-
-## Generate Text Output
-async for result in send_event(ChatEvent.STATUS, f"**Generating a well-informed response**"):
-yield result
-llm_response, chat_metadata = await agenerate_chat_response(
-defiltered_query,
-meta_log,
-conversation,
-compiled_references,
-online_results,
-inferred_queries,
-conversation_commands,
-user,
-request.user.client_app,
-conversation_id,
-location,
-user_name,
-uploaded_image_url,
-)
-
-# Send Response
-async for result in send_event(ChatEvent.START_LLM_RESPONSE, ""):
-yield result
-
-continue_stream = True
-iterator = AsyncIteratorWrapper(llm_response)
-async for item in iterator:
-if item is None:
-async for result in send_event(ChatEvent.END_LLM_RESPONSE, ""):
-yield result
-logger.debug("Finished streaming response")
-return
-if not connection_alive or not continue_stream:
-continue
-try:
-async for result in send_event(ChatEvent.MESSAGE, f"{item}"):
-yield result
-except Exception as e:
-continue_stream = False
-logger.info(f"User {user} disconnected. Emitting rest of responses to clear thread: {e}")
-
-## Stream Text Response
-if stream:
-return StreamingResponse(event_generator(q, image=image), media_type="text/plain")
-## Non-Streaming Text Response
-else:
-response_iterator = event_generator(q, image=image)
-response_data = await read_chat_stream(response_iterator)
-return Response(content=json.dumps(response_data), media_type="application/json", status_code=200)
-
-
-# Deprecated API. Remove by end of September 2024
-@api_chat.get("")
-@requires(["authenticated"])
-async def get_chat(
-request: Request,
-common: CommonQueryParams,
-q: str,
-n: int = 7,
-d: float = None,
-stream: Optional[bool] = False,
-title: Optional[str] = None,
-conversation_id: Optional[str] = None,
-city: Optional[str] = None,
-region: Optional[str] = None,
-country: Optional[str] = None,
-timezone: Optional[str] = None,
-image: Optional[str] = None,
-rate_limiter_per_minute=Depends(
-ApiUserRateLimiter(requests=60, subscribed_requests=60, window=60, slug="chat_minute")
-),
-rate_limiter_per_day=Depends(
-ApiUserRateLimiter(requests=600, subscribed_requests=600, window=60 * 60 * 24, slug="chat_day")
-),
-):
-# Issue a deprecation warning
-warnings.warn(
-"The 'get_chat' API endpoint is deprecated. It will be removed by the end of September 2024.",
-DeprecationWarning,
-stacklevel=2,
-)
-
-async def event_generator(q: str, image: str):
-start_time = time.perf_counter()
-ttft = None
-chat_metadata: dict = {}
-connection_alive = True
-user: KhojUser = request.user.object
-subscribed: bool = has_required_scope(request, ["premium"])
-event_delimiter = "␃🔚␗"
-q = unquote(q)
-nonlocal conversation_id
-
-uploaded_image_url = None
-if image:
-decoded_string = unquote(image)
-base64_data = decoded_string.split(",", 1)[1]
-image_bytes = base64.b64decode(base64_data)
-webp_image_bytes = convert_image_to_webp(image_bytes)
-try:
-uploaded_image_url = upload_image_to_bucket(webp_image_bytes, request.user.object.id)
-except:
-uploaded_image_url = None
-
-async def send_event(event_type: ChatEvent, data: str | dict):
-nonlocal connection_alive, ttft
-if not connection_alive or await request.is_disconnected():
-connection_alive = False
-logger.warn(f"User {user} disconnected from {common.client} client")
-return
-try:
-if event_type == ChatEvent.END_LLM_RESPONSE:
-collect_telemetry()
-if event_type == ChatEvent.START_LLM_RESPONSE:
-ttft = time.perf_counter() - start_time
-if event_type == ChatEvent.MESSAGE:
-yield data
-elif event_type == ChatEvent.REFERENCES or stream:
-yield json.dumps({"type": event_type.value, "data": data}, ensure_ascii=False)
-except asyncio.CancelledError as e:
-connection_alive = False
-logger.warn(f"User {user} disconnected from {common.client} client: {e}")
-return
-except Exception as e:
-connection_alive = False
-logger.error(f"Failed to stream chat API response to {user} on {common.client}: {e}", exc_info=True)
-return
-finally:
-yield event_delimiter
-
-async def send_llm_response(response: str):
-async for result in send_event(ChatEvent.START_LLM_RESPONSE, ""):
-yield result
-async for result in send_event(ChatEvent.MESSAGE, response):
-yield result
-async for result in send_event(ChatEvent.END_LLM_RESPONSE, ""):
-yield result
-
-def collect_telemetry():
-# Gather chat response telemetry
-nonlocal chat_metadata
-latency = time.perf_counter() - start_time
-cmd_set = set([cmd.value for cmd in conversation_commands])
-chat_metadata = chat_metadata or {}
-chat_metadata["conversation_command"] = cmd_set
-chat_metadata["agent"] = conversation.agent.slug if conversation.agent else None
-chat_metadata["latency"] = f"{latency:.3f}"
-chat_metadata["ttft_latency"] = f"{ttft:.3f}"
-
-logger.info(f"Chat response time to first token: {ttft:.3f} seconds")
-logger.info(f"Chat response total time: {latency:.3f} seconds")
-update_telemetry_state(
-request=request,
-telemetry_type="api",
-api="chat",
-client=request.user.client_app,
-user_agent=request.headers.get("user-agent"),
-host=request.headers.get("host"),
-metadata=chat_metadata,
-)
-
-conversation_commands = [get_conversation_command(query=q, any_references=True)]
-
-conversation = await ConversationAdapters.aget_conversation_by_user(
-user, client_application=request.user.client_app, conversation_id=conversation_id, title=title
-)
-if not conversation:
-async for result in send_llm_response(f"Conversation {conversation_id} not found"):
-yield result
-return
-conversation_id = conversation.id
-agent = conversation.agent if conversation.agent else None
-
-await is_ready_to_chat(user)
-
-user_name = await aget_user_name(user)
-location = None
-if city or region or country:
-location = LocationData(city=city, region=region, country=country)
-
-if is_query_empty(q):
-async for result in send_llm_response("Please ask your query to get started."):
-yield result
-return
-
-user_message_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
-meta_log = conversation.conversation_log
-is_automated_task = conversation_commands == [ConversationCommand.AutomatedTask]
-
-if conversation_commands == [ConversationCommand.Default] or is_automated_task:
-conversation_commands = await aget_relevant_information_sources(
-q, meta_log, is_automated_task, user=user, uploaded_image_url=uploaded_image_url
-)
-conversation_commands_str = ", ".join([cmd.value for cmd in conversation_commands])
+compiled_references.extend(result[0])
+inferred_queries.extend(result[1])
+defiltered_query = result[2]
+except Exception as e:
+error_message = f"Error searching knowledge base: {e}. Attempting to respond without document references."
+logger.warning(error_message)
 async for result in send_event(
-ChatEvent.STATUS,
+ChatEvent.STATUS, "Document search failed. I'll try respond without document references"
 ):
 yield result
 
-mode = await aget_relevant_output_modes(q, meta_log, is_automated_task, user, uploaded_image_url)
-async for result in send_event(ChatEvent.STATUS, f"**Decided Response Mode:** {mode.value}"):
-yield result
-if mode not in conversation_commands:
-conversation_commands.append(mode)
-
-for cmd in conversation_commands:
-await conversation_command_rate_limiter.update_and_check_if_valid(request, cmd)
-q = q.replace(f"/{cmd.value}", "").strip()
-
-used_slash_summarize = conversation_commands == [ConversationCommand.Summarize]
-file_filters = conversation.file_filters if conversation else []
-# Skip trying to summarize if
-if (
-# summarization intent was inferred
-ConversationCommand.Summarize in conversation_commands
-# and not triggered via slash command
-and not used_slash_summarize
-# but we can't actually summarize
-and len(file_filters) != 1
-):
-conversation_commands.remove(ConversationCommand.Summarize)
-elif ConversationCommand.Summarize in conversation_commands:
-response_log = ""
-if len(file_filters) == 0:
-response_log = "No files selected for summarization. Please add files using the section on the left."
-async for result in send_llm_response(response_log):
-yield result
-elif len(file_filters) > 1:
-response_log = "Only one file can be selected for summarization."
-async for result in send_llm_response(response_log):
-yield result
-else:
-try:
-file_object = await FileObjectAdapters.async_get_file_objects_by_name(user, file_filters[0])
-if len(file_object) == 0:
-response_log = "Sorry, we couldn't find the full text of this file. Please re-upload the document and try again."
-async for result in send_llm_response(response_log):
-yield result
-return
-contextual_data = " ".join([file.raw_text for file in file_object])
-if not q:
-q = "Create a general summary of the file"
-async for result in send_event(
-ChatEvent.STATUS, f"**Constructing Summary Using:** {file_object[0].file_name}"
-):
-yield result
-
-response = await extract_relevant_summary(
-q,
-contextual_data,
-conversation_history=meta_log,
-user=user,
-uploaded_image_url=uploaded_image_url,
-)
-response_log = str(response)
-async for result in send_llm_response(response_log):
-yield result
-except Exception as e:
-response_log = "Error summarizing file."
-logger.error(f"Error summarizing file for {user.email}: {e}", exc_info=True)
-async for result in send_llm_response(response_log):
-yield result
-await sync_to_async(save_to_conversation_log)(
-q,
-response_log,
-user,
-meta_log,
-user_message_time,
-intent_type="summarize",
-client_application=request.user.client_app,
-conversation_id=conversation_id,
-uploaded_image_url=uploaded_image_url,
-)
-return
-
-custom_filters = []
-if conversation_commands == [ConversationCommand.Help]:
-if not q:
-conversation_config = await ConversationAdapters.aget_user_conversation_config(user)
-if conversation_config == None:
-conversation_config = await ConversationAdapters.aget_default_conversation_config()
-model_type = conversation_config.model_type
-formatted_help = help_message.format(model=model_type, version=state.khoj_version, device=get_device())
-async for result in send_llm_response(formatted_help):
-yield result
-return
-# Adding specification to search online specifically on khoj.dev pages.
-custom_filters.append("site:khoj.dev")
-conversation_commands.append(ConversationCommand.Online)
-
-if ConversationCommand.Automation in conversation_commands:
-try:
-automation, crontime, query_to_run, subject = await create_automation(
-q, timezone, user, request.url, meta_log
-)
-except Exception as e:
-logger.error(f"Error scheduling task {q} for {user.email}: {e}")
-error_message = f"Unable to create automation. Ensure the automation doesn't already exist."
-async for result in send_llm_response(error_message):
-yield result
-return
-
-llm_response = construct_automation_created_message(automation, crontime, query_to_run, subject)
-await sync_to_async(save_to_conversation_log)(
-q,
-llm_response,
-user,
-meta_log,
-user_message_time,
-intent_type="automation",
-client_application=request.user.client_app,
-conversation_id=conversation_id,
-inferred_queries=[query_to_run],
-automation_id=automation.id,
-uploaded_image_url=uploaded_image_url,
-)
-async for result in send_llm_response(llm_response):
-yield result
-return
-
-# Gather Context
-## Extract Document References
-compiled_references, inferred_queries, defiltered_query = [], [], None
-async for result in extract_references_and_questions(
-request,
-meta_log,
-q,
-(n or 7),
-d,
-conversation_id,
-conversation_commands,
-location,
-partial(send_event, ChatEvent.STATUS),
-uploaded_image_url=uploaded_image_url,
-):
-if isinstance(result, dict) and ChatEvent.STATUS in result:
-yield result[ChatEvent.STATUS]
-else:
-compiled_references.extend(result[0])
-inferred_queries.extend(result[1])
-defiltered_query = result[2]
-
 if not is_none_or_empty(compiled_references):
 headings = "\n- " + "\n- ".join(set([c.get("compiled", c).split("\n")[0] for c in compiled_references]))
 # Strip only leading # from headings
@@ -1367,21 +890,22 @@ async def get_chat(
 meta_log,
 location,
 user,
-subscribed,
 partial(send_event, ChatEvent.STATUS),
 custom_filters,
 uploaded_image_url=uploaded_image_url,
+agent=agent,
 ):
 if isinstance(result, dict) and ChatEvent.STATUS in result:
 yield result[ChatEvent.STATUS]
 else:
 online_results = result
-except ValueError as e:
+except Exception as e:
 error_message = f"Error searching online: {e}. Attempting to respond without online results"
 logger.warning(error_message)
-async for result in send_llm_response(error_message):
+async for result in send_event(
+ChatEvent.STATUS, "Online search failed. I'll try respond without online references"
+):
 yield result
-return
 
 ## Gather Webpage References
 if ConversationCommand.Webpage in conversation_commands:
@@ -1391,9 +915,9 @@ async def get_chat(
 meta_log,
 location,
 user,
-subscribed,
 partial(send_event, ChatEvent.STATUS),
 uploaded_image_url=uploaded_image_url,
+agent=agent,
 ):
 if isinstance(result, dict) and ChatEvent.STATUS in result:
 yield result[ChatEvent.STATUS]
@@ -1410,11 +934,15 @@ async def get_chat(
 webpages.append(webpage["link"])
 async for result in send_event(ChatEvent.STATUS, f"**Read web pages**: {webpages}"):
 yield result
-except ValueError as e:
+except Exception as e:
 logger.warning(
-f"Error directly reading webpages: {e}. Attempting to respond without online results",
+f"Error reading webpages: {e}. Attempting to respond without webpage results",
 exc_info=True,
 )
+async for result in send_event(
+ChatEvent.STATUS, "Webpage read failed. I'll try respond without webpage references"
+):
+yield result
 
 ## Send Gathered References
 async for result in send_event(
@@ -1431,7 +959,7 @@ async def get_chat(
 ## Generate Image Output
 if ConversationCommand.Image in conversation_commands:
 async for result in text_to_image(
-q,
+defiltered_query,
 user,
 meta_log,
 location_data=location,
@@ -1439,6 +967,7 @@ async def get_chat(
 online_results=online_results,
 send_status_func=partial(send_event, ChatEvent.STATUS),
 uploaded_image_url=uploaded_image_url,
+agent=agent,
 ):
 if isinstance(result, dict) and ChatEvent.STATUS in result:
 yield result[ChatEvent.STATUS]