khoj 1.22.1__py3-none-any.whl → 1.22.2.dev9__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- khoj/interface/compiled/404/index.html +1 -1
- khoj/interface/compiled/_next/static/chunks/app/factchecker/page-faaf60c2a32dd9e2.js +1 -0
- khoj/interface/compiled/_next/static/chunks/{webpack-ace3bded0dbc790e.js → webpack-dff708c71e9234cb.js} +1 -1
- khoj/interface/compiled/_next/static/css/{3f8ae5fd7b73a9e5.css → 149c5104fe3d38b8.css} +1 -1
- khoj/interface/compiled/agents/index.html +1 -1
- khoj/interface/compiled/agents/index.txt +2 -2
- khoj/interface/compiled/automations/index.html +1 -1
- khoj/interface/compiled/automations/index.txt +1 -1
- khoj/interface/compiled/chat/index.html +1 -1
- khoj/interface/compiled/chat/index.txt +2 -2
- khoj/interface/compiled/factchecker/index.html +1 -1
- khoj/interface/compiled/factchecker/index.txt +2 -2
- khoj/interface/compiled/index.html +1 -1
- khoj/interface/compiled/index.txt +2 -2
- khoj/interface/compiled/search/index.html +1 -1
- khoj/interface/compiled/search/index.txt +2 -2
- khoj/interface/compiled/settings/index.html +1 -1
- khoj/interface/compiled/settings/index.txt +1 -1
- khoj/interface/compiled/share/chat/index.html +1 -1
- khoj/interface/compiled/share/chat/index.txt +2 -2
- khoj/processor/conversation/openai/gpt.py +2 -2
- khoj/processor/conversation/openai/whisper.py +1 -1
- khoj/routers/api_chat.py +476 -0
- khoj/routers/helpers.py +5 -6
- khoj/utils/constants.py +1 -1
- {khoj-1.22.1.dist-info → khoj-1.22.2.dev9.dist-info}/METADATA +1 -1
- {khoj-1.22.1.dist-info → khoj-1.22.2.dev9.dist-info}/RECORD +37 -37
- khoj/interface/compiled/_next/static/chunks/app/factchecker/page-5c55afdb9dbe8dac.js +0 -1
- /khoj/interface/compiled/_next/static/{2ItMRj16PRB_daJUGRf2Z → 3ha-3qUhlbm88tHUHwG1o}/_buildManifest.js +0 -0
- /khoj/interface/compiled/_next/static/{2ItMRj16PRB_daJUGRf2Z → 3ha-3qUhlbm88tHUHwG1o}/_ssgManifest.js +0 -0
- /khoj/interface/compiled/_next/static/chunks/{8423-b6a61d82233d1a82.js → 8423-50162a9e62970350.js} +0 -0
- /khoj/interface/compiled/_next/static/chunks/{9178-3a0baad1c172d515.js → 9178-c153fc402c970365.js} +0 -0
- /khoj/interface/compiled/_next/static/chunks/{9417-2e54c6fd056982d8.js → 9417-5d14ac74aaab2c66.js} +0 -0
- /khoj/interface/compiled/_next/static/chunks/app/agents/{page-3c01900e7b5c7e50.js → page-6ade083d5e27a023.js} +0 -0
- /khoj/interface/compiled/_next/static/chunks/app/{page-8f61b4bd2032384a.js → page-a03a9fb2dc6d6e47.js} +0 -0
- {khoj-1.22.1.dist-info → khoj-1.22.2.dev9.dist-info}/WHEEL +0 -0
- {khoj-1.22.1.dist-info → khoj-1.22.2.dev9.dist-info}/entry_points.txt +0 -0
- {khoj-1.22.1.dist-info → khoj-1.22.2.dev9.dist-info}/licenses/LICENSE +0 -0
khoj/routers/api_chat.py
CHANGED
```diff
@@ -3,6 +3,7 @@ import base64
 import json
 import logging
 import time
+import warnings
 from datetime import datetime
 from functools import partial
 from typing import Dict, Optional
@@ -1002,3 +1003,478 @@ async def chat(
         response_iterator = event_generator(q, image=image)
         response_data = await read_chat_stream(response_iterator)
         return Response(content=json.dumps(response_data), media_type="application/json", status_code=200)
+
+
+# Deprecated API. Remove by end of September 2024
+@api_chat.get("")
+@requires(["authenticated"])
+async def get_chat(
+    request: Request,
+    common: CommonQueryParams,
+    q: str,
+    n: int = 7,
+    d: float = None,
+    stream: Optional[bool] = False,
+    title: Optional[str] = None,
+    conversation_id: Optional[int] = None,
+    city: Optional[str] = None,
+    region: Optional[str] = None,
+    country: Optional[str] = None,
+    timezone: Optional[str] = None,
+    image: Optional[str] = None,
+    rate_limiter_per_minute=Depends(
+        ApiUserRateLimiter(requests=60, subscribed_requests=60, window=60, slug="chat_minute")
+    ),
+    rate_limiter_per_day=Depends(
+        ApiUserRateLimiter(requests=600, subscribed_requests=600, window=60 * 60 * 24, slug="chat_day")
+    ),
+):
+    # Issue a deprecation warning
+    warnings.warn(
+        "The 'get_chat' API endpoint is deprecated. It will be removed by the end of September 2024.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+
+    async def event_generator(q: str, image: str):
+        start_time = time.perf_counter()
+        ttft = None
+        chat_metadata: dict = {}
+        connection_alive = True
+        user: KhojUser = request.user.object
+        subscribed: bool = has_required_scope(request, ["premium"])
+        event_delimiter = "␃🔚␗"
+        q = unquote(q)
+        nonlocal conversation_id
+
+        uploaded_image_url = None
+        if image:
+            decoded_string = unquote(image)
+            base64_data = decoded_string.split(",", 1)[1]
+            image_bytes = base64.b64decode(base64_data)
+            webp_image_bytes = convert_image_to_webp(image_bytes)
+            try:
+                uploaded_image_url = upload_image_to_bucket(webp_image_bytes, request.user.object.id)
+            except:
+                uploaded_image_url = None
+
+        async def send_event(event_type: ChatEvent, data: str | dict):
+            nonlocal connection_alive, ttft
+            if not connection_alive or await request.is_disconnected():
+                connection_alive = False
+                logger.warn(f"User {user} disconnected from {common.client} client")
+                return
+            try:
+                if event_type == ChatEvent.END_LLM_RESPONSE:
+                    collect_telemetry()
+                if event_type == ChatEvent.START_LLM_RESPONSE:
+                    ttft = time.perf_counter() - start_time
+                if event_type == ChatEvent.MESSAGE:
+                    yield data
+                elif event_type == ChatEvent.REFERENCES or stream:
+                    yield json.dumps({"type": event_type.value, "data": data}, ensure_ascii=False)
+            except asyncio.CancelledError as e:
+                connection_alive = False
+                logger.warn(f"User {user} disconnected from {common.client} client: {e}")
+                return
+            except Exception as e:
+                connection_alive = False
+                logger.error(f"Failed to stream chat API response to {user} on {common.client}: {e}", exc_info=True)
+                return
+            finally:
+                yield event_delimiter
+
+        async def send_llm_response(response: str):
+            async for result in send_event(ChatEvent.START_LLM_RESPONSE, ""):
+                yield result
+            async for result in send_event(ChatEvent.MESSAGE, response):
+                yield result
+            async for result in send_event(ChatEvent.END_LLM_RESPONSE, ""):
+                yield result
+
+        def collect_telemetry():
+            # Gather chat response telemetry
+            nonlocal chat_metadata
+            latency = time.perf_counter() - start_time
+            cmd_set = set([cmd.value for cmd in conversation_commands])
+            chat_metadata = chat_metadata or {}
+            chat_metadata["conversation_command"] = cmd_set
+            chat_metadata["agent"] = conversation.agent.slug if conversation.agent else None
+            chat_metadata["latency"] = f"{latency:.3f}"
+            chat_metadata["ttft_latency"] = f"{ttft:.3f}"
+
+            logger.info(f"Chat response time to first token: {ttft:.3f} seconds")
+            logger.info(f"Chat response total time: {latency:.3f} seconds")
+            update_telemetry_state(
+                request=request,
+                telemetry_type="api",
+                api="chat",
+                client=request.user.client_app,
+                user_agent=request.headers.get("user-agent"),
+                host=request.headers.get("host"),
+                metadata=chat_metadata,
+            )
+
+        conversation_commands = [get_conversation_command(query=q, any_references=True)]
+
+        conversation = await ConversationAdapters.aget_conversation_by_user(
+            user, client_application=request.user.client_app, conversation_id=conversation_id, title=title
+        )
+        if not conversation:
+            async for result in send_llm_response(f"Conversation {conversation_id} not found"):
+                yield result
+            return
+        conversation_id = conversation.id
+
+        await is_ready_to_chat(user)
+
+        user_name = await aget_user_name(user)
+        location = None
+        if city or region or country:
+            location = LocationData(city=city, region=region, country=country)
+
+        if is_query_empty(q):
+            async for result in send_llm_response("Please ask your query to get started."):
+                yield result
+            return
+
+        user_message_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+        meta_log = conversation.conversation_log
+        is_automated_task = conversation_commands == [ConversationCommand.AutomatedTask]
+
+        if conversation_commands == [ConversationCommand.Default] or is_automated_task:
+            conversation_commands = await aget_relevant_information_sources(
+                q, meta_log, is_automated_task, subscribed=subscribed, uploaded_image_url=uploaded_image_url
+            )
+            conversation_commands_str = ", ".join([cmd.value for cmd in conversation_commands])
+            async for result in send_event(
+                ChatEvent.STATUS, f"**Chose Data Sources to Search:** {conversation_commands_str}"
+            ):
+                yield result
+
+            mode = await aget_relevant_output_modes(q, meta_log, is_automated_task, uploaded_image_url)
+            async for result in send_event(ChatEvent.STATUS, f"**Decided Response Mode:** {mode.value}"):
+                yield result
+            if mode not in conversation_commands:
+                conversation_commands.append(mode)
+
+        for cmd in conversation_commands:
+            await conversation_command_rate_limiter.update_and_check_if_valid(request, cmd)
+            q = q.replace(f"/{cmd.value}", "").strip()
+
+        used_slash_summarize = conversation_commands == [ConversationCommand.Summarize]
+        file_filters = conversation.file_filters if conversation else []
+        # Skip trying to summarize if
+        if (
+            # summarization intent was inferred
+            ConversationCommand.Summarize in conversation_commands
+            # and not triggered via slash command
+            and not used_slash_summarize
+            # but we can't actually summarize
+            and len(file_filters) != 1
+        ):
+            conversation_commands.remove(ConversationCommand.Summarize)
+        elif ConversationCommand.Summarize in conversation_commands:
+            response_log = ""
+            if len(file_filters) == 0:
+                response_log = "No files selected for summarization. Please add files using the section on the left."
+                async for result in send_llm_response(response_log):
+                    yield result
+            elif len(file_filters) > 1:
+                response_log = "Only one file can be selected for summarization."
+                async for result in send_llm_response(response_log):
+                    yield result
+            else:
+                try:
+                    file_object = await FileObjectAdapters.async_get_file_objects_by_name(user, file_filters[0])
+                    if len(file_object) == 0:
+                        response_log = "Sorry, we couldn't find the full text of this file. Please re-upload the document and try again."
+                        async for result in send_llm_response(response_log):
+                            yield result
+                        return
+                    contextual_data = " ".join([file.raw_text for file in file_object])
+                    if not q:
+                        q = "Create a general summary of the file"
+                    async for result in send_event(
+                        ChatEvent.STATUS, f"**Constructing Summary Using:** {file_object[0].file_name}"
+                    ):
+                        yield result
+
+                    response = await extract_relevant_summary(
+                        q, contextual_data, subscribed=subscribed, uploaded_image_url=uploaded_image_url
+                    )
+                    response_log = str(response)
+                    async for result in send_llm_response(response_log):
+                        yield result
+                except Exception as e:
+                    response_log = "Error summarizing file."
+                    logger.error(f"Error summarizing file for {user.email}: {e}", exc_info=True)
+                    async for result in send_llm_response(response_log):
+                        yield result
+            await sync_to_async(save_to_conversation_log)(
+                q,
+                response_log,
+                user,
+                meta_log,
+                user_message_time,
+                intent_type="summarize",
+                client_application=request.user.client_app,
+                conversation_id=conversation_id,
+                uploaded_image_url=uploaded_image_url,
+            )
+            return
+
+        custom_filters = []
+        if conversation_commands == [ConversationCommand.Help]:
+            if not q:
+                conversation_config = await ConversationAdapters.aget_user_conversation_config(user)
+                if conversation_config == None:
+                    conversation_config = await ConversationAdapters.aget_default_conversation_config()
+                model_type = conversation_config.model_type
+                formatted_help = help_message.format(model=model_type, version=state.khoj_version, device=get_device())
+                async for result in send_llm_response(formatted_help):
+                    yield result
+                return
+            # Adding specification to search online specifically on khoj.dev pages.
+            custom_filters.append("site:khoj.dev")
+            conversation_commands.append(ConversationCommand.Online)
+
+        if ConversationCommand.Automation in conversation_commands:
+            try:
+                automation, crontime, query_to_run, subject = await create_automation(
+                    q, timezone, user, request.url, meta_log
+                )
+            except Exception as e:
+                logger.error(f"Error scheduling task {q} for {user.email}: {e}")
+                error_message = f"Unable to create automation. Ensure the automation doesn't already exist."
+                async for result in send_llm_response(error_message):
+                    yield result
+                return
+
+            llm_response = construct_automation_created_message(automation, crontime, query_to_run, subject)
+            await sync_to_async(save_to_conversation_log)(
+                q,
+                llm_response,
+                user,
+                meta_log,
+                user_message_time,
+                intent_type="automation",
+                client_application=request.user.client_app,
+                conversation_id=conversation_id,
+                inferred_queries=[query_to_run],
+                automation_id=automation.id,
+                uploaded_image_url=uploaded_image_url,
+            )
+            async for result in send_llm_response(llm_response):
+                yield result
+            return
+
+        # Gather Context
+        ## Extract Document References
+        compiled_references, inferred_queries, defiltered_query = [], [], None
+        async for result in extract_references_and_questions(
+            request,
+            meta_log,
+            q,
+            (n or 7),
+            d,
+            conversation_id,
+            conversation_commands,
+            location,
+            partial(send_event, ChatEvent.STATUS),
+            uploaded_image_url=uploaded_image_url,
+        ):
+            if isinstance(result, dict) and ChatEvent.STATUS in result:
+                yield result[ChatEvent.STATUS]
+            else:
+                compiled_references.extend(result[0])
+                inferred_queries.extend(result[1])
+                defiltered_query = result[2]
+
+        if not is_none_or_empty(compiled_references):
+            headings = "\n- " + "\n- ".join(set([c.get("compiled", c).split("\n")[0] for c in compiled_references]))
+            # Strip only leading # from headings
+            headings = headings.replace("#", "")
+            async for result in send_event(ChatEvent.STATUS, f"**Found Relevant Notes**: {headings}"):
+                yield result
+
+        online_results: Dict = dict()
+
+        if conversation_commands == [ConversationCommand.Notes] and not await EntryAdapters.auser_has_entries(user):
+            async for result in send_llm_response(f"{no_entries_found.format()}"):
+                yield result
+            return
+
+        if ConversationCommand.Notes in conversation_commands and is_none_or_empty(compiled_references):
+            conversation_commands.remove(ConversationCommand.Notes)
+
+        ## Gather Online References
+        if ConversationCommand.Online in conversation_commands:
+            try:
+                async for result in search_online(
+                    defiltered_query,
+                    meta_log,
+                    location,
+                    user,
+                    subscribed,
+                    partial(send_event, ChatEvent.STATUS),
+                    custom_filters,
+                    uploaded_image_url=uploaded_image_url,
+                ):
+                    if isinstance(result, dict) and ChatEvent.STATUS in result:
+                        yield result[ChatEvent.STATUS]
+                    else:
+                        online_results = result
+            except ValueError as e:
+                error_message = f"Error searching online: {e}. Attempting to respond without online results"
+                logger.warning(error_message)
+                async for result in send_llm_response(error_message):
+                    yield result
+                return
+
+        ## Gather Webpage References
+        if ConversationCommand.Webpage in conversation_commands:
+            try:
+                async for result in read_webpages(
+                    defiltered_query,
+                    meta_log,
+                    location,
+                    user,
+                    subscribed,
+                    partial(send_event, ChatEvent.STATUS),
+                    uploaded_image_url=uploaded_image_url,
+                ):
+                    if isinstance(result, dict) and ChatEvent.STATUS in result:
+                        yield result[ChatEvent.STATUS]
+                    else:
+                        direct_web_pages = result
+                webpages = []
+                for query in direct_web_pages:
+                    if online_results.get(query):
+                        online_results[query]["webpages"] = direct_web_pages[query]["webpages"]
+                    else:
+                        online_results[query] = {"webpages": direct_web_pages[query]["webpages"]}
+
+                    for webpage in direct_web_pages[query]["webpages"]:
+                        webpages.append(webpage["link"])
+                async for result in send_event(ChatEvent.STATUS, f"**Read web pages**: {webpages}"):
+                    yield result
+            except ValueError as e:
+                logger.warning(
+                    f"Error directly reading webpages: {e}. Attempting to respond without online results",
+                    exc_info=True,
+                )
+
+        ## Send Gathered References
+        async for result in send_event(
+            ChatEvent.REFERENCES,
+            {
+                "inferredQueries": inferred_queries,
+                "context": compiled_references,
+                "onlineContext": online_results,
+            },
+        ):
+            yield result
+
+        # Generate Output
+        ## Generate Image Output
+        if ConversationCommand.Image in conversation_commands:
+            async for result in text_to_image(
+                q,
+                user,
+                meta_log,
+                location_data=location,
+                references=compiled_references,
+                online_results=online_results,
+                subscribed=subscribed,
+                send_status_func=partial(send_event, ChatEvent.STATUS),
+                uploaded_image_url=uploaded_image_url,
+            ):
+                if isinstance(result, dict) and ChatEvent.STATUS in result:
+                    yield result[ChatEvent.STATUS]
+                else:
+                    image, status_code, improved_image_prompt, intent_type = result
+
+            if image is None or status_code != 200:
+                content_obj = {
+                    "content-type": "application/json",
+                    "intentType": intent_type,
+                    "detail": improved_image_prompt,
+                    "image": image,
+                }
+                async for result in send_llm_response(json.dumps(content_obj)):
+                    yield result
+                return
+
+            await sync_to_async(save_to_conversation_log)(
+                q,
+                image,
+                user,
+                meta_log,
+                user_message_time,
+                intent_type=intent_type,
+                inferred_queries=[improved_image_prompt],
+                client_application=request.user.client_app,
+                conversation_id=conversation_id,
+                compiled_references=compiled_references,
+                online_results=online_results,
+                uploaded_image_url=uploaded_image_url,
+            )
+            content_obj = {
+                "intentType": intent_type,
+                "inferredQueries": [improved_image_prompt],
+                "image": image,
+            }
+            async for result in send_llm_response(json.dumps(content_obj)):
+                yield result
+            return
+
+        ## Generate Text Output
+        async for result in send_event(ChatEvent.STATUS, f"**Generating a well-informed response**"):
+            yield result
+        llm_response, chat_metadata = await agenerate_chat_response(
+            defiltered_query,
+            meta_log,
+            conversation,
+            compiled_references,
+            online_results,
+            inferred_queries,
+            conversation_commands,
+            user,
+            request.user.client_app,
+            conversation_id,
+            location,
+            user_name,
+            uploaded_image_url,
+        )
+
+        # Send Response
+        async for result in send_event(ChatEvent.START_LLM_RESPONSE, ""):
+            yield result
+
+        continue_stream = True
+        iterator = AsyncIteratorWrapper(llm_response)
+        async for item in iterator:
+            if item is None:
+                async for result in send_event(ChatEvent.END_LLM_RESPONSE, ""):
+                    yield result
+                logger.debug("Finished streaming response")
+                return
+            if not connection_alive or not continue_stream:
+                continue
+            try:
+                async for result in send_event(ChatEvent.MESSAGE, f"{item}"):
+                    yield result
+            except Exception as e:
+                continue_stream = False
+                logger.info(f"User {user} disconnected. Emitting rest of responses to clear thread: {e}")
+
+    ## Stream Text Response
+    if stream:
+        return StreamingResponse(event_generator(q, image=image), media_type="text/plain")
+    ## Non-Streaming Text Response
+    else:
+        response_iterator = event_generator(q, image=image)
+        response_data = await read_chat_stream(response_iterator)
+        return Response(content=json.dumps(response_data), media_type="application/json", status_code=200)
```
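To make the deprecated endpoint's wire format concrete, here is a rough client-side sketch of consuming its response stream. The server URL, token, and query below are placeholders, and the framing — JSON status/reference events and raw message text separated by the `␃🔚␗` delimiter — is read off the `event_generator` above rather than any official client documentation:

```python
import json

import requests

# Placeholders; point these at a real Khoj deployment and user API token.
URL = "http://localhost:42110/api/chat"
HEADERS = {"Authorization": "Bearer <token>"}
PARAMS = {"q": "What is Khoj?", "stream": "true"}

EVENT_DELIMITER = "␃🔚␗"  # per event_generator in the diff above

with requests.get(URL, params=PARAMS, headers=HEADERS, stream=True) as r:
    r.encoding = "utf-8"  # response is streamed text/plain
    buffer = ""
    for chunk in r.iter_content(chunk_size=None, decode_unicode=True):
        buffer += chunk
        # Each event is either a JSON status/reference object or a raw
        # chunk of the LLM message, terminated by the delimiter.
        while EVENT_DELIMITER in buffer:
            event, buffer = buffer.split(EVENT_DELIMITER, 1)
            try:
                parsed = json.loads(event)
                print(f"[{parsed['type']}]", parsed["data"])
            except json.JSONDecodeError:
                print(event, end="")  # plain message text
```

Non-streaming callers (`stream=false`) instead receive a single JSON object, as the final `Response(...)` branch of the diff shows.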
khoj/routers/helpers.py
CHANGED
```diff
@@ -270,7 +270,7 @@ async def aget_relevant_information_sources(
     chat_history = construct_chat_history(conversation_history)
 
     if uploaded_image_url:
-        query = f"[placeholder for
+        query = f"[placeholder for user attached image]\n{query}"
 
     relevant_tools_prompt = prompts.pick_relevant_information_collection_tools.format(
         query=query,
@@ -330,7 +330,7 @@ async def aget_relevant_output_modes(
     chat_history = construct_chat_history(conversation_history)
 
     if uploaded_image_url:
-        query = f"
+        query = f"[placeholder for user attached image]\n{query}"
 
     relevant_mode_prompt = prompts.pick_relevant_output_mode.format(
         query=query,
@@ -1201,11 +1201,10 @@ def scheduled_chat(
     query_dict["conversation_id"] = [conversation_id]
 
     # Construct the URL to call the chat API with the scheduled query string
-
-    url = f"{scheme}://{calling_url.netloc}/api/chat?{encoded_query}"
+    url = f"{scheme}://{calling_url.netloc}/api/chat?client=khoj"
 
     # Construct the Headers for the chat API
-    headers = {"User-Agent": "Khoj"}
+    headers = {"User-Agent": "Khoj", "Content-Type": "application/json"}
     if not state.anonymous_mode:
         # Add authorization request header in non-anonymous mode
         token = get_khoj_tokens(user)
@@ -1216,7 +1215,7 @@ def scheduled_chat(
         headers["Authorization"] = f"Bearer {token}"
 
     # Call the chat API endpoint with authenticated user token and query
-    raw_response = requests.
+    raw_response = requests.post(url, headers=headers, json=query_dict)
 
     # Stop if the chat API call was not successful
    if raw_response.status_code != 200:
```
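For a concrete picture of what this change to `scheduled_chat` means in practice, here is a minimal sketch of the new request shape: the scheduled query now travels as a JSON body rather than a URL-encoded query string. The `query_dict` contents, host, and token below are hypothetical stand-ins, not values taken from Khoj internals:

```python
import requests

# Hypothetical scheduled query, mirroring the dict-of-lists shape that
# scheduled_chat builds (note query_dict["conversation_id"] = [...] above).
query_dict = {"q": ["Share a summary of today's notes"], "conversation_id": ["42"]}

url = "https://app.khoj.dev/api/chat?client=khoj"  # hypothetical host
headers = {
    "User-Agent": "Khoj",
    "Content-Type": "application/json",
    "Authorization": "Bearer <user-api-token>",  # added in non-anonymous mode
}

# The query parameters now travel in the POST body rather than the URL.
raw_response = requests.post(url, headers=headers, json=query_dict)
if raw_response.status_code != 200:
    raise RuntimeError(f"Scheduled chat call failed: {raw_response.status_code}")
```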
khoj/utils/constants.py
CHANGED
```diff
@@ -9,7 +9,7 @@ app_env_filepath = "~/.khoj/env"
 telemetry_server = "https://khoj.beta.haletic.com/v1/telemetry"
 content_directory = "~/.khoj/content/"
 default_offline_chat_model = "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF"
-default_online_chat_model = "gpt-
+default_online_chat_model = "gpt-4o-mini"
 
 empty_config = {
     "search-type": {
```