khoj 1.25.1.dev2__py3-none-any.whl → 1.25.1.dev12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. khoj/configure.py +3 -3
  2. khoj/database/adapters/__init__.py +47 -23
  3. khoj/database/admin.py +2 -1
  4. khoj/interface/compiled/404/index.html +1 -1
  5. khoj/interface/compiled/_next/static/chunks/7762-79f2205740622b5c.js +1 -0
  6. khoj/interface/compiled/_next/static/chunks/app/agents/page-f8d03847a0fa2539.js +1 -0
  7. khoj/interface/compiled/_next/static/chunks/{webpack-ace3bded0dbc790e.js → webpack-dff708c71e9234cb.js} +1 -1
  8. khoj/interface/compiled/agents/index.html +1 -1
  9. khoj/interface/compiled/agents/index.txt +2 -2
  10. khoj/interface/compiled/automations/index.html +1 -1
  11. khoj/interface/compiled/automations/index.txt +1 -1
  12. khoj/interface/compiled/chat/index.html +1 -1
  13. khoj/interface/compiled/chat/index.txt +2 -2
  14. khoj/interface/compiled/factchecker/index.html +1 -1
  15. khoj/interface/compiled/factchecker/index.txt +2 -2
  16. khoj/interface/compiled/index.html +1 -1
  17. khoj/interface/compiled/index.txt +2 -2
  18. khoj/interface/compiled/search/index.html +1 -1
  19. khoj/interface/compiled/search/index.txt +2 -2
  20. khoj/interface/compiled/settings/index.html +1 -1
  21. khoj/interface/compiled/settings/index.txt +1 -1
  22. khoj/interface/compiled/share/chat/index.html +1 -1
  23. khoj/interface/compiled/share/chat/index.txt +2 -2
  24. khoj/processor/image/generate.py +1 -2
  25. khoj/processor/tools/online_search.py +4 -6
  26. khoj/routers/api.py +1 -1
  27. khoj/routers/api_agents.py +4 -1
  28. khoj/routers/api_chat.py +5 -488
  29. khoj/routers/api_model.py +1 -1
  30. khoj/routers/helpers.py +47 -49
  31. khoj/utils/initialization.py +0 -3
  32. {khoj-1.25.1.dev2.dist-info → khoj-1.25.1.dev12.dist-info}/METADATA +18 -13
  33. {khoj-1.25.1.dev2.dist-info → khoj-1.25.1.dev12.dist-info}/RECORD +42 -42
  34. khoj/interface/compiled/_next/static/chunks/1269-2e52d48e7d0e5c61.js +0 -1
  35. khoj/interface/compiled/_next/static/chunks/app/agents/page-ad81f5e84372667b.js +0 -1
  36. /khoj/interface/compiled/_next/static/{GCWJ9zQV9mIpv2rrghzda → CGyts-FEbV6owmPboHtLL}/_buildManifest.js +0 -0
  37. /khoj/interface/compiled/_next/static/{GCWJ9zQV9mIpv2rrghzda → CGyts-FEbV6owmPboHtLL}/_ssgManifest.js +0 -0
  38. /khoj/interface/compiled/_next/static/chunks/{9178-899fe9a6b754ecfe.js → 9178-f208a3e6404714a9.js} +0 -0
  39. /khoj/interface/compiled/_next/static/chunks/{9417-29502e39c3e7d60c.js → 9417-1d158bf46d3a0dc9.js} +0 -0
  40. /khoj/interface/compiled/_next/static/chunks/{9479-7eed36fc954ef804.js → 9479-563e4d61f91d5a7c.js} +0 -0
  41. /khoj/interface/compiled/_next/static/chunks/app/{page-10a5aad6e04f3cf8.js → page-421d13f70c505dd9.js} +0 -0
  42. {khoj-1.25.1.dev2.dist-info → khoj-1.25.1.dev12.dist-info}/WHEEL +0 -0
  43. {khoj-1.25.1.dev2.dist-info → khoj-1.25.1.dev12.dist-info}/entry_points.txt +0 -0
  44. {khoj-1.25.1.dev2.dist-info → khoj-1.25.1.dev12.dist-info}/licenses/LICENSE +0 -0
khoj/routers/api_chat.py CHANGED
@@ -194,7 +194,7 @@ def chat_history(
     n: Optional[int] = None,
 ):
     user = request.user.object
-    validate_conversation_config()
+    validate_conversation_config(user)
 
     # Load Conversation History
     conversation = ConversationAdapters.get_conversation_by_user(
@@ -209,7 +209,7 @@ def chat_history(
 
     agent_metadata = None
     if conversation.agent:
-        if conversation.agent.privacy_level == Agent.PrivacyLevel.PRIVATE:
+        if conversation.agent.privacy_level == Agent.PrivacyLevel.PRIVATE and conversation.agent.creator != user:
             conversation.agent = None
         else:
             agent_metadata = {
@@ -694,7 +694,7 @@ async def chat(
                 q,
                 meta_log,
                 is_automated_task,
-                subscribed=subscribed,
+                user=user,
                 uploaded_image_url=uploaded_image_url,
                 agent=agent,
             )
@@ -704,7 +704,7 @@ async def chat(
             ):
                 yield result
 
-        mode = await aget_relevant_output_modes(q, meta_log, is_automated_task, uploaded_image_url, agent)
+        mode = await aget_relevant_output_modes(q, meta_log, is_automated_task, user, uploaded_image_url, agent)
         async for result in send_event(ChatEvent.STATUS, f"**Decided Response Mode:** {mode.value}"):
             yield result
         if mode not in conversation_commands:
@@ -767,8 +767,8 @@ async def chat(
                         q,
                         contextual_data,
                         conversation_history=meta_log,
-                        subscribed=subscribed,
                         uploaded_image_url=uploaded_image_url,
+                        user=user,
                         agent=agent,
                     )
                     response_log = str(response)
@@ -885,7 +885,6 @@ async def chat(
                     meta_log,
                     location,
                     user,
-                    subscribed,
                     partial(send_event, ChatEvent.STATUS),
                     custom_filters,
                     uploaded_image_url=uploaded_image_url,
@@ -910,7 +909,6 @@ async def chat(
                     meta_log,
                     location,
                     user,
-                    subscribed,
                     partial(send_event, ChatEvent.STATUS),
                     uploaded_image_url=uploaded_image_url,
                     agent=agent,
@@ -957,7 +955,6 @@ async def chat(
                 location_data=location,
                 references=compiled_references,
                 online_results=online_results,
-                subscribed=subscribed,
                 send_status_func=partial(send_event, ChatEvent.STATUS),
                 uploaded_image_url=uploaded_image_url,
                 agent=agent,
@@ -1049,483 +1046,3 @@ async def chat(
         response_iterator = event_generator(q, image=image)
         response_data = await read_chat_stream(response_iterator)
         return Response(content=json.dumps(response_data), media_type="application/json", status_code=200)
-
-
-# Deprecated API. Remove by end of September 2024
-@api_chat.get("")
-@requires(["authenticated"])
-async def get_chat(
-    request: Request,
-    common: CommonQueryParams,
-    q: str,
-    n: int = 7,
-    d: float = None,
-    stream: Optional[bool] = False,
-    title: Optional[str] = None,
-    conversation_id: Optional[str] = None,
-    city: Optional[str] = None,
-    region: Optional[str] = None,
-    country: Optional[str] = None,
-    timezone: Optional[str] = None,
-    image: Optional[str] = None,
-    rate_limiter_per_minute=Depends(
-        ApiUserRateLimiter(requests=60, subscribed_requests=60, window=60, slug="chat_minute")
-    ),
-    rate_limiter_per_day=Depends(
-        ApiUserRateLimiter(requests=600, subscribed_requests=600, window=60 * 60 * 24, slug="chat_day")
-    ),
-):
-    # Issue a deprecation warning
-    warnings.warn(
-        "The 'get_chat' API endpoint is deprecated. It will be removed by the end of September 2024.",
-        DeprecationWarning,
-        stacklevel=2,
-    )
-
-    async def event_generator(q: str, image: str):
-        start_time = time.perf_counter()
-        ttft = None
-        chat_metadata: dict = {}
-        connection_alive = True
-        user: KhojUser = request.user.object
-        subscribed: bool = has_required_scope(request, ["premium"])
-        event_delimiter = "␃🔚␗"
-        q = unquote(q)
-        nonlocal conversation_id
-
-        uploaded_image_url = None
-        if image:
-            decoded_string = unquote(image)
-            base64_data = decoded_string.split(",", 1)[1]
-            image_bytes = base64.b64decode(base64_data)
-            webp_image_bytes = convert_image_to_webp(image_bytes)
-            try:
-                uploaded_image_url = upload_image_to_bucket(webp_image_bytes, request.user.object.id)
-            except:
-                uploaded_image_url = None
-
-        async def send_event(event_type: ChatEvent, data: str | dict):
-            nonlocal connection_alive, ttft
-            if not connection_alive or await request.is_disconnected():
-                connection_alive = False
-                logger.warn(f"User {user} disconnected from {common.client} client")
-                return
-            try:
-                if event_type == ChatEvent.END_LLM_RESPONSE:
-                    collect_telemetry()
-                if event_type == ChatEvent.START_LLM_RESPONSE:
-                    ttft = time.perf_counter() - start_time
-                if event_type == ChatEvent.MESSAGE:
-                    yield data
-                elif event_type == ChatEvent.REFERENCES or stream:
-                    yield json.dumps({"type": event_type.value, "data": data}, ensure_ascii=False)
-            except asyncio.CancelledError as e:
-                connection_alive = False
-                logger.warn(f"User {user} disconnected from {common.client} client: {e}")
-                return
-            except Exception as e:
-                connection_alive = False
-                logger.error(f"Failed to stream chat API response to {user} on {common.client}: {e}", exc_info=True)
-                return
-            finally:
-                yield event_delimiter
-
-        async def send_llm_response(response: str):
-            async for result in send_event(ChatEvent.START_LLM_RESPONSE, ""):
-                yield result
-            async for result in send_event(ChatEvent.MESSAGE, response):
-                yield result
-            async for result in send_event(ChatEvent.END_LLM_RESPONSE, ""):
-                yield result
-
-        def collect_telemetry():
-            # Gather chat response telemetry
-            nonlocal chat_metadata
-            latency = time.perf_counter() - start_time
-            cmd_set = set([cmd.value for cmd in conversation_commands])
-            chat_metadata = chat_metadata or {}
-            chat_metadata["conversation_command"] = cmd_set
-            chat_metadata["agent"] = conversation.agent.slug if conversation.agent else None
-            chat_metadata["latency"] = f"{latency:.3f}"
-            chat_metadata["ttft_latency"] = f"{ttft:.3f}"
-
-            logger.info(f"Chat response time to first token: {ttft:.3f} seconds")
-            logger.info(f"Chat response total time: {latency:.3f} seconds")
-            update_telemetry_state(
-                request=request,
-                telemetry_type="api",
-                api="chat",
-                client=request.user.client_app,
-                user_agent=request.headers.get("user-agent"),
-                host=request.headers.get("host"),
-                metadata=chat_metadata,
-            )
-
-        conversation_commands = [get_conversation_command(query=q, any_references=True)]
-
-        conversation = await ConversationAdapters.aget_conversation_by_user(
-            user, client_application=request.user.client_app, conversation_id=conversation_id, title=title
-        )
-        if not conversation:
-            async for result in send_llm_response(f"Conversation {conversation_id} not found"):
-                yield result
-            return
-        conversation_id = conversation.id
-        agent = conversation.agent if conversation.agent else None
-
-        await is_ready_to_chat(user)
-
-        user_name = await aget_user_name(user)
-        location = None
-        if city or region or country:
-            location = LocationData(city=city, region=region, country=country)
-
-        if is_query_empty(q):
-            async for result in send_llm_response("Please ask your query to get started."):
-                yield result
-            return
-
-        user_message_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
-        meta_log = conversation.conversation_log
-        is_automated_task = conversation_commands == [ConversationCommand.AutomatedTask]
-
-        if conversation_commands == [ConversationCommand.Default] or is_automated_task:
-            conversation_commands = await aget_relevant_information_sources(
-                q, meta_log, is_automated_task, subscribed=subscribed, uploaded_image_url=uploaded_image_url
-            )
-            conversation_commands_str = ", ".join([cmd.value for cmd in conversation_commands])
-            async for result in send_event(
-                ChatEvent.STATUS, f"**Chose Data Sources to Search:** {conversation_commands_str}"
-            ):
-                yield result
-
-        mode = await aget_relevant_output_modes(q, meta_log, is_automated_task, uploaded_image_url)
-        async for result in send_event(ChatEvent.STATUS, f"**Decided Response Mode:** {mode.value}"):
-            yield result
-        if mode not in conversation_commands:
-            conversation_commands.append(mode)
-
-        for cmd in conversation_commands:
-            await conversation_command_rate_limiter.update_and_check_if_valid(request, cmd)
-            q = q.replace(f"/{cmd.value}", "").strip()
-
-        used_slash_summarize = conversation_commands == [ConversationCommand.Summarize]
-        file_filters = conversation.file_filters if conversation else []
-        # Skip trying to summarize if
-        if (
-            # summarization intent was inferred
-            ConversationCommand.Summarize in conversation_commands
-            # and not triggered via slash command
-            and not used_slash_summarize
-            # but we can't actually summarize
-            and len(file_filters) != 1
-        ):
-            conversation_commands.remove(ConversationCommand.Summarize)
-        elif ConversationCommand.Summarize in conversation_commands:
-            response_log = ""
-            if len(file_filters) == 0:
-                response_log = "No files selected for summarization. Please add files using the section on the left."
-                async for result in send_llm_response(response_log):
-                    yield result
-            elif len(file_filters) > 1:
-                response_log = "Only one file can be selected for summarization."
-                async for result in send_llm_response(response_log):
-                    yield result
-            else:
-                try:
-                    file_object = await FileObjectAdapters.async_get_file_objects_by_name(user, file_filters[0])
-                    if len(file_object) == 0:
-                        response_log = "Sorry, we couldn't find the full text of this file. Please re-upload the document and try again."
-                        async for result in send_llm_response(response_log):
-                            yield result
-                        return
-                    contextual_data = " ".join([file.raw_text for file in file_object])
-                    if not q:
-                        q = "Create a general summary of the file"
-                    async for result in send_event(
-                        ChatEvent.STATUS, f"**Constructing Summary Using:** {file_object[0].file_name}"
-                    ):
-                        yield result
-
-                    response = await extract_relevant_summary(
-                        q,
-                        contextual_data,
-                        conversation_history=meta_log,
-                        subscribed=subscribed,
-                        uploaded_image_url=uploaded_image_url,
-                    )
-                    response_log = str(response)
-                    async for result in send_llm_response(response_log):
-                        yield result
-                except Exception as e:
-                    response_log = "Error summarizing file."
-                    logger.error(f"Error summarizing file for {user.email}: {e}", exc_info=True)
-                    async for result in send_llm_response(response_log):
-                        yield result
-            await sync_to_async(save_to_conversation_log)(
-                q,
-                response_log,
-                user,
-                meta_log,
-                user_message_time,
-                intent_type="summarize",
-                client_application=request.user.client_app,
-                conversation_id=conversation_id,
-                uploaded_image_url=uploaded_image_url,
-            )
-            return
-
-        custom_filters = []
-        if conversation_commands == [ConversationCommand.Help]:
-            if not q:
-                conversation_config = await ConversationAdapters.aget_user_conversation_config(user)
-                if conversation_config == None:
-                    conversation_config = await ConversationAdapters.aget_default_conversation_config()
-                model_type = conversation_config.model_type
-                formatted_help = help_message.format(model=model_type, version=state.khoj_version, device=get_device())
-                async for result in send_llm_response(formatted_help):
-                    yield result
-                return
-            # Adding specification to search online specifically on khoj.dev pages.
-            custom_filters.append("site:khoj.dev")
-            conversation_commands.append(ConversationCommand.Online)
-
-        if ConversationCommand.Automation in conversation_commands:
-            try:
-                automation, crontime, query_to_run, subject = await create_automation(
-                    q, timezone, user, request.url, meta_log
-                )
-            except Exception as e:
-                logger.error(f"Error scheduling task {q} for {user.email}: {e}")
-                error_message = f"Unable to create automation. Ensure the automation doesn't already exist."
-                async for result in send_llm_response(error_message):
-                    yield result
-                return
-
-            llm_response = construct_automation_created_message(automation, crontime, query_to_run, subject)
-            await sync_to_async(save_to_conversation_log)(
-                q,
-                llm_response,
-                user,
-                meta_log,
-                user_message_time,
-                intent_type="automation",
-                client_application=request.user.client_app,
-                conversation_id=conversation_id,
-                inferred_queries=[query_to_run],
-                automation_id=automation.id,
-                uploaded_image_url=uploaded_image_url,
-            )
-            async for result in send_llm_response(llm_response):
-                yield result
-            return
-
-        # Gather Context
-        ## Extract Document References
-        compiled_references, inferred_queries, defiltered_query = [], [], None
-        async for result in extract_references_and_questions(
-            request,
-            meta_log,
-            q,
-            (n or 7),
-            d,
-            conversation_id,
-            conversation_commands,
-            location,
-            partial(send_event, ChatEvent.STATUS),
-            uploaded_image_url=uploaded_image_url,
-        ):
-            if isinstance(result, dict) and ChatEvent.STATUS in result:
-                yield result[ChatEvent.STATUS]
-            else:
-                compiled_references.extend(result[0])
-                inferred_queries.extend(result[1])
-                defiltered_query = result[2]
-
-        if not is_none_or_empty(compiled_references):
-            headings = "\n- " + "\n- ".join(set([c.get("compiled", c).split("\n")[0] for c in compiled_references]))
-            # Strip only leading # from headings
-            headings = headings.replace("#", "")
-            async for result in send_event(ChatEvent.STATUS, f"**Found Relevant Notes**: {headings}"):
-                yield result
-
-        online_results: Dict = dict()
-
-        if conversation_commands == [ConversationCommand.Notes] and not await EntryAdapters.auser_has_entries(user):
-            async for result in send_llm_response(f"{no_entries_found.format()}"):
-                yield result
-            return
-
-        if ConversationCommand.Notes in conversation_commands and is_none_or_empty(compiled_references):
-            conversation_commands.remove(ConversationCommand.Notes)
-
-        ## Gather Online References
-        if ConversationCommand.Online in conversation_commands:
-            try:
-                async for result in search_online(
-                    defiltered_query,
-                    meta_log,
-                    location,
-                    user,
-                    subscribed,
-                    partial(send_event, ChatEvent.STATUS),
-                    custom_filters,
-                    uploaded_image_url=uploaded_image_url,
-                ):
-                    if isinstance(result, dict) and ChatEvent.STATUS in result:
-                        yield result[ChatEvent.STATUS]
-                    else:
-                        online_results = result
-            except ValueError as e:
-                error_message = f"Error searching online: {e}. Attempting to respond without online results"
-                logger.warning(error_message)
-                async for result in send_llm_response(error_message):
-                    yield result
-                return
-
-        ## Gather Webpage References
-        if ConversationCommand.Webpage in conversation_commands:
-            try:
-                async for result in read_webpages(
-                    defiltered_query,
-                    meta_log,
-                    location,
-                    user,
-                    subscribed,
-                    partial(send_event, ChatEvent.STATUS),
-                    uploaded_image_url=uploaded_image_url,
-                ):
-                    if isinstance(result, dict) and ChatEvent.STATUS in result:
-                        yield result[ChatEvent.STATUS]
-                    else:
-                        direct_web_pages = result
-                webpages = []
-                for query in direct_web_pages:
-                    if online_results.get(query):
-                        online_results[query]["webpages"] = direct_web_pages[query]["webpages"]
-                    else:
-                        online_results[query] = {"webpages": direct_web_pages[query]["webpages"]}
-
-                    for webpage in direct_web_pages[query]["webpages"]:
-                        webpages.append(webpage["link"])
-                async for result in send_event(ChatEvent.STATUS, f"**Read web pages**: {webpages}"):
-                    yield result
-            except ValueError as e:
-                logger.warning(
-                    f"Error directly reading webpages: {e}. Attempting to respond without online results",
-                    exc_info=True,
-                )
-
-        ## Send Gathered References
-        async for result in send_event(
-            ChatEvent.REFERENCES,
-            {
-                "inferredQueries": inferred_queries,
-                "context": compiled_references,
-                "onlineContext": online_results,
-            },
-        ):
-            yield result
-
-        # Generate Output
-        ## Generate Image Output
-        if ConversationCommand.Image in conversation_commands:
-            async for result in text_to_image(
-                q,
-                user,
-                meta_log,
-                location_data=location,
-                references=compiled_references,
-                online_results=online_results,
-                subscribed=subscribed,
-                send_status_func=partial(send_event, ChatEvent.STATUS),
-                uploaded_image_url=uploaded_image_url,
-            ):
-                if isinstance(result, dict) and ChatEvent.STATUS in result:
-                    yield result[ChatEvent.STATUS]
-                else:
-                    image, status_code, improved_image_prompt, intent_type = result
-
-            if image is None or status_code != 200:
-                content_obj = {
-                    "content-type": "application/json",
-                    "intentType": intent_type,
-                    "detail": improved_image_prompt,
-                    "image": image,
-                }
-                async for result in send_llm_response(json.dumps(content_obj)):
-                    yield result
-                return
-
-            await sync_to_async(save_to_conversation_log)(
-                q,
-                image,
-                user,
-                meta_log,
-                user_message_time,
-                intent_type=intent_type,
-                inferred_queries=[improved_image_prompt],
-                client_application=request.user.client_app,
-                conversation_id=conversation_id,
-                compiled_references=compiled_references,
-                online_results=online_results,
-                uploaded_image_url=uploaded_image_url,
-            )
-            content_obj = {
-                "intentType": intent_type,
-                "inferredQueries": [improved_image_prompt],
-                "image": image,
-            }
-            async for result in send_llm_response(json.dumps(content_obj)):
-                yield result
-            return
-
-        ## Generate Text Output
-        async for result in send_event(ChatEvent.STATUS, f"**Generating a well-informed response**"):
-            yield result
-        llm_response, chat_metadata = await agenerate_chat_response(
-            defiltered_query,
-            meta_log,
-            conversation,
-            compiled_references,
-            online_results,
-            inferred_queries,
-            conversation_commands,
-            user,
-            request.user.client_app,
-            conversation_id,
-            location,
-            user_name,
-            uploaded_image_url,
-        )
-
-        # Send Response
-        async for result in send_event(ChatEvent.START_LLM_RESPONSE, ""):
-            yield result
-
-        continue_stream = True
-        iterator = AsyncIteratorWrapper(llm_response)
-        async for item in iterator:
-            if item is None:
-                async for result in send_event(ChatEvent.END_LLM_RESPONSE, ""):
-                    yield result
-                logger.debug("Finished streaming response")
-                return
-            if not connection_alive or not continue_stream:
-                continue
-            try:
-                async for result in send_event(ChatEvent.MESSAGE, f"{item}"):
-                    yield result
-            except Exception as e:
-                continue_stream = False
-                logger.info(f"User {user} disconnected. Emitting rest of responses to clear thread: {e}")
-
-    ## Stream Text Response
-    if stream:
-        return StreamingResponse(event_generator(q, image=image), media_type="text/plain")
-    ## Non-Streaming Text Response
-    else:
-        response_iterator = event_generator(q, image=image)
-        response_data = await read_chat_stream(response_iterator)
-        return Response(content=json.dumps(response_data), media_type="application/json", status_code=200)
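The api_chat.py hunks above share one pattern: helpers that previously received a `subscribed` flag now receive the `user` object (`aget_relevant_information_sources`, `aget_relevant_output_modes`, `extract_relevant_summary`, `search_online`, `read_webpages`, `text_to_image`), `validate_conversation_config` is now called with the user, and chat history only strips agent metadata for private agents the requesting user did not create. The sketch below illustrates that last check in isolation; the `User`, `PrivacyLevel`, `Agent`, and `visible_agent` names are hypothetical stand-ins, not Khoj's Django models.

```python
from dataclasses import dataclass
from enum import Enum
from typing import Optional


# Hypothetical stand-ins for KhojUser and Agent (the real models live in khoj.database.models).
@dataclass(frozen=True)
class User:
    username: str


class PrivacyLevel(str, Enum):
    PRIVATE = "private"
    PUBLIC = "public"


@dataclass
class Agent:
    name: str
    privacy_level: PrivacyLevel
    creator: User


def visible_agent(agent: Optional[Agent], user: User) -> Optional[Agent]:
    """Hide a private agent from everyone except its creator, mirroring the updated
    `privacy_level == PRIVATE and creator != user` condition in chat_history."""
    if agent and agent.privacy_level == PrivacyLevel.PRIVATE and agent.creator != user:
        return None
    return agent


alice, bob = User("alice"), User("bob")
private_agent = Agent("research", PrivacyLevel.PRIVATE, creator=alice)
assert visible_agent(private_agent, alice) is private_agent  # creator still sees their agent
assert visible_agent(private_agent, bob) is None             # other users do not
```

Before this release the check keyed only on the privacy level, so even the agent's creator saw the agent stripped from their own conversation history.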
khoj/routers/api_model.py CHANGED
@@ -40,7 +40,7 @@ def get_user_chat_model(
     chat_model = ConversationAdapters.get_conversation_config(user)
 
     if chat_model is None:
-        chat_model = ConversationAdapters.get_default_conversation_config()
+        chat_model = ConversationAdapters.get_default_conversation_config(user)
 
     return Response(status_code=200, content=json.dumps({"id": chat_model.id, "chat_model": chat_model.chat_model}))
 
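The api_model.py change matches the adapter updates listed earlier in this diff (`khoj/database/adapters/__init__.py`): `get_default_conversation_config` now takes the user, so the fallback chat model can be resolved per user rather than from a single global default. Below is a hedged sketch of that call-site shape only; the in-memory stores and the idea of plan-tied per-user defaults are assumptions for illustration, not Khoj's actual adapter implementation.

```python
from typing import Dict, Optional


class ChatModel:
    """Illustrative stand-in for Khoj's chat model configuration record."""

    def __init__(self, id: int, chat_model: str) -> None:
        self.id = id
        self.chat_model = chat_model


SERVER_DEFAULT = ChatModel(1, "server-default-model")
USER_CONFIG: Dict[str, ChatModel] = {}    # chat model explicitly chosen by a user
USER_DEFAULTS: Dict[str, ChatModel] = {}  # assumed per-user defaults (e.g. tied to plan)


def get_default_conversation_config(user: Optional[str] = None) -> ChatModel:
    # New call shape: the default may differ per user; unknown users get the server default.
    if user is not None and user in USER_DEFAULTS:
        return USER_DEFAULTS[user]
    return SERVER_DEFAULT


def get_user_chat_model(user: str) -> ChatModel:
    # Mirrors the endpoint: prefer the user's own config, else the user-aware default.
    chat_model = USER_CONFIG.get(user)
    if chat_model is None:
        chat_model = get_default_conversation_config(user)
    return chat_model


print(get_user_chat_model("alice").chat_model)  # -> "server-default-model"
```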