khoj 1.42.1.dev8__py3-none-any.whl → 1.42.2.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. khoj/database/adapters/__init__.py +3 -1
  2. khoj/database/models/__init__.py +9 -9
  3. khoj/interface/compiled/404/index.html +2 -2
  4. khoj/interface/compiled/_next/static/chunks/{2327-aa22697ed9c8d54a.js → 2327-f03b2a77f67b8f8c.js} +1 -1
  5. khoj/interface/compiled/_next/static/chunks/{5138-2cce449fd2454abf.js → 5138-81457f7f59956b56.js} +1 -1
  6. khoj/interface/compiled/_next/static/chunks/app/agents/layout-e00fb81dca656a10.js +1 -0
  7. khoj/interface/compiled/_next/static/chunks/app/agents/{page-774c78ff0f55a228.js → page-2fac1d5ac7192e73.js} +1 -1
  8. khoj/interface/compiled/_next/static/chunks/app/automations/{page-4454891c5007b870.js → page-465741d9149dfd48.js} +1 -1
  9. khoj/interface/compiled/_next/static/chunks/app/chat/layout-33934fc2d6ae6838.js +1 -0
  10. khoj/interface/compiled/_next/static/chunks/app/chat/{page-3c299bf8e6b1afd3.js → page-1726184cf1c1b86e.js} +1 -1
  11. khoj/interface/compiled/_next/static/chunks/app/{page-f7a0286dfc31ad6b.js → page-45ae5e99e8a61821.js} +1 -1
  12. khoj/interface/compiled/_next/static/chunks/app/search/layout-c02531d586972d7d.js +1 -0
  13. khoj/interface/compiled/_next/static/chunks/app/search/{page-f1a7f278c89e09b6.js → page-afb5e7ed13d221c1.js} +1 -1
  14. khoj/interface/compiled/_next/static/chunks/app/settings/{page-5d9134d4a97f8834.js → page-8fb6cc97be8774a7.js} +1 -1
  15. khoj/interface/compiled/_next/static/chunks/app/share/chat/layout-e8e5db7830bf3f47.js +1 -0
  16. khoj/interface/compiled/_next/static/chunks/app/share/chat/{page-32cd0ceb9ffbd777.js → page-9a167dc9b5fcd464.js} +1 -1
  17. khoj/interface/compiled/_next/static/chunks/{webpack-97e712397e673897.js → webpack-1c900156837baf90.js} +1 -1
  18. khoj/interface/compiled/_next/static/css/{93eeacc43e261162.css → c34713c98384ee87.css} +1 -1
  19. khoj/interface/compiled/_next/static/css/{0db53bacf81896f5.css → fca983d49c3dd1a3.css} +1 -1
  20. khoj/interface/compiled/agents/index.html +2 -2
  21. khoj/interface/compiled/agents/index.txt +2 -2
  22. khoj/interface/compiled/automations/index.html +2 -2
  23. khoj/interface/compiled/automations/index.txt +3 -3
  24. khoj/interface/compiled/chat/index.html +2 -2
  25. khoj/interface/compiled/chat/index.txt +2 -2
  26. khoj/interface/compiled/index.html +2 -2
  27. khoj/interface/compiled/index.txt +2 -2
  28. khoj/interface/compiled/search/index.html +2 -2
  29. khoj/interface/compiled/search/index.txt +2 -2
  30. khoj/interface/compiled/settings/index.html +2 -2
  31. khoj/interface/compiled/settings/index.txt +4 -4
  32. khoj/interface/compiled/share/chat/index.html +2 -2
  33. khoj/interface/compiled/share/chat/index.txt +2 -2
  34. khoj/processor/conversation/anthropic/anthropic_chat.py +7 -7
  35. khoj/processor/conversation/google/gemini_chat.py +7 -7
  36. khoj/processor/conversation/offline/chat_model.py +6 -6
  37. khoj/processor/conversation/openai/gpt.py +7 -7
  38. khoj/processor/conversation/utils.py +94 -89
  39. khoj/processor/image/generate.py +16 -11
  40. khoj/processor/operator/__init__.py +2 -3
  41. khoj/processor/operator/operator_agent_binary.py +11 -11
  42. khoj/processor/tools/online_search.py +9 -3
  43. khoj/processor/tools/run_code.py +5 -5
  44. khoj/routers/api.py +14 -8
  45. khoj/routers/api_chat.py +16 -16
  46. khoj/routers/helpers.py +44 -43
  47. khoj/routers/research.py +10 -10
  48. {khoj-1.42.1.dev8.dist-info → khoj-1.42.2.dev1.dist-info}/METADATA +1 -1
  49. {khoj-1.42.1.dev8.dist-info → khoj-1.42.2.dev1.dist-info}/RECORD +60 -60
  50. khoj/interface/compiled/_next/static/chunks/app/agents/layout-4e2a134ec26aa606.js +0 -1
  51. khoj/interface/compiled/_next/static/chunks/app/chat/layout-ad4d1792ab1a4108.js +0 -1
  52. khoj/interface/compiled/_next/static/chunks/app/search/layout-f5881c7ae3ba0795.js +0 -1
  53. khoj/interface/compiled/_next/static/chunks/app/share/chat/layout-abb6c5f4239ad7be.js +0 -1
  54. /khoj/interface/compiled/_next/static/{TrHI4J6qnG7RYFl2Irnqj → Dzg_ViqMwQEjqMgetZPRc}/_buildManifest.js +0 -0
  55. /khoj/interface/compiled/_next/static/{TrHI4J6qnG7RYFl2Irnqj → Dzg_ViqMwQEjqMgetZPRc}/_ssgManifest.js +0 -0
  56. /khoj/interface/compiled/_next/static/chunks/{1915-1943ee8a628b893c.js → 1915-ab4353eaca76f690.js} +0 -0
  57. /khoj/interface/compiled/_next/static/chunks/{2117-5a41630a2bd2eae8.js → 2117-1c18aa2098982bf9.js} +0 -0
  58. /khoj/interface/compiled/_next/static/chunks/{4363-e6ac2203564d1a3b.js → 4363-4efaf12abe696251.js} +0 -0
  59. /khoj/interface/compiled/_next/static/chunks/{4447-e038b251d626c340.js → 4447-5d44807c40355b1a.js} +0 -0
  60. /khoj/interface/compiled/_next/static/chunks/{8667-8136f74e9a086fca.js → 8667-adbe6017a66cef10.js} +0 -0
  61. /khoj/interface/compiled/_next/static/chunks/{9259-640fdd77408475df.js → 9259-d8bcd9da9e80c81e.js} +0 -0
  62. {khoj-1.42.1.dev8.dist-info → khoj-1.42.2.dev1.dist-info}/WHEEL +0 -0
  63. {khoj-1.42.1.dev8.dist-info → khoj-1.42.2.dev1.dist-info}/entry_points.txt +0 -0
  64. {khoj-1.42.1.dev8.dist-info → khoj-1.42.2.dev1.dist-info}/licenses/LICENSE +0 -0
khoj/routers/api_chat.py CHANGED
@@ -752,7 +752,7 @@ async def chat(
752
752
  q,
753
753
  chat_response="",
754
754
  user=user,
755
- meta_log=meta_log,
755
+ chat_history=chat_history,
756
756
  compiled_references=compiled_references,
757
757
  online_results=online_results,
758
758
  code_results=code_results,
@@ -918,7 +918,7 @@ async def chat(
918
918
  if city or region or country or country_code:
919
919
  location = LocationData(city=city, region=region, country=country, country_code=country_code)
920
920
  user_message_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
921
- meta_log = conversation.conversation_log
921
+ chat_history = conversation.messages
922
922
 
923
923
  # If interrupt flag is set, wait for the previous turn to be saved before proceeding
924
924
  if interrupt_flag:
@@ -964,14 +964,14 @@ async def chat(
964
964
  operator_results = [OperatorRun(**iter_dict) for iter_dict in last_message.operatorContext or []]
965
965
  train_of_thought = [thought.model_dump() for thought in last_message.trainOfThought or []]
966
966
  # Drop the interrupted message from conversation history
967
- meta_log["chat"].pop()
967
+ chat_history.pop()
968
968
  logger.info(f"Loaded interrupted partial context from conversation {conversation_id}.")
969
969
 
970
970
  if conversation_commands == [ConversationCommand.Default]:
971
971
  try:
972
972
  chosen_io = await aget_data_sources_and_output_format(
973
973
  q,
974
- meta_log,
974
+ chat_history,
975
975
  is_automated_task,
976
976
  user=user,
977
977
  query_images=uploaded_images,
@@ -1011,7 +1011,7 @@ async def chat(
1011
1011
  user=user,
1012
1012
  query=defiltered_query,
1013
1013
  conversation_id=conversation_id,
1014
- conversation_history=meta_log,
1014
+ conversation_history=conversation.messages,
1015
1015
  previous_iterations=list(research_results),
1016
1016
  query_images=uploaded_images,
1017
1017
  agent=agent,
@@ -1078,7 +1078,7 @@ async def chat(
1078
1078
  q=q,
1079
1079
  user=user,
1080
1080
  file_filters=file_filters,
1081
- meta_log=meta_log,
1081
+ chat_history=conversation.messages,
1082
1082
  query_images=uploaded_images,
1083
1083
  agent=agent,
1084
1084
  send_status_func=partial(send_event, ChatEvent.STATUS),
@@ -1123,7 +1123,7 @@ async def chat(
1123
1123
  if ConversationCommand.Automation in conversation_commands:
1124
1124
  try:
1125
1125
  automation, crontime, query_to_run, subject = await create_automation(
1126
- q, timezone, user, request.url, meta_log, tracer=tracer
1126
+ q, timezone, user, request.url, chat_history, tracer=tracer
1127
1127
  )
1128
1128
  except Exception as e:
1129
1129
  logger.error(f"Error scheduling task {q} for {user.email}: {e}")
@@ -1139,7 +1139,7 @@ async def chat(
1139
1139
  q,
1140
1140
  llm_response,
1141
1141
  user,
1142
- meta_log,
1142
+ chat_history,
1143
1143
  user_message_time,
1144
1144
  intent_type="automation",
1145
1145
  client_application=request.user.client_app,
@@ -1163,7 +1163,7 @@ async def chat(
1163
1163
  try:
1164
1164
  async for result in extract_references_and_questions(
1165
1165
  user,
1166
- meta_log,
1166
+ chat_history,
1167
1167
  q,
1168
1168
  (n or 7),
1169
1169
  d,
@@ -1212,7 +1212,7 @@ async def chat(
1212
1212
  try:
1213
1213
  async for result in search_online(
1214
1214
  defiltered_query,
1215
- meta_log,
1215
+ chat_history,
1216
1216
  location,
1217
1217
  user,
1218
1218
  partial(send_event, ChatEvent.STATUS),
@@ -1240,7 +1240,7 @@ async def chat(
1240
1240
  try:
1241
1241
  async for result in read_webpages(
1242
1242
  defiltered_query,
1243
- meta_log,
1243
+ chat_history,
1244
1244
  location,
1245
1245
  user,
1246
1246
  partial(send_event, ChatEvent.STATUS),
@@ -1281,7 +1281,7 @@ async def chat(
1281
1281
  context = f"# Iteration 1:\n#---\nNotes:\n{compiled_references}\n\nOnline Results:{online_results}"
1282
1282
  async for result in run_code(
1283
1283
  defiltered_query,
1284
- meta_log,
1284
+ chat_history,
1285
1285
  context,
1286
1286
  location,
1287
1287
  user,
@@ -1306,7 +1306,7 @@ async def chat(
1306
1306
  async for result in operate_environment(
1307
1307
  defiltered_query,
1308
1308
  user,
1309
- meta_log,
1309
+ chat_history,
1310
1310
  location,
1311
1311
  list(operator_results)[-1] if operator_results else None,
1312
1312
  query_images=uploaded_images,
@@ -1356,7 +1356,7 @@ async def chat(
1356
1356
  async for result in text_to_image(
1357
1357
  defiltered_query,
1358
1358
  user,
1359
- meta_log,
1359
+ chat_history,
1360
1360
  location_data=location,
1361
1361
  references=compiled_references,
1362
1362
  online_results=online_results,
@@ -1400,7 +1400,7 @@ async def chat(
1400
1400
 
1401
1401
  async for result in generate_mermaidjs_diagram(
1402
1402
  q=defiltered_query,
1403
- conversation_history=meta_log,
1403
+ chat_history=chat_history,
1404
1404
  location_data=location,
1405
1405
  note_references=compiled_references,
1406
1406
  online_results=online_results,
@@ -1456,7 +1456,7 @@ async def chat(
1456
1456
 
1457
1457
  llm_response, chat_metadata = await agenerate_chat_response(
1458
1458
  defiltered_query,
1459
- meta_log,
1459
+ chat_history,
1460
1460
  conversation,
1461
1461
  compiled_references,
1462
1462
  online_results,
khoj/routers/helpers.py CHANGED
@@ -55,6 +55,7 @@ from khoj.database.adapters import (
55
55
  )
56
56
  from khoj.database.models import (
57
57
  Agent,
58
+ ChatMessageModel,
58
59
  ChatModel,
59
60
  ClientApplication,
60
61
  Conversation,
@@ -285,7 +286,7 @@ async def acreate_title_from_history(
285
286
  """
286
287
  Create a title from the given conversation history
287
288
  """
288
- chat_history = construct_chat_history(conversation.conversation_log)
289
+ chat_history = construct_chat_history(conversation.messages)
289
290
 
290
291
  title_generation_prompt = prompts.conversation_title_generation.format(chat_history=chat_history)
291
292
 
@@ -345,7 +346,7 @@ async def acheck_if_safe_prompt(system_prompt: str, user: KhojUser = None, lax:
345
346
 
346
347
  async def aget_data_sources_and_output_format(
347
348
  query: str,
348
- conversation_history: dict,
349
+ chat_history: list[ChatMessageModel],
349
350
  is_task: bool,
350
351
  user: KhojUser,
351
352
  query_images: List[str] = None,
@@ -386,7 +387,7 @@ async def aget_data_sources_and_output_format(
386
387
  if len(agent_outputs) == 0 or output.value in agent_outputs:
387
388
  output_options_str += f'- "{output.value}": "{description}"\n'
388
389
 
389
- chat_history = construct_chat_history(conversation_history, n=6)
390
+ chat_history_str = construct_chat_history(chat_history, n=6)
390
391
 
391
392
  if query_images:
392
393
  query = f"[placeholder for {len(query_images)} user attached images]\n{query}"
@@ -399,7 +400,7 @@ async def aget_data_sources_and_output_format(
399
400
  query=query,
400
401
  sources=source_options_str,
401
402
  outputs=output_options_str,
402
- chat_history=chat_history,
403
+ chat_history=chat_history_str,
403
404
  personality_context=personality_context,
404
405
  )
405
406
 
@@ -462,7 +463,7 @@ async def aget_data_sources_and_output_format(
462
463
  async def infer_webpage_urls(
463
464
  q: str,
464
465
  max_webpages: int,
465
- conversation_history: dict,
466
+ chat_history: List[ChatMessageModel],
466
467
  location_data: LocationData,
467
468
  user: KhojUser,
468
469
  query_images: List[str] = None,
@@ -475,7 +476,7 @@ async def infer_webpage_urls(
475
476
  """
476
477
  location = f"{location_data}" if location_data else "Unknown"
477
478
  username = prompts.user_name.format(name=user.get_full_name()) if user.get_full_name() else ""
478
- chat_history = construct_chat_history(conversation_history)
479
+ chat_history_str = construct_chat_history(chat_history)
479
480
 
480
481
  utc_date = datetime.now(timezone.utc).strftime("%Y-%m-%d")
481
482
  personality_context = (
@@ -485,7 +486,7 @@ async def infer_webpage_urls(
485
486
  online_queries_prompt = prompts.infer_webpages_to_read.format(
486
487
  query=q,
487
488
  max_webpages=max_webpages,
488
- chat_history=chat_history,
489
+ chat_history=chat_history_str,
489
490
  current_date=utc_date,
490
491
  location=location,
491
492
  username=username,
@@ -526,7 +527,7 @@ async def infer_webpage_urls(
526
527
 
527
528
  async def generate_online_subqueries(
528
529
  q: str,
529
- conversation_history: dict,
530
+ chat_history: List[ChatMessageModel],
530
531
  location_data: LocationData,
531
532
  user: KhojUser,
532
533
  query_images: List[str] = None,
@@ -540,7 +541,7 @@ async def generate_online_subqueries(
540
541
  """
541
542
  location = f"{location_data}" if location_data else "Unknown"
542
543
  username = prompts.user_name.format(name=user.get_full_name()) if user.get_full_name() else ""
543
- chat_history = construct_chat_history(conversation_history)
544
+ chat_history_str = construct_chat_history(chat_history)
544
545
 
545
546
  utc_date = datetime.now(timezone.utc).strftime("%Y-%m-%d")
546
547
  personality_context = (
@@ -549,7 +550,7 @@ async def generate_online_subqueries(
549
550
 
550
551
  online_queries_prompt = prompts.online_search_conversation_subqueries.format(
551
552
  query=q,
552
- chat_history=chat_history,
553
+ chat_history=chat_history_str,
553
554
  max_queries=max_queries,
554
555
  current_date=utc_date,
555
556
  location=location,
@@ -591,16 +592,16 @@ async def generate_online_subqueries(
591
592
 
592
593
 
593
594
  def schedule_query(
594
- q: str, conversation_history: dict, user: KhojUser, query_images: List[str] = None, tracer: dict = {}
595
+ q: str, chat_history: List[ChatMessageModel], user: KhojUser, query_images: List[str] = None, tracer: dict = {}
595
596
  ) -> Tuple[str, str, str]:
596
597
  """
597
598
  Schedule the date, time to run the query. Assume the server timezone is UTC.
598
599
  """
599
- chat_history = construct_chat_history(conversation_history)
600
+ chat_history_str = construct_chat_history(chat_history)
600
601
 
601
602
  crontime_prompt = prompts.crontime_prompt.format(
602
603
  query=q,
603
- chat_history=chat_history,
604
+ chat_history=chat_history_str,
604
605
  )
605
606
 
606
607
  raw_response = send_message_to_model_wrapper_sync(
@@ -619,16 +620,16 @@ def schedule_query(
619
620
 
620
621
 
621
622
  async def aschedule_query(
622
- q: str, conversation_history: dict, user: KhojUser, query_images: List[str] = None, tracer: dict = {}
623
+ q: str, chat_history: List[ChatMessageModel], user: KhojUser, query_images: List[str] = None, tracer: dict = {}
623
624
  ) -> Tuple[str, str, str]:
624
625
  """
625
626
  Schedule the date, time to run the query. Assume the server timezone is UTC.
626
627
  """
627
- chat_history = construct_chat_history(conversation_history)
628
+ chat_history_str = construct_chat_history(chat_history)
628
629
 
629
630
  crontime_prompt = prompts.crontime_prompt.format(
630
631
  query=q,
631
- chat_history=chat_history,
632
+ chat_history=chat_history_str,
632
633
  )
633
634
 
634
635
  raw_response = await send_message_to_model_wrapper(
@@ -681,7 +682,7 @@ async def extract_relevant_info(
681
682
  async def extract_relevant_summary(
682
683
  q: str,
683
684
  corpus: str,
684
- conversation_history: dict,
685
+ chat_history: List[ChatMessageModel] = [],
685
686
  query_images: List[str] = None,
686
687
  user: KhojUser = None,
687
688
  agent: Agent = None,
@@ -698,11 +699,11 @@ async def extract_relevant_summary(
698
699
  prompts.personality_context.format(personality=agent.personality) if agent and agent.personality else ""
699
700
  )
700
701
 
701
- chat_history = construct_chat_history(conversation_history)
702
+ chat_history_str = construct_chat_history(chat_history)
702
703
 
703
704
  extract_relevant_information = prompts.extract_relevant_summary.format(
704
705
  query=q,
705
- chat_history=chat_history,
706
+ chat_history=chat_history_str,
706
707
  corpus=corpus.strip(),
707
708
  personality_context=personality_context,
708
709
  )
@@ -725,7 +726,7 @@ async def generate_summary_from_files(
725
726
  q: str,
726
727
  user: KhojUser,
727
728
  file_filters: List[str],
728
- meta_log: dict,
729
+ chat_history: List[ChatMessageModel] = [],
729
730
  query_images: List[str] = None,
730
731
  agent: Agent = None,
731
732
  send_status_func: Optional[Callable] = None,
@@ -766,7 +767,7 @@ async def generate_summary_from_files(
766
767
  response = await extract_relevant_summary(
767
768
  q,
768
769
  contextual_data,
769
- conversation_history=meta_log,
770
+ chat_history=chat_history,
770
771
  query_images=query_images,
771
772
  user=user,
772
773
  agent=agent,
@@ -782,7 +783,7 @@ async def generate_summary_from_files(
782
783
 
783
784
  async def generate_excalidraw_diagram(
784
785
  q: str,
785
- conversation_history: Dict[str, Any],
786
+ chat_history: List[ChatMessageModel],
786
787
  location_data: LocationData,
787
788
  note_references: List[Dict[str, Any]],
788
789
  online_results: Optional[dict] = None,
@@ -799,7 +800,7 @@ async def generate_excalidraw_diagram(
799
800
 
800
801
  better_diagram_description_prompt = await generate_better_diagram_description(
801
802
  q=q,
802
- conversation_history=conversation_history,
803
+ chat_history=chat_history,
803
804
  location_data=location_data,
804
805
  note_references=note_references,
805
806
  online_results=online_results,
@@ -834,7 +835,7 @@ async def generate_excalidraw_diagram(
834
835
 
835
836
  async def generate_better_diagram_description(
836
837
  q: str,
837
- conversation_history: Dict[str, Any],
838
+ chat_history: List[ChatMessageModel],
838
839
  location_data: LocationData,
839
840
  note_references: List[Dict[str, Any]],
840
841
  online_results: Optional[dict] = None,
@@ -857,7 +858,7 @@ async def generate_better_diagram_description(
857
858
 
858
859
  user_references = "\n\n".join([f"# {item['compiled']}" for item in note_references])
859
860
 
860
- chat_history = construct_chat_history(conversation_history)
861
+ chat_history_str = construct_chat_history(chat_history)
861
862
 
862
863
  simplified_online_results = {}
863
864
 
@@ -870,7 +871,7 @@ async def generate_better_diagram_description(
870
871
 
871
872
  improve_diagram_description_prompt = prompts.improve_excalidraw_diagram_description_prompt.format(
872
873
  query=q,
873
- chat_history=chat_history,
874
+ chat_history=chat_history_str,
874
875
  location=location,
875
876
  current_date=today_date,
876
877
  references=user_references,
@@ -939,7 +940,7 @@ async def generate_excalidraw_diagram_from_description(
939
940
 
940
941
  async def generate_mermaidjs_diagram(
941
942
  q: str,
942
- conversation_history: Dict[str, Any],
943
+ chat_history: List[ChatMessageModel],
943
944
  location_data: LocationData,
944
945
  note_references: List[Dict[str, Any]],
945
946
  online_results: Optional[dict] = None,
@@ -956,7 +957,7 @@ async def generate_mermaidjs_diagram(
956
957
 
957
958
  better_diagram_description_prompt = await generate_better_mermaidjs_diagram_description(
958
959
  q=q,
959
- conversation_history=conversation_history,
960
+ chat_history=chat_history,
960
961
  location_data=location_data,
961
962
  note_references=note_references,
962
963
  online_results=online_results,
@@ -985,7 +986,7 @@ async def generate_mermaidjs_diagram(
985
986
 
986
987
  async def generate_better_mermaidjs_diagram_description(
987
988
  q: str,
988
- conversation_history: Dict[str, Any],
989
+ chat_history: List[ChatMessageModel],
989
990
  location_data: LocationData,
990
991
  note_references: List[Dict[str, Any]],
991
992
  online_results: Optional[dict] = None,
@@ -1008,7 +1009,7 @@ async def generate_better_mermaidjs_diagram_description(
1008
1009
 
1009
1010
  user_references = "\n\n".join([f"# {item['compiled']}" for item in note_references])
1010
1011
 
1011
- chat_history = construct_chat_history(conversation_history)
1012
+ chat_history_str = construct_chat_history(chat_history)
1012
1013
 
1013
1014
  simplified_online_results = {}
1014
1015
 
@@ -1021,7 +1022,7 @@ async def generate_better_mermaidjs_diagram_description(
1021
1022
 
1022
1023
  improve_diagram_description_prompt = prompts.improve_mermaid_js_diagram_description_prompt.format(
1023
1024
  query=q,
1024
- chat_history=chat_history,
1025
+ chat_history=chat_history_str,
1025
1026
  location=location,
1026
1027
  current_date=today_date,
1027
1028
  references=user_references,
@@ -1160,7 +1161,7 @@ async def send_message_to_model_wrapper(
1160
1161
  query_images: List[str] = None,
1161
1162
  context: str = "",
1162
1163
  query_files: str = None,
1163
- conversation_log: dict = {},
1164
+ chat_history: list[ChatMessageModel] = [],
1164
1165
  agent_chat_model: ChatModel = None,
1165
1166
  tracer: dict = {},
1166
1167
  ):
@@ -1193,7 +1194,7 @@ async def send_message_to_model_wrapper(
1193
1194
  user_message=query,
1194
1195
  context_message=context,
1195
1196
  system_message=system_message,
1196
- conversation_log=conversation_log,
1197
+ chat_history=chat_history,
1197
1198
  model_name=chat_model_name,
1198
1199
  loaded_model=loaded_model,
1199
1200
  tokenizer_name=tokenizer,
@@ -1260,7 +1261,7 @@ def send_message_to_model_wrapper_sync(
1260
1261
  user: KhojUser = None,
1261
1262
  query_images: List[str] = None,
1262
1263
  query_files: str = "",
1263
- conversation_log: dict = {},
1264
+ chat_history: List[ChatMessageModel] = [],
1264
1265
  tracer: dict = {},
1265
1266
  ):
1266
1267
  chat_model: ChatModel = ConversationAdapters.get_default_chat_model(user)
@@ -1284,7 +1285,7 @@ def send_message_to_model_wrapper_sync(
1284
1285
  truncated_messages = generate_chatml_messages_with_context(
1285
1286
  user_message=message,
1286
1287
  system_message=system_message,
1287
- conversation_log=conversation_log,
1288
+ chat_history=chat_history,
1288
1289
  model_name=chat_model_name,
1289
1290
  loaded_model=loaded_model,
1290
1291
  max_prompt_size=max_tokens,
@@ -1342,7 +1343,7 @@ def send_message_to_model_wrapper_sync(
1342
1343
 
1343
1344
  async def agenerate_chat_response(
1344
1345
  q: str,
1345
- meta_log: dict,
1346
+ chat_history: List[ChatMessageModel],
1346
1347
  conversation: Conversation,
1347
1348
  compiled_references: List[Dict] = [],
1348
1349
  online_results: Dict[str, Dict] = {},
@@ -1379,7 +1380,7 @@ async def agenerate_chat_response(
1379
1380
  save_to_conversation_log,
1380
1381
  q,
1381
1382
  user=user,
1382
- meta_log=meta_log,
1383
+ chat_history=chat_history,
1383
1384
  compiled_references=compiled_references,
1384
1385
  online_results=online_results,
1385
1386
  code_results=code_results,
@@ -1424,7 +1425,7 @@ async def agenerate_chat_response(
1424
1425
  references=compiled_references,
1425
1426
  online_results=online_results,
1426
1427
  loaded_model=loaded_model,
1427
- conversation_log=meta_log,
1428
+ chat_history=chat_history,
1428
1429
  completion_func=partial_completion,
1429
1430
  conversation_commands=conversation_commands,
1430
1431
  model_name=chat_model.name,
@@ -1450,7 +1451,7 @@ async def agenerate_chat_response(
1450
1451
  online_results=online_results,
1451
1452
  code_results=code_results,
1452
1453
  operator_results=operator_results,
1453
- conversation_log=meta_log,
1454
+ chat_history=chat_history,
1454
1455
  model=chat_model_name,
1455
1456
  api_key=api_key,
1456
1457
  api_base_url=openai_chat_config.api_base_url,
@@ -1480,7 +1481,7 @@ async def agenerate_chat_response(
1480
1481
  online_results=online_results,
1481
1482
  code_results=code_results,
1482
1483
  operator_results=operator_results,
1483
- conversation_log=meta_log,
1484
+ chat_history=chat_history,
1484
1485
  model=chat_model.name,
1485
1486
  api_key=api_key,
1486
1487
  api_base_url=api_base_url,
@@ -1508,7 +1509,7 @@ async def agenerate_chat_response(
1508
1509
  online_results=online_results,
1509
1510
  code_results=code_results,
1510
1511
  operator_results=operator_results,
1511
- conversation_log=meta_log,
1512
+ chat_history=chat_history,
1512
1513
  model=chat_model.name,
1513
1514
  api_key=api_key,
1514
1515
  api_base_url=api_base_url,
@@ -2005,11 +2006,11 @@ async def create_automation(
2005
2006
  timezone: str,
2006
2007
  user: KhojUser,
2007
2008
  calling_url: URL,
2008
- meta_log: dict = {},
2009
+ chat_history: List[ChatMessageModel] = [],
2009
2010
  conversation_id: str = None,
2010
2011
  tracer: dict = {},
2011
2012
  ):
2012
- crontime, query_to_run, subject = await aschedule_query(q, meta_log, user, tracer=tracer)
2013
+ crontime, query_to_run, subject = await aschedule_query(q, chat_history, user, tracer=tracer)
2013
2014
  job = await aschedule_automation(query_to_run, subject, crontime, timezone, q, user, calling_url, conversation_id)
2014
2015
  return job, crontime, query_to_run, subject
2015
2016
 
khoj/routers/research.py CHANGED
@@ -10,7 +10,7 @@ import yaml
10
10
  from pydantic import BaseModel, Field
11
11
 
12
12
  from khoj.database.adapters import AgentAdapters, EntryAdapters
13
- from khoj.database.models import Agent, KhojUser
13
+ from khoj.database.models import Agent, ChatMessageModel, KhojUser
14
14
  from khoj.processor.conversation import prompts
15
15
  from khoj.processor.conversation.utils import (
16
16
  OperatorRun,
@@ -84,7 +84,7 @@ class PlanningResponse(BaseModel):
84
84
 
85
85
  async def apick_next_tool(
86
86
  query: str,
87
- conversation_history: dict,
87
+ conversation_history: List[ChatMessageModel],
88
88
  user: KhojUser = None,
89
89
  location: LocationData = None,
90
90
  user_name: str = None,
@@ -166,18 +166,18 @@ async def apick_next_tool(
166
166
  query = f"[placeholder for user attached images]\n{query}"
167
167
 
168
168
  # Construct chat history with user and iteration history with researcher agent for context
169
- previous_iterations_history = construct_iteration_history(previous_iterations, prompts.previous_iteration, query)
170
- iteration_chat_log = {"chat": conversation_history.get("chat", []) + previous_iterations_history}
169
+ iteration_chat_history = construct_iteration_history(previous_iterations, prompts.previous_iteration, query)
170
+ chat_and_research_history = conversation_history + iteration_chat_history
171
171
 
172
172
  # Plan function execution for the next tool
173
- query = prompts.plan_function_execution_next_tool.format(query=query) if previous_iterations_history else query
173
+ query = prompts.plan_function_execution_next_tool.format(query=query) if iteration_chat_history else query
174
174
 
175
175
  try:
176
176
  with timer("Chat actor: Infer information sources to refer", logger):
177
177
  response = await send_message_to_model_wrapper(
178
178
  query=query,
179
179
  system_message=function_planning_prompt,
180
- conversation_log=iteration_chat_log,
180
+ chat_history=chat_and_research_history,
181
181
  response_type="json_object",
182
182
  response_schema=planning_response_model,
183
183
  deepthought=True,
@@ -198,6 +198,8 @@ async def apick_next_tool(
198
198
 
199
199
  try:
200
200
  response = load_complex_json(response)
201
+ if not isinstance(response, dict):
202
+ raise ValueError(f"Expected dict response, got {type(response).__name__}: {response}")
201
203
  selected_tool = response.get("tool", None)
202
204
  generated_query = response.get("query", None)
203
205
  scratchpad = response.get("scratchpad", None)
@@ -236,7 +238,7 @@ async def research(
236
238
  user: KhojUser,
237
239
  query: str,
238
240
  conversation_id: str,
239
- conversation_history: dict,
241
+ conversation_history: List[ChatMessageModel],
240
242
  previous_iterations: List[ResearchIteration],
241
243
  query_images: List[str],
242
244
  agent: Agent = None,
@@ -259,9 +261,7 @@ async def research(
259
261
  if current_iteration := len(previous_iterations) > 0:
260
262
  logger.info(f"Continuing research with the previous {len(previous_iterations)} iteration results.")
261
263
  previous_iterations_history = construct_iteration_history(previous_iterations, prompts.previous_iteration)
262
- research_conversation_history["chat"] = (
263
- research_conversation_history.get("chat", []) + previous_iterations_history
264
- )
264
+ research_conversation_history += previous_iterations_history
265
265
 
266
266
  while current_iteration < MAX_ITERATIONS:
267
267
  # Check for cancellation at the start of each iteration
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: khoj
3
- Version: 1.42.1.dev8
3
+ Version: 1.42.2.dev1
4
4
  Summary: Your Second Brain
5
5
  Project-URL: Homepage, https://khoj.dev
6
6
  Project-URL: Documentation, https://docs.khoj.dev