khoj 1.27.2.dev18__py3-none-any.whl → 1.27.2.dev130__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. khoj/database/adapters/__init__.py +34 -10
  2. khoj/interface/compiled/404/index.html +1 -1
  3. khoj/interface/compiled/_next/static/chunks/1034-da58b679fcbb79c1.js +1 -0
  4. khoj/interface/compiled/_next/static/chunks/1467-5a191c1cd5bf0b83.js +1 -0
  5. khoj/interface/compiled/_next/static/chunks/1603-5d70d9dfcdcb1f10.js +1 -0
  6. khoj/interface/compiled/_next/static/chunks/3423-fa918f4e5365a35e.js +1 -0
  7. khoj/interface/compiled/_next/static/chunks/8423-3ad0bfb299801220.js +1 -0
  8. khoj/interface/compiled/_next/static/chunks/app/chat/page-7dc98df9c88828f0.js +1 -0
  9. khoj/interface/compiled/_next/static/chunks/app/factchecker/page-d887f55fe6d4f35d.js +1 -0
  10. khoj/interface/compiled/_next/static/chunks/app/{page-8f22b790e50dd722.js → page-d46244282af16509.js} +1 -1
  11. khoj/interface/compiled/_next/static/chunks/app/share/chat/{page-6a01e07fb244c10c.js → page-505b07bce608b34e.js} +1 -1
  12. khoj/interface/compiled/_next/static/chunks/{webpack-31239d193815e49e.js → webpack-8ae5ce45161bd98e.js} +1 -1
  13. khoj/interface/compiled/_next/static/css/{2272c73fc7a3b571.css → 26c1c33d0423a7d8.css} +1 -1
  14. khoj/interface/compiled/_next/static/css/e9c5fe555dd3050b.css +25 -0
  15. khoj/interface/compiled/agents/index.html +1 -1
  16. khoj/interface/compiled/agents/index.txt +2 -2
  17. khoj/interface/compiled/automations/index.html +1 -1
  18. khoj/interface/compiled/automations/index.txt +2 -2
  19. khoj/interface/compiled/chat/index.html +1 -1
  20. khoj/interface/compiled/chat/index.txt +2 -2
  21. khoj/interface/compiled/factchecker/index.html +1 -1
  22. khoj/interface/compiled/factchecker/index.txt +2 -2
  23. khoj/interface/compiled/index.html +1 -1
  24. khoj/interface/compiled/index.txt +2 -2
  25. khoj/interface/compiled/search/index.html +1 -1
  26. khoj/interface/compiled/search/index.txt +2 -2
  27. khoj/interface/compiled/settings/index.html +1 -1
  28. khoj/interface/compiled/settings/index.txt +2 -2
  29. khoj/interface/compiled/share/chat/index.html +1 -1
  30. khoj/interface/compiled/share/chat/index.txt +2 -2
  31. khoj/processor/conversation/anthropic/anthropic_chat.py +19 -10
  32. khoj/processor/conversation/anthropic/utils.py +37 -6
  33. khoj/processor/conversation/google/gemini_chat.py +23 -13
  34. khoj/processor/conversation/google/utils.py +34 -10
  35. khoj/processor/conversation/offline/chat_model.py +40 -15
  36. khoj/processor/conversation/openai/gpt.py +25 -10
  37. khoj/processor/conversation/openai/utils.py +43 -9
  38. khoj/processor/conversation/prompts.py +131 -22
  39. khoj/processor/conversation/utils.py +299 -6
  40. khoj/processor/image/generate.py +2 -0
  41. khoj/processor/tools/online_search.py +19 -8
  42. khoj/processor/tools/run_code.py +144 -0
  43. khoj/routers/api.py +11 -6
  44. khoj/routers/api_chat.py +177 -88
  45. khoj/routers/helpers.py +155 -59
  46. khoj/routers/research.py +321 -0
  47. khoj/search_filter/date_filter.py +1 -3
  48. khoj/search_filter/file_filter.py +1 -2
  49. khoj/search_type/text_search.py +3 -3
  50. khoj/utils/helpers.py +15 -2
  51. khoj/utils/yaml.py +4 -0
  52. {khoj-1.27.2.dev18.dist-info → khoj-1.27.2.dev130.dist-info}/METADATA +2 -1
  53. {khoj-1.27.2.dev18.dist-info → khoj-1.27.2.dev130.dist-info}/RECORD +61 -58
  54. khoj/interface/compiled/_next/static/chunks/1603-5138bb7c8035d9a6.js +0 -1
  55. khoj/interface/compiled/_next/static/chunks/2697-61fcba89fd87eab4.js +0 -1
  56. khoj/interface/compiled/_next/static/chunks/3423-8e9c420574a9fbe3.js +0 -1
  57. khoj/interface/compiled/_next/static/chunks/9479-a5e7ff4c7d1d7ee7.js +0 -1
  58. khoj/interface/compiled/_next/static/chunks/app/chat/page-151232d8417a1ea1.js +0 -1
  59. khoj/interface/compiled/_next/static/chunks/app/factchecker/page-798904432c2417c4.js +0 -1
  60. khoj/interface/compiled/_next/static/css/76d55eb435962b19.css +0 -25
  61. /khoj/interface/compiled/_next/static/{_gBBcNbs4wMKxKXhQs5E4 → N19uqHAJYqRAVxvuVwHfE}/_buildManifest.js +0 -0
  62. /khoj/interface/compiled/_next/static/{_gBBcNbs4wMKxKXhQs5E4 → N19uqHAJYqRAVxvuVwHfE}/_ssgManifest.js +0 -0
  63. /khoj/interface/compiled/_next/static/chunks/{1970-1d6d0c1b00b4f343.js → 1970-444843bea1d17d61.js} +0 -0
  64. /khoj/interface/compiled/_next/static/chunks/{9417-759984ad62caa3dc.js → 9417-19cfd1a9cb758e71.js} +0 -0
  65. /khoj/interface/compiled/_next/static/chunks/app/settings/{page-7946cabb9c54e22d.js → page-89e6737b2cc9fb3a.js} +0 -0
  66. {khoj-1.27.2.dev18.dist-info → khoj-1.27.2.dev130.dist-info}/WHEEL +0 -0
  67. {khoj-1.27.2.dev18.dist-info → khoj-1.27.2.dev130.dist-info}/entry_points.txt +0 -0
  68. {khoj-1.27.2.dev18.dist-info → khoj-1.27.2.dev130.dist-info}/licenses/LICENSE +0 -0
khoj/routers/helpers.py CHANGED
@@ -43,6 +43,7 @@ from khoj.database.adapters import (
43
43
  AutomationAdapters,
44
44
  ConversationAdapters,
45
45
  EntryAdapters,
46
+ FileObjectAdapters,
46
47
  ais_user_subscribed,
47
48
  create_khoj_token,
48
49
  get_khoj_tokens,
@@ -87,9 +88,11 @@ from khoj.processor.conversation.offline.chat_model import (
87
88
  )
88
89
  from khoj.processor.conversation.openai.gpt import converse, send_message_to_model
89
90
  from khoj.processor.conversation.utils import (
91
+ ChatEvent,
90
92
  ThreadedGenerator,
93
+ clean_json,
94
+ construct_chat_history,
91
95
  generate_chatml_messages_with_context,
92
- remove_json_codeblock,
93
96
  save_to_conversation_log,
94
97
  )
95
98
  from khoj.processor.speech.text_to_speech import is_eleven_labs_enabled
@@ -137,7 +140,7 @@ def validate_conversation_config(user: KhojUser):
137
140
  async def is_ready_to_chat(user: KhojUser):
138
141
  user_conversation_config = await ConversationAdapters.aget_user_conversation_config(user)
139
142
  if user_conversation_config == None:
140
- user_conversation_config = await ConversationAdapters.aget_default_conversation_config()
143
+ user_conversation_config = await ConversationAdapters.aget_default_conversation_config(user)
141
144
 
142
145
  if user_conversation_config and user_conversation_config.model_type == ChatModelOptions.ModelType.OFFLINE:
143
146
  chat_model = user_conversation_config.chat_model
@@ -210,21 +213,6 @@ def get_next_url(request: Request) -> str:
210
213
  return urljoin(str(request.base_url).rstrip("/"), next_path)
211
214
 
212
215
 
213
- def construct_chat_history(conversation_history: dict, n: int = 4, agent_name="AI") -> str:
214
- chat_history = ""
215
- for chat in conversation_history.get("chat", [])[-n:]:
216
- if chat["by"] == "khoj" and chat["intent"].get("type") in ["remember", "reminder", "summarize"]:
217
- chat_history += f"User: {chat['intent']['query']}\n"
218
- chat_history += f"{agent_name}: {chat['message']}\n"
219
- elif chat["by"] == "khoj" and ("text-to-image" in chat["intent"].get("type")):
220
- chat_history += f"User: {chat['intent']['query']}\n"
221
- chat_history += f"{agent_name}: [generated image redacted for space]\n"
222
- elif chat["by"] == "khoj" and ("excalidraw" in chat["intent"].get("type")):
223
- chat_history += f"User: {chat['intent']['query']}\n"
224
- chat_history += f"{agent_name}: {chat['intent']['inferred-queries'][0]}\n"
225
- return chat_history
226
-
227
-
228
216
  def get_conversation_command(query: str, any_references: bool = False) -> ConversationCommand:
229
217
  if query.startswith("/notes"):
230
218
  return ConversationCommand.Notes
@@ -244,6 +232,10 @@ def get_conversation_command(query: str, any_references: bool = False) -> Conver
244
232
  return ConversationCommand.Summarize
245
233
  elif query.startswith("/diagram"):
246
234
  return ConversationCommand.Diagram
235
+ elif query.startswith("/code"):
236
+ return ConversationCommand.Code
237
+ elif query.startswith("/research"):
238
+ return ConversationCommand.Research
247
239
  # If no relevant notes found for the given query
248
240
  elif not any_references:
249
241
  return ConversationCommand.General
@@ -301,6 +293,7 @@ async def aget_relevant_information_sources(
301
293
  user: KhojUser,
302
294
  query_images: List[str] = None,
303
295
  agent: Agent = None,
296
+ tracer: dict = {},
304
297
  ):
305
298
  """
306
299
  Given a query, determine which of the available tools the agent should use in order to answer appropriately.
@@ -337,11 +330,11 @@ async def aget_relevant_information_sources(
337
330
  relevant_tools_prompt,
338
331
  response_type="json_object",
339
332
  user=user,
333
+ tracer=tracer,
340
334
  )
341
335
 
342
336
  try:
343
- response = response.strip()
344
- response = remove_json_codeblock(response)
337
+ response = clean_json(response)
345
338
  response = json.loads(response)
346
339
  response = [q.strip() for q in response["source"] if q.strip()]
347
340
  if not isinstance(response, list) or not response or len(response) == 0:
@@ -378,6 +371,7 @@ async def aget_relevant_output_modes(
378
371
  user: KhojUser = None,
379
372
  query_images: List[str] = None,
380
373
  agent: Agent = None,
374
+ tracer: dict = {},
381
375
  ):
382
376
  """
383
377
  Given a query, determine which of the available tools the agent should use in order to answer appropriately.
@@ -413,11 +407,12 @@ async def aget_relevant_output_modes(
413
407
  )
414
408
 
415
409
  with timer("Chat actor: Infer output mode for chat response", logger):
416
- response = await send_message_to_model_wrapper(relevant_mode_prompt, response_type="json_object", user=user)
410
+ response = await send_message_to_model_wrapper(
411
+ relevant_mode_prompt, response_type="json_object", user=user, tracer=tracer
412
+ )
417
413
 
418
414
  try:
419
- response = response.strip()
420
- response = remove_json_codeblock(response)
415
+ response = clean_json(response)
421
416
  response = json.loads(response)
422
417
 
423
418
  if is_none_or_empty(response):
@@ -444,6 +439,7 @@ async def infer_webpage_urls(
444
439
  user: KhojUser,
445
440
  query_images: List[str] = None,
446
441
  agent: Agent = None,
442
+ tracer: dict = {},
447
443
  ) -> List[str]:
448
444
  """
449
445
  Infer webpage links from the given query
@@ -468,12 +464,16 @@ async def infer_webpage_urls(
468
464
 
469
465
  with timer("Chat actor: Infer webpage urls to read", logger):
470
466
  response = await send_message_to_model_wrapper(
471
- online_queries_prompt, query_images=query_images, response_type="json_object", user=user
467
+ online_queries_prompt,
468
+ query_images=query_images,
469
+ response_type="json_object",
470
+ user=user,
471
+ tracer=tracer,
472
472
  )
473
473
 
474
474
  # Validate that the response is a non-empty, JSON-serializable list of URLs
475
475
  try:
476
- response = response.strip()
476
+ response = clean_json(response)
477
477
  urls = json.loads(response)
478
478
  valid_unique_urls = {str(url).strip() for url in urls["links"] if is_valid_url(url)}
479
479
  if is_none_or_empty(valid_unique_urls):
@@ -490,6 +490,7 @@ async def generate_online_subqueries(
490
490
  user: KhojUser,
491
491
  query_images: List[str] = None,
492
492
  agent: Agent = None,
493
+ tracer: dict = {},
493
494
  ) -> List[str]:
494
495
  """
495
496
  Generate subqueries from the given query
@@ -514,13 +515,16 @@ async def generate_online_subqueries(
514
515
 
515
516
  with timer("Chat actor: Generate online search subqueries", logger):
516
517
  response = await send_message_to_model_wrapper(
517
- online_queries_prompt, query_images=query_images, response_type="json_object", user=user
518
+ online_queries_prompt,
519
+ query_images=query_images,
520
+ response_type="json_object",
521
+ user=user,
522
+ tracer=tracer,
518
523
  )
519
524
 
520
525
  # Validate that the response is a non-empty, JSON-serializable list
521
526
  try:
522
- response = response.strip()
523
- response = remove_json_codeblock(response)
527
+ response = clean_json(response)
524
528
  response = json.loads(response)
525
529
  response = [q.strip() for q in response["queries"] if q.strip()]
526
530
  if not isinstance(response, list) or not response or len(response) == 0:
@@ -533,7 +537,7 @@ async def generate_online_subqueries(
533
537
 
534
538
 
535
539
  async def schedule_query(
536
- q: str, conversation_history: dict, user: KhojUser, query_images: List[str] = None
540
+ q: str, conversation_history: dict, user: KhojUser, query_images: List[str] = None, tracer: dict = {}
537
541
  ) -> Tuple[str, ...]:
538
542
  """
539
543
  Schedule the date, time to run the query. Assume the server timezone is UTC.
@@ -546,7 +550,7 @@ async def schedule_query(
546
550
  )
547
551
 
548
552
  raw_response = await send_message_to_model_wrapper(
549
- crontime_prompt, query_images=query_images, response_type="json_object", user=user
553
+ crontime_prompt, query_images=query_images, response_type="json_object", user=user, tracer=tracer
550
554
  )
551
555
 
552
556
  # Validate that the response is a non-empty, JSON-serializable list
@@ -561,7 +565,7 @@ async def schedule_query(
561
565
 
562
566
 
563
567
  async def extract_relevant_info(
564
- qs: set[str], corpus: str, user: KhojUser = None, agent: Agent = None
568
+ qs: set[str], corpus: str, user: KhojUser = None, agent: Agent = None, tracer: dict = {}
565
569
  ) -> Union[str, None]:
566
570
  """
567
571
  Extract relevant information for a given query from the target corpus
@@ -584,6 +588,7 @@ async def extract_relevant_info(
584
588
  extract_relevant_information,
585
589
  prompts.system_prompt_extract_relevant_information,
586
590
  user=user,
591
+ tracer=tracer,
587
592
  )
588
593
  return response.strip()
589
594
 
@@ -595,6 +600,7 @@ async def extract_relevant_summary(
595
600
  query_images: List[str] = None,
596
601
  user: KhojUser = None,
597
602
  agent: Agent = None,
603
+ tracer: dict = {},
598
604
  ) -> Union[str, None]:
599
605
  """
600
606
  Extract relevant information for a given query from the target corpus
@@ -622,10 +628,58 @@ async def extract_relevant_summary(
622
628
  prompts.system_prompt_extract_relevant_summary,
623
629
  user=user,
624
630
  query_images=query_images,
631
+ tracer=tracer,
625
632
  )
626
633
  return response.strip()
627
634
 
628
635
 
636
+ async def generate_summary_from_files(
637
+ q: str,
638
+ user: KhojUser,
639
+ file_filters: List[str],
640
+ meta_log: dict,
641
+ query_images: List[str] = None,
642
+ agent: Agent = None,
643
+ send_status_func: Optional[Callable] = None,
644
+ tracer: dict = {},
645
+ ):
646
+ try:
647
+ file_object = None
648
+ if await EntryAdapters.aagent_has_entries(agent):
649
+ file_names = await EntryAdapters.aget_agent_entry_filepaths(agent)
650
+ if len(file_names) > 0:
651
+ file_object = await FileObjectAdapters.async_get_file_objects_by_name(None, file_names.pop(), agent)
652
+
653
+ if len(file_filters) > 0:
654
+ file_object = await FileObjectAdapters.async_get_file_objects_by_name(user, file_filters[0])
655
+
656
+ if len(file_object) == 0:
657
+ response_log = "Sorry, I couldn't find the full text of this file."
658
+ yield response_log
659
+ return
660
+ contextual_data = " ".join([file.raw_text for file in file_object])
661
+ if not q:
662
+ q = "Create a general summary of the file"
663
+ async for result in send_status_func(f"**Constructing Summary Using:** {file_object[0].file_name}"):
664
+ yield {ChatEvent.STATUS: result}
665
+
666
+ response = await extract_relevant_summary(
667
+ q,
668
+ contextual_data,
669
+ conversation_history=meta_log,
670
+ query_images=query_images,
671
+ user=user,
672
+ agent=agent,
673
+ tracer=tracer,
674
+ )
675
+
676
+ yield str(response)
677
+ except Exception as e:
678
+ response_log = "Error summarizing file. Please try again, or contact support."
679
+ logger.error(f"Error summarizing file for {user.email}: {e}", exc_info=True)
680
+ yield result
681
+
682
+
629
683
  async def generate_excalidraw_diagram(
630
684
  q: str,
631
685
  conversation_history: Dict[str, Any],
@@ -636,6 +690,7 @@ async def generate_excalidraw_diagram(
636
690
  user: KhojUser = None,
637
691
  agent: Agent = None,
638
692
  send_status_func: Optional[Callable] = None,
693
+ tracer: dict = {},
639
694
  ):
640
695
  if send_status_func:
641
696
  async for event in send_status_func("**Enhancing the Diagramming Prompt**"):
@@ -650,6 +705,7 @@ async def generate_excalidraw_diagram(
650
705
  query_images=query_images,
651
706
  user=user,
652
707
  agent=agent,
708
+ tracer=tracer,
653
709
  )
654
710
 
655
711
  if send_status_func:
@@ -660,6 +716,7 @@ async def generate_excalidraw_diagram(
660
716
  q=better_diagram_description_prompt,
661
717
  user=user,
662
718
  agent=agent,
719
+ tracer=tracer,
663
720
  )
664
721
 
665
722
  yield better_diagram_description_prompt, excalidraw_diagram_description
@@ -674,6 +731,7 @@ async def generate_better_diagram_description(
674
731
  query_images: List[str] = None,
675
732
  user: KhojUser = None,
676
733
  agent: Agent = None,
734
+ tracer: dict = {},
677
735
  ) -> str:
678
736
  """
679
737
  Generate a diagram description from the given query and context
@@ -711,7 +769,7 @@ async def generate_better_diagram_description(
711
769
 
712
770
  with timer("Chat actor: Generate better diagram description", logger):
713
771
  response = await send_message_to_model_wrapper(
714
- improve_diagram_description_prompt, query_images=query_images, user=user
772
+ improve_diagram_description_prompt, query_images=query_images, user=user, tracer=tracer
715
773
  )
716
774
  response = response.strip()
717
775
  if response.startswith(('"', "'")) and response.endswith(('"', "'")):
@@ -724,6 +782,7 @@ async def generate_excalidraw_diagram_from_description(
724
782
  q: str,
725
783
  user: KhojUser = None,
726
784
  agent: Agent = None,
785
+ tracer: dict = {},
727
786
  ) -> str:
728
787
  personality_context = (
729
788
  prompts.personality_context.format(personality=agent.personality) if agent and agent.personality else ""
@@ -735,9 +794,10 @@ async def generate_excalidraw_diagram_from_description(
735
794
  )
736
795
 
737
796
  with timer("Chat actor: Generate excalidraw diagram", logger):
738
- raw_response = await send_message_to_model_wrapper(message=excalidraw_diagram_generation, user=user)
739
- raw_response = raw_response.strip()
740
- raw_response = remove_json_codeblock(raw_response)
797
+ raw_response = await send_message_to_model_wrapper(
798
+ query=excalidraw_diagram_generation, user=user, tracer=tracer
799
+ )
800
+ raw_response = clean_json(raw_response)
741
801
  response: Dict[str, str] = json.loads(raw_response)
742
802
  if not response or not isinstance(response, List) or not isinstance(response[0], Dict):
743
803
  # TODO Some additional validation here that it's a valid Excalidraw diagram
@@ -756,6 +816,7 @@ async def generate_better_image_prompt(
756
816
  query_images: Optional[List[str]] = None,
757
817
  user: KhojUser = None,
758
818
  agent: Agent = None,
819
+ tracer: dict = {},
759
820
  ) -> str:
760
821
  """
761
822
  Generate a better image prompt from the given query
@@ -802,7 +863,9 @@ async def generate_better_image_prompt(
802
863
  )
803
864
 
804
865
  with timer("Chat actor: Generate contextual image prompt", logger):
805
- response = await send_message_to_model_wrapper(image_prompt, query_images=query_images, user=user)
866
+ response = await send_message_to_model_wrapper(
867
+ image_prompt, query_images=query_images, user=user, tracer=tracer
868
+ )
806
869
  response = response.strip()
807
870
  if response.startswith(('"', "'")) and response.endswith(('"', "'")):
808
871
  response = response[1:-1]
@@ -811,11 +874,13 @@ async def generate_better_image_prompt(
811
874
 
812
875
 
813
876
  async def send_message_to_model_wrapper(
814
- message: str,
877
+ query: str,
815
878
  system_message: str = "",
816
879
  response_type: str = "text",
817
880
  user: KhojUser = None,
818
881
  query_images: List[str] = None,
882
+ context: str = "",
883
+ tracer: dict = {},
819
884
  ):
820
885
  conversation_config: ChatModelOptions = await ConversationAdapters.aget_default_conversation_config(user)
821
886
  vision_available = conversation_config.vision_enabled
@@ -845,7 +910,8 @@ async def send_message_to_model_wrapper(
845
910
 
846
911
  loaded_model = state.offline_chat_processor_config.loaded_model
847
912
  truncated_messages = generate_chatml_messages_with_context(
848
- user_message=message,
913
+ user_message=query,
914
+ context_message=context,
849
915
  system_message=system_message,
850
916
  model_name=chat_model,
851
917
  loaded_model=loaded_model,
@@ -862,6 +928,7 @@ async def send_message_to_model_wrapper(
862
928
  max_prompt_size=max_tokens,
863
929
  streaming=False,
864
930
  response_type=response_type,
931
+ tracer=tracer,
865
932
  )
866
933
 
867
934
  elif model_type == ChatModelOptions.ModelType.OPENAI:
@@ -869,7 +936,8 @@ async def send_message_to_model_wrapper(
869
936
  api_key = openai_chat_config.api_key
870
937
  api_base_url = openai_chat_config.api_base_url
871
938
  truncated_messages = generate_chatml_messages_with_context(
872
- user_message=message,
939
+ user_message=query,
940
+ context_message=context,
873
941
  system_message=system_message,
874
942
  model_name=chat_model,
875
943
  max_prompt_size=max_tokens,
@@ -885,11 +953,13 @@ async def send_message_to_model_wrapper(
885
953
  model=chat_model,
886
954
  response_type=response_type,
887
955
  api_base_url=api_base_url,
956
+ tracer=tracer,
888
957
  )
889
958
  elif model_type == ChatModelOptions.ModelType.ANTHROPIC:
890
959
  api_key = conversation_config.openai_config.api_key
891
960
  truncated_messages = generate_chatml_messages_with_context(
892
- user_message=message,
961
+ user_message=query,
962
+ context_message=context,
893
963
  system_message=system_message,
894
964
  model_name=chat_model,
895
965
  max_prompt_size=max_tokens,
@@ -903,11 +973,14 @@ async def send_message_to_model_wrapper(
903
973
  messages=truncated_messages,
904
974
  api_key=api_key,
905
975
  model=chat_model,
976
+ response_type=response_type,
977
+ tracer=tracer,
906
978
  )
907
979
  elif model_type == ChatModelOptions.ModelType.GOOGLE:
908
980
  api_key = conversation_config.openai_config.api_key
909
981
  truncated_messages = generate_chatml_messages_with_context(
910
- user_message=message,
982
+ user_message=query,
983
+ context_message=context,
911
984
  system_message=system_message,
912
985
  model_name=chat_model,
913
986
  max_prompt_size=max_tokens,
@@ -918,7 +991,7 @@ async def send_message_to_model_wrapper(
918
991
  )
919
992
 
920
993
  return gemini_send_message_to_model(
921
- messages=truncated_messages, api_key=api_key, model=chat_model, response_type=response_type
994
+ messages=truncated_messages, api_key=api_key, model=chat_model, response_type=response_type, tracer=tracer
922
995
  )
923
996
  else:
924
997
  raise HTTPException(status_code=500, detail="Invalid conversation config")
@@ -929,6 +1002,7 @@ def send_message_to_model_wrapper_sync(
929
1002
  system_message: str = "",
930
1003
  response_type: str = "text",
931
1004
  user: KhojUser = None,
1005
+ tracer: dict = {},
932
1006
  ):
933
1007
  conversation_config: ChatModelOptions = ConversationAdapters.get_default_conversation_config(user)
934
1008
 
@@ -961,6 +1035,7 @@ def send_message_to_model_wrapper_sync(
961
1035
  max_prompt_size=max_tokens,
962
1036
  streaming=False,
963
1037
  response_type=response_type,
1038
+ tracer=tracer,
964
1039
  )
965
1040
 
966
1041
  elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
@@ -975,7 +1050,11 @@ def send_message_to_model_wrapper_sync(
975
1050
  )
976
1051
 
977
1052
  openai_response = send_message_to_model(
978
- messages=truncated_messages, api_key=api_key, model=chat_model, response_type=response_type
1053
+ messages=truncated_messages,
1054
+ api_key=api_key,
1055
+ model=chat_model,
1056
+ response_type=response_type,
1057
+ tracer=tracer,
979
1058
  )
980
1059
 
981
1060
  return openai_response
@@ -995,6 +1074,8 @@ def send_message_to_model_wrapper_sync(
995
1074
  messages=truncated_messages,
996
1075
  api_key=api_key,
997
1076
  model=chat_model,
1077
+ response_type=response_type,
1078
+ tracer=tracer,
998
1079
  )
999
1080
 
1000
1081
  elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
@@ -1013,6 +1094,7 @@ def send_message_to_model_wrapper_sync(
1013
1094
  api_key=api_key,
1014
1095
  model=chat_model,
1015
1096
  response_type=response_type,
1097
+ tracer=tracer,
1016
1098
  )
1017
1099
  else:
1018
1100
  raise HTTPException(status_code=500, detail="Invalid conversation config")
@@ -1024,6 +1106,7 @@ def generate_chat_response(
1024
1106
  conversation: Conversation,
1025
1107
  compiled_references: List[Dict] = [],
1026
1108
  online_results: Dict[str, Dict] = {},
1109
+ code_results: Dict[str, Dict] = {},
1027
1110
  inferred_queries: List[str] = [],
1028
1111
  conversation_commands: List[ConversationCommand] = [ConversationCommand.Default],
1029
1112
  user: KhojUser = None,
@@ -1031,7 +1114,10 @@ def generate_chat_response(
1031
1114
  conversation_id: str = None,
1032
1115
  location_data: LocationData = None,
1033
1116
  user_name: Optional[str] = None,
1117
+ meta_research: str = "",
1034
1118
  query_images: Optional[List[str]] = None,
1119
+ tracer: dict = {},
1120
+ train_of_thought: List[Any] = [],
1035
1121
  ) -> Tuple[Union[ThreadedGenerator, Iterator[str]], Dict[str, str]]:
1036
1122
  # Initialize Variables
1037
1123
  chat_response = None
@@ -1039,6 +1125,9 @@ def generate_chat_response(
1039
1125
 
1040
1126
  metadata = {}
1041
1127
  agent = AgentAdapters.get_conversation_agent_by_id(conversation.agent.id) if conversation.agent else None
1128
+ query_to_run = q
1129
+ if meta_research:
1130
+ query_to_run = f"AI Research: {meta_research} {q}"
1042
1131
  try:
1043
1132
  partial_completion = partial(
1044
1133
  save_to_conversation_log,
@@ -1047,10 +1136,13 @@ def generate_chat_response(
1047
1136
  meta_log=meta_log,
1048
1137
  compiled_references=compiled_references,
1049
1138
  online_results=online_results,
1139
+ code_results=code_results,
1050
1140
  inferred_queries=inferred_queries,
1051
1141
  client_application=client_application,
1052
1142
  conversation_id=conversation_id,
1053
1143
  query_images=query_images,
1144
+ tracer=tracer,
1145
+ train_of_thought=train_of_thought,
1054
1146
  )
1055
1147
 
1056
1148
  conversation_config = ConversationAdapters.get_valid_conversation_config(user, conversation)
@@ -1064,9 +1156,9 @@ def generate_chat_response(
1064
1156
  if conversation_config.model_type == "offline":
1065
1157
  loaded_model = state.offline_chat_processor_config.loaded_model
1066
1158
  chat_response = converse_offline(
1159
+ user_query=query_to_run,
1067
1160
  references=compiled_references,
1068
1161
  online_results=online_results,
1069
- user_query=q,
1070
1162
  loaded_model=loaded_model,
1071
1163
  conversation_log=meta_log,
1072
1164
  completion_func=partial_completion,
@@ -1077,6 +1169,7 @@ def generate_chat_response(
1077
1169
  location_data=location_data,
1078
1170
  user_name=user_name,
1079
1171
  agent=agent,
1172
+ tracer=tracer,
1080
1173
  )
1081
1174
 
1082
1175
  elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
@@ -1085,9 +1178,10 @@ def generate_chat_response(
1085
1178
  chat_model = conversation_config.chat_model
1086
1179
  chat_response = converse(
1087
1180
  compiled_references,
1088
- q,
1181
+ query_to_run,
1089
1182
  query_images=query_images,
1090
1183
  online_results=online_results,
1184
+ code_results=code_results,
1091
1185
  conversation_log=meta_log,
1092
1186
  model=chat_model,
1093
1187
  api_key=api_key,
@@ -1100,15 +1194,17 @@ def generate_chat_response(
1100
1194
  user_name=user_name,
1101
1195
  agent=agent,
1102
1196
  vision_available=vision_available,
1197
+ tracer=tracer,
1103
1198
  )
1104
1199
 
1105
1200
  elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
1106
1201
  api_key = conversation_config.openai_config.api_key
1107
1202
  chat_response = converse_anthropic(
1108
1203
  compiled_references,
1109
- q,
1204
+ query_to_run,
1110
1205
  query_images=query_images,
1111
1206
  online_results=online_results,
1207
+ code_results=code_results,
1112
1208
  conversation_log=meta_log,
1113
1209
  model=conversation_config.chat_model,
1114
1210
  api_key=api_key,
@@ -1120,15 +1216,16 @@ def generate_chat_response(
1120
1216
  user_name=user_name,
1121
1217
  agent=agent,
1122
1218
  vision_available=vision_available,
1219
+ tracer=tracer,
1123
1220
  )
1124
1221
  elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
1125
1222
  api_key = conversation_config.openai_config.api_key
1126
1223
  chat_response = converse_gemini(
1127
1224
  compiled_references,
1128
- q,
1129
- query_images=query_images,
1130
- online_results=online_results,
1131
- conversation_log=meta_log,
1225
+ query_to_run,
1226
+ online_results,
1227
+ code_results,
1228
+ meta_log,
1132
1229
  model=conversation_config.chat_model,
1133
1230
  api_key=api_key,
1134
1231
  completion_func=partial_completion,
@@ -1139,6 +1236,7 @@ def generate_chat_response(
1139
1236
  user_name=user_name,
1140
1237
  agent=agent,
1141
1238
  vision_available=vision_available,
1239
+ tracer=tracer,
1142
1240
  )
1143
1241
 
1144
1242
  metadata.update({"chat_model": conversation_config.chat_model})
@@ -1495,9 +1593,15 @@ def scheduled_chat(
1495
1593
 
1496
1594
 
1497
1595
  async def create_automation(
1498
- q: str, timezone: str, user: KhojUser, calling_url: URL, meta_log: dict = {}, conversation_id: str = None
1596
+ q: str,
1597
+ timezone: str,
1598
+ user: KhojUser,
1599
+ calling_url: URL,
1600
+ meta_log: dict = {},
1601
+ conversation_id: str = None,
1602
+ tracer: dict = {},
1499
1603
  ):
1500
- crontime, query_to_run, subject = await schedule_query(q, meta_log, user)
1604
+ crontime, query_to_run, subject = await schedule_query(q, meta_log, user, tracer=tracer)
1501
1605
  job = await schedule_automation(query_to_run, subject, crontime, timezone, q, user, calling_url, conversation_id)
1502
1606
  return job, crontime, query_to_run, subject
1503
1607
 
@@ -1575,14 +1679,6 @@ Manage your automations [here](/automations).
1575
1679
  """.strip()
1576
1680
 
1577
1681
 
1578
- class ChatEvent(Enum):
1579
- START_LLM_RESPONSE = "start_llm_response"
1580
- END_LLM_RESPONSE = "end_llm_response"
1581
- MESSAGE = "message"
1582
- REFERENCES = "references"
1583
- STATUS = "status"
1584
-
1585
-
1586
1682
  class MessageProcessor:
1587
1683
  def __init__(self):
1588
1684
  self.references = {}