khoj 1.27.2.dev15__py3-none-any.whl → 1.27.2.dev29__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. khoj/interface/compiled/404/index.html +1 -1
  2. khoj/interface/compiled/_next/static/chunks/1603-5138bb7c8035d9a6.js +1 -0
  3. khoj/interface/compiled/_next/static/chunks/app/agents/{page-2beaba7c9bb750bd.js → page-5ae1e540bb5be8a9.js} +1 -1
  4. khoj/interface/compiled/_next/static/chunks/app/automations/{page-9b5c77e0b0dd772c.js → page-774ae3e033f938cd.js} +1 -1
  5. khoj/interface/compiled/_next/static/chunks/app/chat/{page-151232d8417a1ea1.js → page-97f5b61aaf46d364.js} +1 -1
  6. khoj/interface/compiled/_next/static/chunks/app/factchecker/{page-798904432c2417c4.js → page-d82403db2866bad8.js} +1 -1
  7. khoj/interface/compiled/_next/static/chunks/app/{page-4b6008223ea79955.js → page-4dc472cf6d674004.js} +1 -1
  8. khoj/interface/compiled/_next/static/chunks/app/search/{page-ab2995529ece3140.js → page-9b64f61caa5bd7f9.js} +1 -1
  9. khoj/interface/compiled/_next/static/chunks/app/settings/{page-7946cabb9c54e22d.js → page-7a8c382af2a7e870.js} +1 -1
  10. khoj/interface/compiled/_next/static/chunks/app/share/chat/{page-6a01e07fb244c10c.js → page-eb9e282691858f2e.js} +1 -1
  11. khoj/interface/compiled/_next/static/chunks/{webpack-878569182b3af4c6.js → webpack-2b720658ccc746f2.js} +1 -1
  12. khoj/interface/compiled/_next/static/css/4cae6c0e5c72fb2d.css +1 -0
  13. khoj/interface/compiled/_next/static/css/ddcc0cf73e062476.css +1 -0
  14. khoj/interface/compiled/agents/index.html +1 -1
  15. khoj/interface/compiled/agents/index.txt +2 -2
  16. khoj/interface/compiled/automations/index.html +1 -1
  17. khoj/interface/compiled/automations/index.txt +2 -2
  18. khoj/interface/compiled/chat/index.html +1 -1
  19. khoj/interface/compiled/chat/index.txt +2 -2
  20. khoj/interface/compiled/factchecker/index.html +1 -1
  21. khoj/interface/compiled/factchecker/index.txt +2 -2
  22. khoj/interface/compiled/index.html +1 -1
  23. khoj/interface/compiled/index.txt +2 -2
  24. khoj/interface/compiled/search/index.html +1 -1
  25. khoj/interface/compiled/search/index.txt +2 -2
  26. khoj/interface/compiled/settings/index.html +1 -1
  27. khoj/interface/compiled/settings/index.txt +2 -2
  28. khoj/interface/compiled/share/chat/index.html +1 -1
  29. khoj/interface/compiled/share/chat/index.txt +2 -2
  30. khoj/processor/conversation/anthropic/anthropic_chat.py +6 -1
  31. khoj/processor/conversation/anthropic/utils.py +25 -5
  32. khoj/processor/conversation/google/gemini_chat.py +8 -2
  33. khoj/processor/conversation/google/utils.py +34 -10
  34. khoj/processor/conversation/offline/chat_model.py +31 -7
  35. khoj/processor/conversation/openai/gpt.py +14 -2
  36. khoj/processor/conversation/openai/utils.py +43 -9
  37. khoj/processor/conversation/prompts.py +0 -16
  38. khoj/processor/conversation/utils.py +168 -1
  39. khoj/processor/image/generate.py +2 -0
  40. khoj/processor/tools/online_search.py +14 -5
  41. khoj/routers/api.py +5 -0
  42. khoj/routers/api_chat.py +23 -2
  43. khoj/routers/helpers.py +65 -13
  44. {khoj-1.27.2.dev15.dist-info → khoj-1.27.2.dev29.dist-info}/METADATA +2 -1
  45. {khoj-1.27.2.dev15.dist-info → khoj-1.27.2.dev29.dist-info}/RECORD +50 -50
  46. khoj/interface/compiled/_next/static/chunks/1603-b9d95833e0e025e8.js +0 -1
  47. khoj/interface/compiled/_next/static/css/592ca99f5122e75a.css +0 -1
  48. khoj/interface/compiled/_next/static/css/d738728883c68af8.css +0 -1
  49. /khoj/interface/compiled/_next/static/{vcyFRDGArOFXwUVotHIuv → atzIseFarmC7TIwq2BgHC}/_buildManifest.js +0 -0
  50. /khoj/interface/compiled/_next/static/{vcyFRDGArOFXwUVotHIuv → atzIseFarmC7TIwq2BgHC}/_ssgManifest.js +0 -0
  51. {khoj-1.27.2.dev15.dist-info → khoj-1.27.2.dev29.dist-info}/WHEEL +0 -0
  52. {khoj-1.27.2.dev15.dist-info → khoj-1.27.2.dev29.dist-info}/entry_points.txt +0 -0
  53. {khoj-1.27.2.dev15.dist-info → khoj-1.27.2.dev29.dist-info}/licenses/LICENSE +0 -0
khoj/processor/tools/online_search.py CHANGED
@@ -64,6 +64,7 @@ async def search_online(
  custom_filters: List[str] = [],
  query_images: List[str] = None,
  agent: Agent = None,
+ tracer: dict = {},
  ):
  query += " ".join(custom_filters)
  if not is_internet_connected():
@@ -73,7 +74,7 @@ async def search_online(

  # Breakdown the query into subqueries to get the correct answer
  subqueries = await generate_online_subqueries(
- query, conversation_history, location, user, query_images=query_images, agent=agent
+ query, conversation_history, location, user, query_images=query_images, agent=agent, tracer=tracer
  )
  response_dict = {}

@@ -111,7 +112,7 @@ async def search_online(
  async for event in send_status_func(f"**Reading web pages**: {webpage_links_str}"):
  yield {ChatEvent.STATUS: event}
  tasks = [
- read_webpage_and_extract_content(data["queries"], link, data["content"], user=user, agent=agent)
+ read_webpage_and_extract_content(data["queries"], link, data["content"], user=user, agent=agent, tracer=tracer)
  for link, data in webpages.items()
  ]
  results = await asyncio.gather(*tasks)
@@ -153,6 +154,7 @@ async def read_webpages(
  send_status_func: Optional[Callable] = None,
  query_images: List[str] = None,
  agent: Agent = None,
+ tracer: dict = {},
  ):
  "Infer web pages to read from the query and extract relevant information from them"
  logger.info(f"Inferring web pages to read")
@@ -166,7 +168,7 @@ async def read_webpages(
  webpage_links_str = "\n- " + "\n- ".join(list(urls))
  async for event in send_status_func(f"**Reading web pages**: {webpage_links_str}"):
  yield {ChatEvent.STATUS: event}
- tasks = [read_webpage_and_extract_content({query}, url, user=user, agent=agent) for url in urls]
+ tasks = [read_webpage_and_extract_content({query}, url, user=user, agent=agent, tracer=tracer) for url in urls]
  results = await asyncio.gather(*tasks)

  response: Dict[str, Dict] = defaultdict(dict)
@@ -192,7 +194,12 @@ async def read_webpage(


  async def read_webpage_and_extract_content(
- subqueries: set[str], url: str, content: str = None, user: KhojUser = None, agent: Agent = None
+ subqueries: set[str],
+ url: str,
+ content: str = None,
+ user: KhojUser = None,
+ agent: Agent = None,
+ tracer: dict = {},
  ) -> Tuple[set[str], str, Union[None, str]]:
  # Select the web scrapers to use for reading the web page
  web_scrapers = await ConversationAdapters.aget_enabled_webscrapers()
@@ -214,7 +221,9 @@ async def read_webpage_and_extract_content(
  # Extract relevant information from the web page
  if is_none_or_empty(extracted_info):
  with timer(f"Extracting relevant information from web page at '{url}' took", logger):
- extracted_info = await extract_relevant_info(subqueries, content, user=user, agent=agent)
+ extracted_info = await extract_relevant_info(
+ subqueries, content, user=user, agent=agent, tracer=tracer
+ )

  # If we successfully extracted information, break the loop
  if not is_none_or_empty(extracted_info):
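
Every hunk in this file follows the same shape: an async helper gains a trailing tracer: dict = {} keyword and forwards it, unchanged, to whatever it awaits. Below is a minimal sketch of that threading pattern only; run_subquery and search are hypothetical stand-ins, not the real khoj functions.

# Sketch of the tracer-threading pattern, assuming hypothetical helpers.
import asyncio
from typing import List


async def run_subquery(subquery: str, tracer: dict = {}) -> str:
    # Leaf helper: receives the shared tracer dict and can stamp whatever it
    # records (an LLM call, a log line) with the same message id.
    return f"result for {subquery!r} (trace: {tracer.get('mid', 'unknown')})"


async def search(query: str, subqueries: List[str], tracer: dict = {}) -> List[str]:
    # Intermediate helper: accepts the tracer and forwards it untouched, the
    # same shape as search_online -> read_webpage_and_extract_content above.
    tasks = [run_subquery(f"{query}: {sq}", tracer=tracer) for sq in subqueries]
    return await asyncio.gather(*tasks)


if __name__ == "__main__":
    trace = {"mid": "message-uuid", "uid": 1}
    print(asyncio.run(search("demo query", ["sub a", "sub b"], tracer=trace)))

Passing one shared dict down the call chain lets each downstream call attach per-message context without changing any return values, which appears to be the point of the repeated tracer=tracer edits.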
khoj/routers/api.py CHANGED
@@ -350,6 +350,7 @@ async def extract_references_and_questions(
  send_status_func: Optional[Callable] = None,
  query_images: Optional[List[str]] = None,
  agent: Agent = None,
+ tracer: dict = {},
  ):
  user = request.user.object if request.user.is_authenticated else None

@@ -425,6 +426,7 @@ async def extract_references_and_questions(
  user=user,
  max_prompt_size=conversation_config.max_prompt_size,
  personality_context=personality_context,
+ tracer=tracer,
  )
  elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
  openai_chat_config = conversation_config.openai_config
@@ -442,6 +444,7 @@ async def extract_references_and_questions(
  query_images=query_images,
  vision_enabled=vision_enabled,
  personality_context=personality_context,
+ tracer=tracer,
  )
  elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
  api_key = conversation_config.openai_config.api_key
@@ -456,6 +459,7 @@ async def extract_references_and_questions(
  user=user,
  vision_enabled=vision_enabled,
  personality_context=personality_context,
+ tracer=tracer,
  )
  elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
  api_key = conversation_config.openai_config.api_key
@@ -471,6 +475,7 @@ async def extract_references_and_questions(
  user=user,
  vision_enabled=vision_enabled,
  personality_context=personality_context,
+ tracer=tracer,
  )

  # Collate search results as context for GPT
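
In api.py the same tracer is handed to whichever provider-specific question extractor is configured; each ModelType branch gets the identical one-line addition. The sketch below only illustrates that dispatch; the extractor functions are hypothetical placeholders for khoj's offline/OpenAI/Anthropic/Google variants.

# Illustrative dispatch sketch; only the tracer forwarding mirrors the hunks above.
def extract_questions_offline(query: str, tracer: dict = {}) -> list:
    return [f"offline::{query}::{tracer.get('mid')}"]


def extract_questions_openai(query: str, tracer: dict = {}) -> list:
    return [f"openai::{query}::{tracer.get('mid')}"]


def extract_questions(query: str, model_type: str, tracer: dict = {}) -> list:
    # Whichever backend is selected, the same tracer dict is forwarded,
    # matching the repeated "tracer=tracer," line added to each branch.
    if model_type == "offline":
        return extract_questions_offline(query, tracer=tracer)
    elif model_type == "openai":
        return extract_questions_openai(query, tracer=tracer)
    raise ValueError(f"Unknown model type: {model_type}")


print(extract_questions("what changed?", "openai", tracer={"mid": "m-1"}))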
khoj/routers/api_chat.py CHANGED
@@ -3,6 +3,7 @@ import base64
  import json
  import logging
  import time
+ import uuid
  from datetime import datetime
  from functools import partial
  from typing import Dict, Optional
@@ -563,6 +564,12 @@ async def chat(
  event_delimiter = "␃🔚␗"
  q = unquote(q)
  nonlocal conversation_id
+ tracer: dict = {
+ "mid": f"{uuid.uuid4()}",
+ "cid": conversation_id,
+ "uid": user.id,
+ "khoj_version": state.khoj_version,
+ }

  uploaded_images: list[str] = []
  if images:
@@ -682,6 +689,7 @@ async def chat(
  user=user,
  query_images=uploaded_images,
  agent=agent,
+ tracer=tracer,
  )
  conversation_commands_str = ", ".join([cmd.value for cmd in conversation_commands])
  async for result in send_event(
@@ -689,7 +697,9 @@ async def chat(
  ):
  yield result

- mode = await aget_relevant_output_modes(q, meta_log, is_automated_task, user, uploaded_images, agent)
+ mode = await aget_relevant_output_modes(
+ q, meta_log, is_automated_task, user, uploaded_images, agent, tracer=tracer
+ )
  async for result in send_event(ChatEvent.STATUS, f"**Decided Response Mode:** {mode.value}"):
  yield result
  if mode not in conversation_commands:
@@ -755,6 +765,7 @@ async def chat(
  query_images=uploaded_images,
  user=user,
  agent=agent,
+ tracer=tracer,
  )
  response_log = str(response)
  async for result in send_llm_response(response_log):
@@ -774,6 +785,7 @@ async def chat(
  client_application=request.user.client_app,
  conversation_id=conversation_id,
  query_images=uploaded_images,
+ tracer=tracer,
  )
  return

@@ -795,7 +807,7 @@ async def chat(
  if ConversationCommand.Automation in conversation_commands:
  try:
  automation, crontime, query_to_run, subject = await create_automation(
- q, timezone, user, request.url, meta_log
+ q, timezone, user, request.url, meta_log, tracer=tracer
  )
  except Exception as e:
  logger.error(f"Error scheduling task {q} for {user.email}: {e}")
@@ -817,6 +829,7 @@ async def chat(
  inferred_queries=[query_to_run],
  automation_id=automation.id,
  query_images=uploaded_images,
+ tracer=tracer,
  )
  async for result in send_llm_response(llm_response):
  yield result
@@ -838,6 +851,7 @@ async def chat(
  partial(send_event, ChatEvent.STATUS),
  query_images=uploaded_images,
  agent=agent,
+ tracer=tracer,
  ):
  if isinstance(result, dict) and ChatEvent.STATUS in result:
  yield result[ChatEvent.STATUS]
@@ -882,6 +896,7 @@ async def chat(
  custom_filters,
  query_images=uploaded_images,
  agent=agent,
+ tracer=tracer,
  ):
  if isinstance(result, dict) and ChatEvent.STATUS in result:
  yield result[ChatEvent.STATUS]
@@ -906,6 +921,7 @@ async def chat(
  partial(send_event, ChatEvent.STATUS),
  query_images=uploaded_images,
  agent=agent,
+ tracer=tracer,
  ):
  if isinstance(result, dict) and ChatEvent.STATUS in result:
  yield result[ChatEvent.STATUS]
@@ -956,6 +972,7 @@ async def chat(
  send_status_func=partial(send_event, ChatEvent.STATUS),
  query_images=uploaded_images,
  agent=agent,
+ tracer=tracer,
  ):
  if isinstance(result, dict) and ChatEvent.STATUS in result:
  yield result[ChatEvent.STATUS]
@@ -986,6 +1003,7 @@ async def chat(
  compiled_references=compiled_references,
  online_results=online_results,
  query_images=uploaded_images,
+ tracer=tracer,
  )
  content_obj = {
  "intentType": intent_type,
@@ -1014,6 +1032,7 @@ async def chat(
  user=user,
  agent=agent,
  send_status_func=partial(send_event, ChatEvent.STATUS),
+ tracer=tracer,
  ):
  if isinstance(result, dict) and ChatEvent.STATUS in result:
  yield result[ChatEvent.STATUS]
@@ -1041,6 +1060,7 @@ async def chat(
  compiled_references=compiled_references,
  online_results=online_results,
  query_images=uploaded_images,
+ tracer=tracer,
  )

  async for result in send_llm_response(json.dumps(content_obj)):
@@ -1064,6 +1084,7 @@ async def chat(
  location,
  user_name,
  uploaded_images,
+ tracer,
  )

  # Send Response
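
The api_chat.py hunks are where the trace context originates: each incoming chat request builds a single tracer dict (a fresh message id plus conversation id, user id, and server version) and hands it to every actor invoked for that turn. The sketch below restates that construction in a self-contained form; build_tracer and its plain arguments are stand-ins for the request-scoped objects (user, state) seen in the hunk above.

# Sketch of the trace context built per chat request, based on the keys in the hunk above.
import uuid


def build_tracer(conversation_id: str, user_id: int, khoj_version: str) -> dict:
    # One "mid" (message id) per request; the conversation id, user id, and
    # server version travel with every downstream model call for this message.
    return {
        "mid": str(uuid.uuid4()),
        "cid": conversation_id,
        "uid": user_id,
        "khoj_version": khoj_version,
    }


tracer = build_tracer(conversation_id="42", user_id=1, khoj_version="1.27.2.dev29")
print(tracer)

Because the same dict object is passed everywhere, anything a downstream helper adds to it stays visible to later calls in the same request, which is presumably why it is a plain mutable dict rather than a frozen value.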
khoj/routers/helpers.py CHANGED
@@ -301,6 +301,7 @@ async def aget_relevant_information_sources(
  user: KhojUser,
  query_images: List[str] = None,
  agent: Agent = None,
+ tracer: dict = {},
  ):
  """
  Given a query, determine which of the available tools the agent should use in order to answer appropriately.
@@ -337,6 +338,7 @@ async def aget_relevant_information_sources(
  relevant_tools_prompt,
  response_type="json_object",
  user=user,
+ tracer=tracer,
  )

  try:
@@ -378,6 +380,7 @@ async def aget_relevant_output_modes(
  user: KhojUser = None,
  query_images: List[str] = None,
  agent: Agent = None,
+ tracer: dict = {},
  ):
  """
  Given a query, determine which of the available tools the agent should use in order to answer appropriately.
@@ -413,7 +416,9 @@ async def aget_relevant_output_modes(
  )

  with timer("Chat actor: Infer output mode for chat response", logger):
- response = await send_message_to_model_wrapper(relevant_mode_prompt, response_type="json_object", user=user)
+ response = await send_message_to_model_wrapper(
+ relevant_mode_prompt, response_type="json_object", user=user, tracer=tracer
+ )

  try:
  response = response.strip()
@@ -444,6 +449,7 @@ async def infer_webpage_urls(
  user: KhojUser,
  query_images: List[str] = None,
  agent: Agent = None,
+ tracer: dict = {},
  ) -> List[str]:
  """
  Infer webpage links from the given query
@@ -468,7 +474,11 @@ async def infer_webpage_urls(

  with timer("Chat actor: Infer webpage urls to read", logger):
  response = await send_message_to_model_wrapper(
- online_queries_prompt, query_images=query_images, response_type="json_object", user=user
+ online_queries_prompt,
+ query_images=query_images,
+ response_type="json_object",
+ user=user,
+ tracer=tracer,
  )

  # Validate that the response is a non-empty, JSON-serializable list of URLs
@@ -490,6 +500,7 @@ async def generate_online_subqueries(
  user: KhojUser,
  query_images: List[str] = None,
  agent: Agent = None,
+ tracer: dict = {},
  ) -> List[str]:
  """
  Generate subqueries from the given query
@@ -514,7 +525,11 @@ async def generate_online_subqueries(

  with timer("Chat actor: Generate online search subqueries", logger):
  response = await send_message_to_model_wrapper(
- online_queries_prompt, query_images=query_images, response_type="json_object", user=user
+ online_queries_prompt,
+ query_images=query_images,
+ response_type="json_object",
+ user=user,
+ tracer=tracer,
  )

  # Validate that the response is a non-empty, JSON-serializable list
@@ -533,7 +548,7 @@


  async def schedule_query(
- q: str, conversation_history: dict, user: KhojUser, query_images: List[str] = None
+ q: str, conversation_history: dict, user: KhojUser, query_images: List[str] = None, tracer: dict = {}
  ) -> Tuple[str, ...]:
  """
  Schedule the date, time to run the query. Assume the server timezone is UTC.
@@ -546,7 +561,7 @@
  )

  raw_response = await send_message_to_model_wrapper(
- crontime_prompt, query_images=query_images, response_type="json_object", user=user
+ crontime_prompt, query_images=query_images, response_type="json_object", user=user, tracer=tracer
  )

  # Validate that the response is a non-empty, JSON-serializable list
@@ -561,7 +576,7 @@


  async def extract_relevant_info(
- qs: set[str], corpus: str, user: KhojUser = None, agent: Agent = None
+ qs: set[str], corpus: str, user: KhojUser = None, agent: Agent = None, tracer: dict = {}
  ) -> Union[str, None]:
  """
  Extract relevant information for a given query from the target corpus
@@ -584,6 +599,7 @@
  extract_relevant_information,
  prompts.system_prompt_extract_relevant_information,
  user=user,
+ tracer=tracer,
  )
  return response.strip()

@@ -595,6 +611,7 @@ async def extract_relevant_summary(
  query_images: List[str] = None,
  user: KhojUser = None,
  agent: Agent = None,
+ tracer: dict = {},
  ) -> Union[str, None]:
  """
  Extract relevant information for a given query from the target corpus
@@ -622,6 +639,7 @@
  prompts.system_prompt_extract_relevant_summary,
  user=user,
  query_images=query_images,
+ tracer=tracer,
  )
  return response.strip()

@@ -636,6 +654,7 @@ async def generate_excalidraw_diagram(
  user: KhojUser = None,
  agent: Agent = None,
  send_status_func: Optional[Callable] = None,
+ tracer: dict = {},
  ):
  if send_status_func:
  async for event in send_status_func("**Enhancing the Diagramming Prompt**"):
@@ -650,6 +669,7 @@
  query_images=query_images,
  user=user,
  agent=agent,
+ tracer=tracer,
  )

  if send_status_func:
@@ -660,6 +680,7 @@
  q=better_diagram_description_prompt,
  user=user,
  agent=agent,
+ tracer=tracer,
  )

  yield better_diagram_description_prompt, excalidraw_diagram_description
@@ -674,6 +695,7 @@ async def generate_better_diagram_description(
  query_images: List[str] = None,
  user: KhojUser = None,
  agent: Agent = None,
+ tracer: dict = {},
  ) -> str:
  """
  Generate a diagram description from the given query and context
@@ -711,7 +733,7 @@

  with timer("Chat actor: Generate better diagram description", logger):
  response = await send_message_to_model_wrapper(
- improve_diagram_description_prompt, query_images=query_images, user=user
+ improve_diagram_description_prompt, query_images=query_images, user=user, tracer=tracer
  )
  response = response.strip()
  if response.startswith(('"', "'")) and response.endswith(('"', "'")):
@@ -724,6 +746,7 @@ async def generate_excalidraw_diagram_from_description(
  q: str,
  user: KhojUser = None,
  agent: Agent = None,
+ tracer: dict = {},
  ) -> str:
  personality_context = (
  prompts.personality_context.format(personality=agent.personality) if agent and agent.personality else ""
@@ -735,7 +758,9 @@
  )

  with timer("Chat actor: Generate excalidraw diagram", logger):
- raw_response = await send_message_to_model_wrapper(message=excalidraw_diagram_generation, user=user)
+ raw_response = await send_message_to_model_wrapper(
+ message=excalidraw_diagram_generation, user=user, tracer=tracer
+ )
  raw_response = raw_response.strip()
  raw_response = remove_json_codeblock(raw_response)
  response: Dict[str, str] = json.loads(raw_response)
@@ -756,6 +781,7 @@ async def generate_better_image_prompt(
  query_images: Optional[List[str]] = None,
  user: KhojUser = None,
  agent: Agent = None,
+ tracer: dict = {},
  ) -> str:
  """
  Generate a better image prompt from the given query
@@ -802,7 +828,9 @@
  )

  with timer("Chat actor: Generate contextual image prompt", logger):
- response = await send_message_to_model_wrapper(image_prompt, query_images=query_images, user=user)
+ response = await send_message_to_model_wrapper(
+ image_prompt, query_images=query_images, user=user, tracer=tracer
+ )
  response = response.strip()
  if response.startswith(('"', "'")) and response.endswith(('"', "'")):
  response = response[1:-1]
@@ -816,6 +844,7 @@ async def send_message_to_model_wrapper(
  response_type: str = "text",
  user: KhojUser = None,
  query_images: List[str] = None,
+ tracer: dict = {},
  ):
  conversation_config: ChatModelOptions = await ConversationAdapters.aget_default_conversation_config(user)
  vision_available = conversation_config.vision_enabled
@@ -862,6 +891,7 @@
  max_prompt_size=max_tokens,
  streaming=False,
  response_type=response_type,
+ tracer=tracer,
  )

  elif model_type == ChatModelOptions.ModelType.OPENAI:
@@ -885,6 +915,7 @@
  model=chat_model,
  response_type=response_type,
  api_base_url=api_base_url,
+ tracer=tracer,
  )
  elif model_type == ChatModelOptions.ModelType.ANTHROPIC:
  api_key = conversation_config.openai_config.api_key
@@ -903,6 +934,7 @@
  messages=truncated_messages,
  api_key=api_key,
  model=chat_model,
+ tracer=tracer,
  )
  elif model_type == ChatModelOptions.ModelType.GOOGLE:
  api_key = conversation_config.openai_config.api_key
@@ -918,7 +950,7 @@
  )

  return gemini_send_message_to_model(
- messages=truncated_messages, api_key=api_key, model=chat_model, response_type=response_type
+ messages=truncated_messages, api_key=api_key, model=chat_model, response_type=response_type, tracer=tracer
  )
  else:
  raise HTTPException(status_code=500, detail="Invalid conversation config")
@@ -929,6 +961,7 @@ def send_message_to_model_wrapper_sync(
  system_message: str = "",
  response_type: str = "text",
  user: KhojUser = None,
+ tracer: dict = {},
  ):
  conversation_config: ChatModelOptions = ConversationAdapters.get_default_conversation_config(user)

@@ -961,6 +994,7 @@
  max_prompt_size=max_tokens,
  streaming=False,
  response_type=response_type,
+ tracer=tracer,
  )

  elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
@@ -975,7 +1009,7 @@ def send_message_to_model_wrapper_sync(
  )

  openai_response = send_message_to_model(
- messages=truncated_messages, api_key=api_key, model=chat_model, response_type=response_type
+ messages=truncated_messages,
+ api_key=api_key,
+ model=chat_model,
+ response_type=response_type,
+ tracer=tracer,
  )

  return openai_response
@@ -995,6 +1033,7 @@ def send_message_to_model_wrapper_sync(
  messages=truncated_messages,
  api_key=api_key,
  model=chat_model,
+ tracer=tracer,
  )

  elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
@@ -1013,6 +1052,7 @@
  api_key=api_key,
  model=chat_model,
  response_type=response_type,
+ tracer=tracer,
  )
  else:
  raise HTTPException(status_code=500, detail="Invalid conversation config")
@@ -1032,6 +1072,7 @@ def generate_chat_response(
  location_data: LocationData = None,
  user_name: Optional[str] = None,
  query_images: Optional[List[str]] = None,
+ tracer: dict = {},
  ) -> Tuple[Union[ThreadedGenerator, Iterator[str]], Dict[str, str]]:
  # Initialize Variables
  chat_response = None
@@ -1051,6 +1092,7 @@
  client_application=client_application,
  conversation_id=conversation_id,
  query_images=query_images,
+ tracer=tracer,
  )

  conversation_config = ConversationAdapters.get_valid_conversation_config(user, conversation)
@@ -1077,6 +1119,7 @@
  location_data=location_data,
  user_name=user_name,
  agent=agent,
+ tracer=tracer,
  )

  elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
@@ -1100,6 +1143,7 @@
  user_name=user_name,
  agent=agent,
  vision_available=vision_available,
+ tracer=tracer,
  )

  elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
@@ -1120,6 +1164,7 @@
  user_name=user_name,
  agent=agent,
  vision_available=vision_available,
+ tracer=tracer,
  )
  elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
  api_key = conversation_config.openai_config.api_key
@@ -1139,6 +1184,7 @@
  user_name=user_name,
  agent=agent,
  vision_available=vision_available,
+ tracer=tracer,
  )

  metadata.update({"chat_model": conversation_config.chat_model})
@@ -1495,9 +1541,15 @@ def scheduled_chat(


  async def create_automation(
- q: str, timezone: str, user: KhojUser, calling_url: URL, meta_log: dict = {}, conversation_id: str = None
+ q: str,
+ timezone: str,
+ user: KhojUser,
+ calling_url: URL,
+ meta_log: dict = {},
+ conversation_id: str = None,
+ tracer: dict = {},
  ):
- crontime, query_to_run, subject = await schedule_query(q, meta_log, user)
+ crontime, query_to_run, subject = await schedule_query(q, meta_log, user, tracer=tracer)
  job = await schedule_automation(query_to_run, subject, crontime, timezone, q, user, calling_url, conversation_id)
  return job, crontime, query_to_run, subject

{khoj-1.27.2.dev15.dist-info → khoj-1.27.2.dev29.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: khoj
- Version: 1.27.2.dev15
+ Version: 1.27.2.dev29
  Summary: Your Second Brain
  Project-URL: Homepage, https://khoj.dev
  Project-URL: Documentation, https://docs.khoj.dev
@@ -78,6 +78,7 @@ Requires-Dist: black>=23.1.0; extra == 'dev'
  Requires-Dist: boto3>=1.34.57; extra == 'dev'
  Requires-Dist: factory-boy>=3.2.1; extra == 'dev'
  Requires-Dist: freezegun>=1.2.0; extra == 'dev'
+ Requires-Dist: gitpython~=3.1.43; extra == 'dev'
  Requires-Dist: google-auth==2.23.3; extra == 'dev'
  Requires-Dist: gunicorn==22.0.0; extra == 'dev'
  Requires-Dist: mypy>=1.0.1; extra == 'dev'