khoj 1.27.2.dev29__py3-none-any.whl → 1.27.2.dev130__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
Files changed (72)
  1. khoj/database/adapters/__init__.py +34 -10
  2. khoj/interface/compiled/404/index.html +1 -1
  3. khoj/interface/compiled/_next/static/chunks/1034-da58b679fcbb79c1.js +1 -0
  4. khoj/interface/compiled/_next/static/chunks/1467-5a191c1cd5bf0b83.js +1 -0
  5. khoj/interface/compiled/_next/static/chunks/1603-5d70d9dfcdcb1f10.js +1 -0
  6. khoj/interface/compiled/_next/static/chunks/3423-fa918f4e5365a35e.js +1 -0
  7. khoj/interface/compiled/_next/static/chunks/8423-3ad0bfb299801220.js +1 -0
  8. khoj/interface/compiled/_next/static/chunks/app/agents/{page-5ae1e540bb5be8a9.js → page-2beaba7c9bb750bd.js} +1 -1
  9. khoj/interface/compiled/_next/static/chunks/app/automations/{page-774ae3e033f938cd.js → page-9b5c77e0b0dd772c.js} +1 -1
  10. khoj/interface/compiled/_next/static/chunks/app/chat/page-7dc98df9c88828f0.js +1 -0
  11. khoj/interface/compiled/_next/static/chunks/app/factchecker/page-d887f55fe6d4f35d.js +1 -0
  12. khoj/interface/compiled/_next/static/chunks/app/{page-4dc472cf6d674004.js → page-d46244282af16509.js} +1 -1
  13. khoj/interface/compiled/_next/static/chunks/app/search/{page-9b64f61caa5bd7f9.js → page-ab2995529ece3140.js} +1 -1
  14. khoj/interface/compiled/_next/static/chunks/app/settings/{page-7a8c382af2a7e870.js → page-89e6737b2cc9fb3a.js} +1 -1
  15. khoj/interface/compiled/_next/static/chunks/app/share/chat/{page-eb9e282691858f2e.js → page-505b07bce608b34e.js} +1 -1
  16. khoj/interface/compiled/_next/static/chunks/{webpack-2b720658ccc746f2.js → webpack-8ae5ce45161bd98e.js} +1 -1
  17. khoj/interface/compiled/_next/static/css/{2272c73fc7a3b571.css → 26c1c33d0423a7d8.css} +1 -1
  18. khoj/interface/compiled/_next/static/css/592ca99f5122e75a.css +1 -0
  19. khoj/interface/compiled/_next/static/css/b70402177a7c3207.css +1 -0
  20. khoj/interface/compiled/_next/static/css/e9c5fe555dd3050b.css +25 -0
  21. khoj/interface/compiled/agents/index.html +1 -1
  22. khoj/interface/compiled/agents/index.txt +2 -2
  23. khoj/interface/compiled/automations/index.html +1 -1
  24. khoj/interface/compiled/automations/index.txt +2 -2
  25. khoj/interface/compiled/chat/index.html +1 -1
  26. khoj/interface/compiled/chat/index.txt +2 -2
  27. khoj/interface/compiled/factchecker/index.html +1 -1
  28. khoj/interface/compiled/factchecker/index.txt +2 -2
  29. khoj/interface/compiled/index.html +1 -1
  30. khoj/interface/compiled/index.txt +2 -2
  31. khoj/interface/compiled/search/index.html +1 -1
  32. khoj/interface/compiled/search/index.txt +2 -2
  33. khoj/interface/compiled/settings/index.html +1 -1
  34. khoj/interface/compiled/settings/index.txt +2 -2
  35. khoj/interface/compiled/share/chat/index.html +1 -1
  36. khoj/interface/compiled/share/chat/index.txt +2 -2
  37. khoj/processor/conversation/anthropic/anthropic_chat.py +14 -10
  38. khoj/processor/conversation/anthropic/utils.py +13 -2
  39. khoj/processor/conversation/google/gemini_chat.py +15 -11
  40. khoj/processor/conversation/offline/chat_model.py +10 -9
  41. khoj/processor/conversation/openai/gpt.py +11 -8
  42. khoj/processor/conversation/prompts.py +131 -22
  43. khoj/processor/conversation/utils.py +132 -6
  44. khoj/processor/tools/online_search.py +5 -3
  45. khoj/processor/tools/run_code.py +144 -0
  46. khoj/routers/api.py +6 -6
  47. khoj/routers/api_chat.py +156 -88
  48. khoj/routers/helpers.py +91 -47
  49. khoj/routers/research.py +321 -0
  50. khoj/search_filter/date_filter.py +1 -3
  51. khoj/search_filter/file_filter.py +1 -2
  52. khoj/search_type/text_search.py +3 -3
  53. khoj/utils/helpers.py +15 -2
  54. khoj/utils/yaml.py +4 -0
  55. {khoj-1.27.2.dev29.dist-info → khoj-1.27.2.dev130.dist-info}/METADATA +1 -1
  56. {khoj-1.27.2.dev29.dist-info → khoj-1.27.2.dev130.dist-info}/RECORD +63 -60
  57. khoj/interface/compiled/_next/static/chunks/1603-5138bb7c8035d9a6.js +0 -1
  58. khoj/interface/compiled/_next/static/chunks/2697-61fcba89fd87eab4.js +0 -1
  59. khoj/interface/compiled/_next/static/chunks/3423-0b533af8bf6ac218.js +0 -1
  60. khoj/interface/compiled/_next/static/chunks/9479-ff7d8c4dae2014d1.js +0 -1
  61. khoj/interface/compiled/_next/static/chunks/app/chat/page-97f5b61aaf46d364.js +0 -1
  62. khoj/interface/compiled/_next/static/chunks/app/factchecker/page-d82403db2866bad8.js +0 -1
  63. khoj/interface/compiled/_next/static/css/4cae6c0e5c72fb2d.css +0 -1
  64. khoj/interface/compiled/_next/static/css/76d55eb435962b19.css +0 -25
  65. khoj/interface/compiled/_next/static/css/ddcc0cf73e062476.css +0 -1
  66. /khoj/interface/compiled/_next/static/{atzIseFarmC7TIwq2BgHC → N19uqHAJYqRAVxvuVwHfE}/_buildManifest.js +0 -0
  67. /khoj/interface/compiled/_next/static/{atzIseFarmC7TIwq2BgHC → N19uqHAJYqRAVxvuVwHfE}/_ssgManifest.js +0 -0
  68. /khoj/interface/compiled/_next/static/chunks/{1970-60c96aed937a4928.js → 1970-444843bea1d17d61.js} +0 -0
  69. /khoj/interface/compiled/_next/static/chunks/{9417-2ca87207387fc790.js → 9417-19cfd1a9cb758e71.js} +0 -0
  70. {khoj-1.27.2.dev29.dist-info → khoj-1.27.2.dev130.dist-info}/WHEEL +0 -0
  71. {khoj-1.27.2.dev29.dist-info → khoj-1.27.2.dev130.dist-info}/entry_points.txt +0 -0
  72. {khoj-1.27.2.dev29.dist-info → khoj-1.27.2.dev130.dist-info}/licenses/LICENSE +0 -0
khoj/processor/tools/run_code.py ADDED
@@ -0,0 +1,144 @@
+ import asyncio
+ import datetime
+ import json
+ import logging
+ import os
+ from typing import Any, Callable, List, Optional
+
+ import aiohttp
+
+ from khoj.database.adapters import ais_user_subscribed
+ from khoj.database.models import Agent, KhojUser
+ from khoj.processor.conversation import prompts
+ from khoj.processor.conversation.utils import (
+     ChatEvent,
+     clean_code_python,
+     clean_json,
+     construct_chat_history,
+ )
+ from khoj.routers.helpers import send_message_to_model_wrapper
+ from khoj.utils.helpers import timer
+ from khoj.utils.rawconfig import LocationData
+
+ logger = logging.getLogger(__name__)
+
+
+ SANDBOX_URL = os.getenv("KHOJ_TERRARIUM_URL", "http://localhost:8080")
+
+
+ async def run_code(
+     query: str,
+     conversation_history: dict,
+     context: str,
+     location_data: LocationData,
+     user: KhojUser,
+     send_status_func: Optional[Callable] = None,
+     query_images: List[str] = None,
+     agent: Agent = None,
+     sandbox_url: str = SANDBOX_URL,
+     tracer: dict = {},
+ ):
+     # Generate Code
+     if send_status_func:
+         async for event in send_status_func(f"**Generate code snippets** for {query}"):
+             yield {ChatEvent.STATUS: event}
+     try:
+         with timer("Chat actor: Generate programs to execute", logger):
+             codes = await generate_python_code(
+                 query,
+                 conversation_history,
+                 context,
+                 location_data,
+                 user,
+                 query_images,
+                 agent,
+                 tracer,
+             )
+     except Exception as e:
+         raise ValueError(f"Failed to generate code for {query} with error: {e}")
+
+     # Run Code
+     if send_status_func:
+         async for event in send_status_func(f"**Running {len(codes)} code snippets**"):
+             yield {ChatEvent.STATUS: event}
+     try:
+         tasks = [execute_sandboxed_python(code, sandbox_url) for code in codes]
+         with timer("Chat actor: Execute generated programs", logger):
+             results = await asyncio.gather(*tasks)
+         for result in results:
+             code = result.pop("code")
+             logger.info(f"Executed Code:\n--@@--\n{code}\n--@@--Result:\n--@@--\n{result}\n--@@--")
+             yield {query: {"code": code, "results": result}}
+     except Exception as e:
+         raise ValueError(f"Failed to run code for {query} with error: {e}")
+
+
+ async def generate_python_code(
+     q: str,
+     conversation_history: dict,
+     context: str,
+     location_data: LocationData,
+     user: KhojUser,
+     query_images: List[str] = None,
+     agent: Agent = None,
+     tracer: dict = {},
+ ) -> List[str]:
+     location = f"{location_data}" if location_data else "Unknown"
+     username = prompts.user_name.format(name=user.get_full_name()) if user.get_full_name() else ""
+     subscribed = await ais_user_subscribed(user)
+     chat_history = construct_chat_history(conversation_history)
+
+     utc_date = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%d")
+     personality_context = (
+         prompts.personality_context.format(personality=agent.personality) if agent and agent.personality else ""
+     )
+
+     code_generation_prompt = prompts.python_code_generation_prompt.format(
+         current_date=utc_date,
+         query=q,
+         chat_history=chat_history,
+         context=context,
+         location=location,
+         username=username,
+         personality_context=personality_context,
+     )
+
+     response = await send_message_to_model_wrapper(
+         code_generation_prompt,
+         query_images=query_images,
+         response_type="json_object",
+         user=user,
+         tracer=tracer,
+     )
+
+     # Validate that the response is a non-empty, JSON-serializable list
+     response = clean_json(response)
+     response = json.loads(response)
+     codes = [code.strip() for code in response["codes"] if code.strip()]
+
+     if not isinstance(codes, list) or not codes or len(codes) == 0:
+         raise ValueError
+     return codes
+
+
+ async def execute_sandboxed_python(code: str, sandbox_url: str = SANDBOX_URL) -> dict[str, Any]:
+     """
+     Takes code to run as a string and calls the terrarium API to execute it.
+     Returns the result of the code execution as a dictionary.
+     """
+     headers = {"Content-Type": "application/json"}
+     cleaned_code = clean_code_python(code)
+     data = {"code": cleaned_code}
+
+     async with aiohttp.ClientSession() as session:
+         async with session.post(sandbox_url, json=data, headers=headers) as response:
+             if response.status == 200:
+                 result: dict[str, Any] = await response.json()
+                 result["code"] = cleaned_code
+                 return result
+             else:
+                 return {
+                     "code": cleaned_code,
+                     "success": False,
+                     "std_err": f"Failed to execute code with {response.status}",
+                 }
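For orientation, the new module above wires Khoj's code tool to a terrarium-style sandbox over plain HTTP: generate_python_code asks the configured chat model for a JSON list of snippets, and execute_sandboxed_python POSTs each snippet to the sandbox URL. The driver below is a minimal sketch, not part of the package: it assumes a sandbox is actually listening at the default KHOJ_TERRARIUM_URL (http://localhost:8080) and relies only on the fields the module itself uses ("code", "success", "std_err"); the rest of the response shape depends on the sandbox.

import asyncio

from khoj.processor.tools.run_code import execute_sandboxed_python


async def main():
    # POST one snippet to the sandbox; on a non-200 response the helper returns
    # {"code": ..., "success": False, "std_err": ...} instead of raising.
    result = await execute_sandboxed_python("print(21 * 2)")
    print(result.get("success"), result.get("std_err"))


if __name__ == "__main__":
    asyncio.run(main())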
khoj/routers/api.py CHANGED
@@ -44,6 +44,7 @@ from khoj.processor.conversation.offline.chat_model import extract_questions_off
  from khoj.processor.conversation.offline.whisper import transcribe_audio_offline
  from khoj.processor.conversation.openai.gpt import extract_questions
  from khoj.processor.conversation.openai.whisper import transcribe_audio
+ from khoj.processor.conversation.utils import defilter_query
  from khoj.routers.helpers import (
  ApiUserRateLimiter,
  ChatEvent,
@@ -167,8 +168,8 @@ async def execute_search(
  search_futures += [
  executor.submit(
  text_search.query,
- user,
  user_query,
+ user,
  t,
  question_embedding=encoded_asymmetric_query,
  max_distance=max_distance,
@@ -355,7 +356,7 @@ async def extract_references_and_questions(
  user = request.user.object if request.user.is_authenticated else None

  # Initialize Variables
- compiled_references: List[Any] = []
+ compiled_references: List[dict[str, str]] = []
  inferred_queries: List[str] = []

  agent_has_entries = False
@@ -384,9 +385,7 @@ async def extract_references_and_questions(
  return

  # Extract filter terms from user message
- defiltered_query = q
- for filter in [DateFilter(), WordFilter(), FileFilter()]:
- defiltered_query = filter.defilter(defiltered_query)
+ defiltered_query = defilter_query(q)
  filters_in_query = q.replace(defiltered_query, "").strip()
  conversation = await sync_to_async(ConversationAdapters.get_conversation_by_id)(conversation_id)

@@ -502,7 +501,8 @@ async def extract_references_and_questions(
  )
  search_results = text_search.deduplicated_search_responses(search_results)
  compiled_references = [
- {"compiled": item.additional["compiled"], "file": item.additional["file"]} for item in search_results
+ {"query": q, "compiled": item.additional["compiled"], "file": item.additional["file"]}
+ for q, item in zip(inferred_queries, search_results)
  ]

  yield compiled_references, inferred_queries, defiltered_query
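The defilter_query helper imported above replaces the inline filter-stripping loop this hunk removes. A hedged sketch of what that consolidation likely looks like, based on the removed lines; the helper lives in khoj.processor.conversation.utils per the new import, but its exact signature is not shown in this diff:

from khoj.search_filter.date_filter import DateFilter
from khoj.search_filter.file_filter import FileFilter
from khoj.search_filter.word_filter import WordFilter


def defilter_query(query: str) -> str:
    # Strip date, word, and file filter terms from the raw user query,
    # mirroring the inline loop removed from extract_references_and_questions above.
    defiltered_query = query
    for query_filter in [DateFilter(), WordFilter(), FileFilter()]:
        defiltered_query = query_filter.defilter(defiltered_query)
    return defiltered_query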
khoj/routers/api_chat.py CHANGED
@@ -6,7 +6,7 @@ import time
  import uuid
  from datetime import datetime
  from functools import partial
- from typing import Dict, Optional
+ from typing import Any, Dict, List, Optional
  from urllib.parse import unquote

  from asgiref.sync import sync_to_async
@@ -25,10 +25,11 @@ from khoj.database.adapters import (
  )
  from khoj.database.models import Agent, KhojUser
  from khoj.processor.conversation.prompts import help_message, no_entries_found
- from khoj.processor.conversation.utils import save_to_conversation_log
+ from khoj.processor.conversation.utils import defilter_query, save_to_conversation_log
  from khoj.processor.image.generate import text_to_image
  from khoj.processor.speech.text_to_speech import generate_text_to_speech
  from khoj.processor.tools.online_search import read_webpages, search_online
+ from khoj.processor.tools.run_code import run_code
  from khoj.routers.api import extract_references_and_questions
  from khoj.routers.helpers import (
  ApiImageRateLimiter,
@@ -42,8 +43,10 @@ from khoj.routers.helpers import (
  aget_relevant_output_modes,
  construct_automation_created_message,
  create_automation,
+ extract_relevant_info,
  extract_relevant_summary,
  generate_excalidraw_diagram,
+ generate_summary_from_files,
  get_conversation_command,
  is_query_empty,
  is_ready_to_chat,
@@ -51,6 +54,10 @@ from khoj.routers.helpers import (
  update_telemetry_state,
  validate_conversation_config,
  )
+ from khoj.routers.research import (
+ InformationCollectionIteration,
+ execute_information_collection,
+ )
  from khoj.routers.storage import upload_image_to_bucket
  from khoj.utils import state
  from khoj.utils.helpers import (
@@ -563,7 +570,9 @@ async def chat(
  user: KhojUser = request.user.object
  event_delimiter = "␃🔚␗"
  q = unquote(q)
+ train_of_thought = []
  nonlocal conversation_id
+
  tracer: dict = {
  "mid": f"{uuid.uuid4()}",
  "cid": conversation_id,
@@ -583,7 +592,7 @@ async def chat(
  uploaded_images.append(uploaded_image)

  async def send_event(event_type: ChatEvent, data: str | dict):
- nonlocal connection_alive, ttft
+ nonlocal connection_alive, ttft, train_of_thought
  if not connection_alive or await request.is_disconnected():
  connection_alive = False
  logger.warning(f"User {user} disconnected from {common.client} client")
@@ -591,8 +600,11 @@ async def chat(
  try:
  if event_type == ChatEvent.END_LLM_RESPONSE:
  collect_telemetry()
- if event_type == ChatEvent.START_LLM_RESPONSE:
+ elif event_type == ChatEvent.START_LLM_RESPONSE:
  ttft = time.perf_counter() - start_time
+ elif event_type == ChatEvent.STATUS:
+ train_of_thought.append({"type": event_type.value, "data": data})
+
  if event_type == ChatEvent.MESSAGE:
  yield data
  elif event_type == ChatEvent.REFERENCES or stream:
@@ -681,6 +693,14 @@ async def chat(
  meta_log = conversation.conversation_log
  is_automated_task = conversation_commands == [ConversationCommand.AutomatedTask]

+ researched_results = ""
+ online_results: Dict = dict()
+ code_results: Dict = dict()
+ ## Extract Document References
+ compiled_references: List[Any] = []
+ inferred_queries: List[Any] = []
+ defiltered_query = defilter_query(q)
+
  if conversation_commands == [ConversationCommand.Default] or is_automated_task:
  conversation_commands = await aget_relevant_information_sources(
  q,
@@ -691,6 +711,11 @@ async def chat(
  agent=agent,
  tracer=tracer,
  )
+
+ # If we're doing research, we don't want to do anything else
+ if ConversationCommand.Research in conversation_commands:
+ conversation_commands = [ConversationCommand.Research]
+
  conversation_commands_str = ", ".join([cmd.value for cmd in conversation_commands])
  async for result in send_event(
  ChatEvent.STATUS, f"**Chose Data Sources to Search:** {conversation_commands_str}"
@@ -705,6 +730,38 @@ async def chat(
  if mode not in conversation_commands:
  conversation_commands.append(mode)

+ if conversation_commands == [ConversationCommand.Research]:
+ async for research_result in execute_information_collection(
+ request=request,
+ user=user,
+ query=defiltered_query,
+ conversation_id=conversation_id,
+ conversation_history=meta_log,
+ query_images=uploaded_images,
+ agent=agent,
+ send_status_func=partial(send_event, ChatEvent.STATUS),
+ user_name=user_name,
+ location=location,
+ file_filters=conversation.file_filters if conversation else [],
+ tracer=tracer,
+ ):
+ if isinstance(research_result, InformationCollectionIteration):
+ if research_result.summarizedResult:
+ if research_result.onlineContext:
+ online_results.update(research_result.onlineContext)
+ if research_result.codeContext:
+ code_results.update(research_result.codeContext)
+ if research_result.context:
+ compiled_references.extend(research_result.context)
+
+ researched_results += research_result.summarizedResult
+
+ else:
+ yield research_result
+
+ # researched_results = await extract_relevant_info(q, researched_results, agent)
+ logger.info(f"Researched Results: {researched_results}")
+
  for cmd in conversation_commands:
  await conversation_command_rate_limiter.update_and_check_if_valid(request, cmd)
  q = q.replace(f"/{cmd.value}", "").strip()
@@ -733,48 +790,24 @@ async def chat(
  async for result in send_llm_response(response_log):
  yield result
  else:
- try:
- file_object = None
- if await EntryAdapters.aagent_has_entries(agent):
- file_names = await EntryAdapters.aget_agent_entry_filepaths(agent)
- if len(file_names) > 0:
- file_object = await FileObjectAdapters.async_get_file_objects_by_name(
- None, file_names[0], agent
- )
-
- if len(file_filters) > 0:
- file_object = await FileObjectAdapters.async_get_file_objects_by_name(user, file_filters[0])
-
- if len(file_object) == 0:
- response_log = "Sorry, I couldn't find the full text of this file. Please re-upload the document and try again."
- async for result in send_llm_response(response_log):
- yield result
- return
- contextual_data = " ".join([file.raw_text for file in file_object])
- if not q:
- q = "Create a general summary of the file"
- async for result in send_event(
- ChatEvent.STATUS, f"**Constructing Summary Using:** {file_object[0].file_name}"
- ):
- yield result
-
- response = await extract_relevant_summary(
- q,
- contextual_data,
- conversation_history=meta_log,
- query_images=uploaded_images,
- user=user,
- agent=agent,
- tracer=tracer,
- )
- response_log = str(response)
- async for result in send_llm_response(response_log):
- yield result
- except Exception as e:
- response_log = "Error summarizing file. Please try again, or contact support."
- logger.error(f"Error summarizing file for {user.email}: {e}", exc_info=True)
- async for result in send_llm_response(response_log):
- yield result
+ async for response in generate_summary_from_files(
+ q=q,
+ user=user,
+ file_filters=file_filters,
+ meta_log=meta_log,
+ query_images=uploaded_images,
+ agent=agent,
+ send_status_func=partial(send_event, ChatEvent.STATUS),
+ tracer=tracer,
+ ):
+ if isinstance(response, dict) and ChatEvent.STATUS in response:
+ yield response[ChatEvent.STATUS]
+ else:
+ if isinstance(response, str):
+ response_log = response
+ async for result in send_llm_response(response):
+ yield result
+
  await sync_to_async(save_to_conversation_log)(
  q,
  response_log,
@@ -786,6 +819,7 @@ async def chat(
  conversation_id=conversation_id,
  query_images=uploaded_images,
  tracer=tracer,
+ train_of_thought=train_of_thought,
  )
  return

@@ -794,7 +828,7 @@ async def chat(
  if not q:
  conversation_config = await ConversationAdapters.aget_user_conversation_config(user)
  if conversation_config == None:
- conversation_config = await ConversationAdapters.aget_default_conversation_config()
+ conversation_config = await ConversationAdapters.aget_default_conversation_config(user)
  model_type = conversation_config.model_type
  formatted_help = help_message.format(model=model_type, version=state.khoj_version, device=get_device())
  async for result in send_llm_response(formatted_help):
@@ -830,6 +864,7 @@ async def chat(
  automation_id=automation.id,
  query_images=uploaded_images,
  tracer=tracer,
+ train_of_thought=train_of_thought,
  )
  async for result in send_llm_response(llm_response):
  yield result
@@ -837,49 +872,49 @@ async def chat(

  # Gather Context
  ## Extract Document References
- compiled_references, inferred_queries, defiltered_query = [], [], q
- try:
- async for result in extract_references_and_questions(
- request,
- meta_log,
- q,
- (n or 7),
- d,
- conversation_id,
- conversation_commands,
- location,
- partial(send_event, ChatEvent.STATUS),
- query_images=uploaded_images,
- agent=agent,
- tracer=tracer,
- ):
- if isinstance(result, dict) and ChatEvent.STATUS in result:
- yield result[ChatEvent.STATUS]
- else:
- compiled_references.extend(result[0])
- inferred_queries.extend(result[1])
- defiltered_query = result[2]
- except Exception as e:
- error_message = f"Error searching knowledge base: {e}. Attempting to respond without document references."
- logger.error(error_message, exc_info=True)
- async for result in send_event(
- ChatEvent.STATUS, "Document search failed. I'll try respond without document references"
- ):
- yield result
-
- if not is_none_or_empty(compiled_references):
- headings = "\n- " + "\n- ".join(set([c.get("compiled", c).split("\n")[0] for c in compiled_references]))
- # Strip only leading # from headings
- headings = headings.replace("#", "")
- async for result in send_event(ChatEvent.STATUS, f"**Found Relevant Notes**: {headings}"):
- yield result
+ if not ConversationCommand.Research in conversation_commands:
+ try:
+ async for result in extract_references_and_questions(
+ request,
+ meta_log,
+ q,
+ (n or 7),
+ d,
+ conversation_id,
+ conversation_commands,
+ location,
+ partial(send_event, ChatEvent.STATUS),
+ query_images=uploaded_images,
+ agent=agent,
+ tracer=tracer,
+ ):
+ if isinstance(result, dict) and ChatEvent.STATUS in result:
+ yield result[ChatEvent.STATUS]
+ else:
+ compiled_references.extend(result[0])
+ inferred_queries.extend(result[1])
+ defiltered_query = result[2]
+ except Exception as e:
+ error_message = (
+ f"Error searching knowledge base: {e}. Attempting to respond without document references."
+ )
+ logger.error(error_message, exc_info=True)
+ async for result in send_event(
+ ChatEvent.STATUS, "Document search failed. I'll try respond without document references"
+ ):
+ yield result

- online_results: Dict = dict()
+ if not is_none_or_empty(compiled_references):
+ headings = "\n- " + "\n- ".join(set([c.get("compiled", c).split("\n")[0] for c in compiled_references]))
+ # Strip only leading # from headings
+ headings = headings.replace("#", "")
+ async for result in send_event(ChatEvent.STATUS, f"**Found Relevant Notes**: {headings}"):
+ yield result

- if conversation_commands == [ConversationCommand.Notes] and not await EntryAdapters.auser_has_entries(user):
- async for result in send_llm_response(f"{no_entries_found.format()}"):
- yield result
- return
+ if conversation_commands == [ConversationCommand.Notes] and not await EntryAdapters.auser_has_entries(user):
+ async for result in send_llm_response(f"{no_entries_found.format()}"):
+ yield result
+ return

  if ConversationCommand.Notes in conversation_commands and is_none_or_empty(compiled_references):
  conversation_commands.remove(ConversationCommand.Notes)
@@ -948,6 +983,33 @@ async def chat(
  ):
  yield result

+ ## Gather Code Results
+ if ConversationCommand.Code in conversation_commands:
+ try:
+ context = f"# Iteration 1:\n#---\nNotes:\n{compiled_references}\n\nOnline Results:{online_results}"
+ async for result in run_code(
+ defiltered_query,
+ meta_log,
+ context,
+ location,
+ user,
+ partial(send_event, ChatEvent.STATUS),
+ query_images=uploaded_images,
+ agent=agent,
+ tracer=tracer,
+ ):
+ if isinstance(result, dict) and ChatEvent.STATUS in result:
+ yield result[ChatEvent.STATUS]
+ else:
+ code_results = result
+ async for result in send_event(ChatEvent.STATUS, f"**Ran code snippets**: {len(code_results)}"):
+ yield result
+ except ValueError as e:
+ logger.warning(
+ f"Failed to use code tool: {e}. Attempting to respond without code results",
+ exc_info=True,
+ )
+
  ## Send Gathered References
  async for result in send_event(
  ChatEvent.REFERENCES,
@@ -955,6 +1017,7 @@ async def chat(
  "inferredQueries": inferred_queries,
  "context": compiled_references,
  "onlineContext": online_results,
+ "codeContext": code_results,
  },
  ):
  yield result
@@ -1004,6 +1067,7 @@ async def chat(
  online_results=online_results,
  query_images=uploaded_images,
  tracer=tracer,
+ train_of_thought=train_of_thought,
  )
  content_obj = {
  "intentType": intent_type,
@@ -1061,6 +1125,7 @@ async def chat(
  online_results=online_results,
  query_images=uploaded_images,
  tracer=tracer,
+ train_of_thought=train_of_thought,
  )

  async for result in send_llm_response(json.dumps(content_obj)):
@@ -1076,6 +1141,7 @@ async def chat(
  conversation,
  compiled_references,
  online_results,
+ code_results,
  inferred_queries,
  conversation_commands,
  user,
@@ -1083,8 +1149,10 @@ async def chat(
  conversation_id,
  location,
  user_name,
+ researched_results,
  uploaded_images,
  tracer,
+ train_of_thought,
  )

  # Send Response
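The research branch added above reads only four attributes off each InformationCollectionIteration yielded by execute_information_collection: summarizedResult, context, onlineContext, and codeContext. Below is a hedged sketch of that container inferred from those accesses; the real class in khoj/routers/research.py likely carries additional per-iteration state, such as the selected tool and its query.

from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional


@dataclass
class InformationCollectionIteration:
    # Summary produced for this research step; falsy when the step yielded nothing usable.
    summarizedResult: Optional[str] = None
    # Document references gathered this step (dicts with "query", "compiled", "file" keys).
    context: List[Dict[str, str]] = field(default_factory=list)
    # Online search results, merged into online_results by the chat handler.
    onlineContext: Dict[str, Any] = field(default_factory=dict)
    # Sandboxed code execution results, merged into code_results by the chat handler.
    codeContext: Dict[str, Any] = field(default_factory=dict)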