khoj 1.27.2.dev15__py3-none-any.whl → 1.28.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- khoj/configure.py +1 -1
- khoj/database/adapters/__init__.py +50 -12
- khoj/interface/compiled/404/index.html +1 -1
- khoj/interface/compiled/_next/static/chunks/1034-da58b679fcbb79c1.js +1 -0
- khoj/interface/compiled/_next/static/chunks/1467-b331e469fe411347.js +1 -0
- khoj/interface/compiled/_next/static/chunks/1603-c1568f45947e9f2c.js +1 -0
- khoj/interface/compiled/_next/static/chunks/3423-f4b7df2f6f3362f7.js +1 -0
- khoj/interface/compiled/_next/static/chunks/8423-da57554315eebcbe.js +1 -0
- khoj/interface/compiled/_next/static/chunks/app/agents/{page-2beaba7c9bb750bd.js → page-5ae1e540bb5be8a9.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/automations/{page-9b5c77e0b0dd772c.js → page-774ae3e033f938cd.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/chat/page-d8f4c107ad78e9e9.js +1 -0
- khoj/interface/compiled/_next/static/chunks/app/factchecker/page-1cc42ee55f89fb2e.js +1 -0
- khoj/interface/compiled/_next/static/chunks/app/{page-4b6008223ea79955.js → page-07e54186b066f5ce.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/search/{page-ab2995529ece3140.js → page-9b64f61caa5bd7f9.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/settings/{page-7946cabb9c54e22d.js → page-10b288c103f19468.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/app/share/chat/{page-6a01e07fb244c10c.js → page-db775d42e820afb2.js} +1 -1
- khoj/interface/compiled/_next/static/chunks/{webpack-878569182b3af4c6.js → webpack-8f2abab7b11aa120.js} +1 -1
- khoj/interface/compiled/_next/static/css/{2272c73fc7a3b571.css → 26c1c33d0423a7d8.css} +1 -1
- khoj/interface/compiled/_next/static/css/4cae6c0e5c72fb2d.css +1 -0
- khoj/interface/compiled/_next/static/css/a795ee88875f4853.css +25 -0
- khoj/interface/compiled/_next/static/css/ddcc0cf73e062476.css +1 -0
- khoj/interface/compiled/agents/index.html +1 -1
- khoj/interface/compiled/agents/index.txt +2 -2
- khoj/interface/compiled/automations/index.html +1 -1
- khoj/interface/compiled/automations/index.txt +2 -2
- khoj/interface/compiled/chat/index.html +1 -1
- khoj/interface/compiled/chat/index.txt +2 -2
- khoj/interface/compiled/factchecker/index.html +1 -1
- khoj/interface/compiled/factchecker/index.txt +2 -2
- khoj/interface/compiled/index.html +1 -1
- khoj/interface/compiled/index.txt +2 -2
- khoj/interface/compiled/search/index.html +1 -1
- khoj/interface/compiled/search/index.txt +2 -2
- khoj/interface/compiled/settings/index.html +1 -1
- khoj/interface/compiled/settings/index.txt +2 -2
- khoj/interface/compiled/share/chat/index.html +1 -1
- khoj/interface/compiled/share/chat/index.txt +2 -2
- khoj/processor/conversation/anthropic/anthropic_chat.py +19 -10
- khoj/processor/conversation/anthropic/utils.py +37 -6
- khoj/processor/conversation/google/gemini_chat.py +23 -13
- khoj/processor/conversation/google/utils.py +34 -10
- khoj/processor/conversation/offline/chat_model.py +48 -16
- khoj/processor/conversation/openai/gpt.py +25 -10
- khoj/processor/conversation/openai/utils.py +50 -9
- khoj/processor/conversation/prompts.py +156 -65
- khoj/processor/conversation/utils.py +306 -6
- khoj/processor/embeddings.py +4 -4
- khoj/processor/image/generate.py +2 -0
- khoj/processor/tools/online_search.py +27 -12
- khoj/processor/tools/run_code.py +144 -0
- khoj/routers/api.py +11 -6
- khoj/routers/api_chat.py +213 -111
- khoj/routers/helpers.py +171 -60
- khoj/routers/research.py +320 -0
- khoj/search_filter/date_filter.py +1 -3
- khoj/search_filter/file_filter.py +1 -2
- khoj/search_type/text_search.py +3 -3
- khoj/utils/helpers.py +24 -2
- khoj/utils/yaml.py +4 -0
- {khoj-1.27.2.dev15.dist-info → khoj-1.28.0.dist-info}/METADATA +3 -2
- {khoj-1.27.2.dev15.dist-info → khoj-1.28.0.dist-info}/RECORD +68 -65
- khoj/interface/compiled/_next/static/chunks/1603-b9d95833e0e025e8.js +0 -1
- khoj/interface/compiled/_next/static/chunks/2697-61fcba89fd87eab4.js +0 -1
- khoj/interface/compiled/_next/static/chunks/3423-0b533af8bf6ac218.js +0 -1
- khoj/interface/compiled/_next/static/chunks/9479-ff7d8c4dae2014d1.js +0 -1
- khoj/interface/compiled/_next/static/chunks/app/chat/page-151232d8417a1ea1.js +0 -1
- khoj/interface/compiled/_next/static/chunks/app/factchecker/page-798904432c2417c4.js +0 -1
- khoj/interface/compiled/_next/static/css/592ca99f5122e75a.css +0 -1
- khoj/interface/compiled/_next/static/css/76d55eb435962b19.css +0 -25
- khoj/interface/compiled/_next/static/css/d738728883c68af8.css +0 -1
- /khoj/interface/compiled/_next/static/{vcyFRDGArOFXwUVotHIuv → cC7ahn2y_DddSVovjlztj}/_buildManifest.js +0 -0
- /khoj/interface/compiled/_next/static/{vcyFRDGArOFXwUVotHIuv → cC7ahn2y_DddSVovjlztj}/_ssgManifest.js +0 -0
- /khoj/interface/compiled/_next/static/chunks/{1970-60c96aed937a4928.js → 1970-d44050bf658ae5cc.js} +0 -0
- /khoj/interface/compiled/_next/static/chunks/{9417-2ca87207387fc790.js → 9417-0d0fc7eb49a86abb.js} +0 -0
- {khoj-1.27.2.dev15.dist-info → khoj-1.28.0.dist-info}/WHEEL +0 -0
- {khoj-1.27.2.dev15.dist-info → khoj-1.28.0.dist-info}/entry_points.txt +0 -0
- {khoj-1.27.2.dev15.dist-info → khoj-1.28.0.dist-info}/licenses/LICENSE +0 -0
khoj/routers/helpers.py
CHANGED
@@ -43,6 +43,7 @@ from khoj.database.adapters import (
     AutomationAdapters,
     ConversationAdapters,
     EntryAdapters,
+    FileObjectAdapters,
     ais_user_subscribed,
     create_khoj_token,
     get_khoj_tokens,
@@ -87,9 +88,11 @@ from khoj.processor.conversation.offline.chat_model import (
 )
 from khoj.processor.conversation.openai.gpt import converse, send_message_to_model
 from khoj.processor.conversation.utils import (
+    ChatEvent,
     ThreadedGenerator,
+    clean_json,
+    construct_chat_history,
     generate_chatml_messages_with_context,
-    remove_json_codeblock,
     save_to_conversation_log,
 )
 from khoj.processor.speech.text_to_speech import is_eleven_labs_enabled
@@ -137,7 +140,7 @@ def validate_conversation_config(user: KhojUser):
 async def is_ready_to_chat(user: KhojUser):
     user_conversation_config = await ConversationAdapters.aget_user_conversation_config(user)
     if user_conversation_config == None:
-        user_conversation_config = await ConversationAdapters.aget_default_conversation_config()
+        user_conversation_config = await ConversationAdapters.aget_default_conversation_config(user)
 
     if user_conversation_config and user_conversation_config.model_type == ChatModelOptions.ModelType.OFFLINE:
         chat_model = user_conversation_config.chat_model
@@ -210,21 +213,6 @@ def get_next_url(request: Request) -> str:
     return urljoin(str(request.base_url).rstrip("/"), next_path)
 
 
-def construct_chat_history(conversation_history: dict, n: int = 4, agent_name="AI") -> str:
-    chat_history = ""
-    for chat in conversation_history.get("chat", [])[-n:]:
-        if chat["by"] == "khoj" and chat["intent"].get("type") in ["remember", "reminder", "summarize"]:
-            chat_history += f"User: {chat['intent']['query']}\n"
-            chat_history += f"{agent_name}: {chat['message']}\n"
-        elif chat["by"] == "khoj" and ("text-to-image" in chat["intent"].get("type")):
-            chat_history += f"User: {chat['intent']['query']}\n"
-            chat_history += f"{agent_name}: [generated image redacted for space]\n"
-        elif chat["by"] == "khoj" and ("excalidraw" in chat["intent"].get("type")):
-            chat_history += f"User: {chat['intent']['query']}\n"
-            chat_history += f"{agent_name}: {chat['intent']['inferred-queries'][0]}\n"
-    return chat_history
-
-
 def get_conversation_command(query: str, any_references: bool = False) -> ConversationCommand:
     if query.startswith("/notes"):
         return ConversationCommand.Notes
@@ -244,6 +232,10 @@ def get_conversation_command(query: str, any_references: bool = False) -> ConversationCommand:
         return ConversationCommand.Summarize
     elif query.startswith("/diagram"):
         return ConversationCommand.Diagram
+    elif query.startswith("/code"):
+        return ConversationCommand.Code
+    elif query.startswith("/research"):
+        return ConversationCommand.Research
     # If no relevant notes found for the given query
     elif not any_references:
         return ConversationCommand.General
@@ -301,6 +293,7 @@ async def aget_relevant_information_sources(
     user: KhojUser,
     query_images: List[str] = None,
     agent: Agent = None,
+    tracer: dict = {},
 ):
     """
     Given a query, determine which of the available tools the agent should use in order to answer appropriately.
@@ -337,11 +330,11 @@ async def aget_relevant_information_sources(
             relevant_tools_prompt,
             response_type="json_object",
             user=user,
+            tracer=tracer,
         )
 
     try:
-        response = response.strip()
-        response = remove_json_codeblock(response)
+        response = clean_json(response)
         response = json.loads(response)
         response = [q.strip() for q in response["source"] if q.strip()]
         if not isinstance(response, list) or not response or len(response) == 0:
@@ -378,6 +371,7 @@ async def aget_relevant_output_modes(
     user: KhojUser = None,
     query_images: List[str] = None,
     agent: Agent = None,
+    tracer: dict = {},
 ):
     """
     Given a query, determine which of the available tools the agent should use in order to answer appropriately.
@@ -413,11 +407,12 @@ async def aget_relevant_output_modes(
     )
 
     with timer("Chat actor: Infer output mode for chat response", logger):
-        response = await send_message_to_model_wrapper(relevant_mode_prompt, response_type="json_object", user=user)
+        response = await send_message_to_model_wrapper(
+            relevant_mode_prompt, response_type="json_object", user=user, tracer=tracer
+        )
 
     try:
-        response = response.strip()
-        response = remove_json_codeblock(response)
+        response = clean_json(response)
         response = json.loads(response)
 
         if is_none_or_empty(response):
@@ -444,6 +439,7 @@ async def infer_webpage_urls(
     user: KhojUser,
     query_images: List[str] = None,
     agent: Agent = None,
+    tracer: dict = {},
 ) -> List[str]:
     """
     Infer webpage links from the given query
@@ -468,16 +464,23 @@ async def infer_webpage_urls(
 
     with timer("Chat actor: Infer webpage urls to read", logger):
         response = await send_message_to_model_wrapper(
-            online_queries_prompt, query_images=query_images, response_type="json_object", user=user
+            online_queries_prompt,
+            query_images=query_images,
+            response_type="json_object",
+            user=user,
+            tracer=tracer,
         )
 
     # Validate that the response is a non-empty, JSON-serializable list of URLs
     try:
-        response = response.strip()
+        response = clean_json(response)
         urls = json.loads(response)
         valid_unique_urls = {str(url).strip() for url in urls["links"] if is_valid_url(url)}
         if is_none_or_empty(valid_unique_urls):
             raise ValueError(f"Invalid list of urls: {response}")
+        if len(valid_unique_urls) == 0:
+            logger.error(f"No valid URLs found in response: {response}")
+            return []
         return list(valid_unique_urls)
     except Exception:
         raise ValueError(f"Invalid list of urls: {response}")
@@ -490,6 +493,7 @@ async def generate_online_subqueries(
     user: KhojUser,
     query_images: List[str] = None,
     agent: Agent = None,
+    tracer: dict = {},
 ) -> List[str]:
     """
     Generate subqueries from the given query
@@ -514,13 +518,16 @@ async def generate_online_subqueries(
 
     with timer("Chat actor: Generate online search subqueries", logger):
         response = await send_message_to_model_wrapper(
-            online_queries_prompt, query_images=query_images, response_type="json_object", user=user
+            online_queries_prompt,
+            query_images=query_images,
+            response_type="json_object",
+            user=user,
+            tracer=tracer,
         )
 
     # Validate that the response is a non-empty, JSON-serializable list
     try:
-        response = response.strip()
-        response = remove_json_codeblock(response)
+        response = clean_json(response)
         response = json.loads(response)
         response = [q.strip() for q in response["queries"] if q.strip()]
         if not isinstance(response, list) or not response or len(response) == 0:
@@ -533,7 +540,7 @@ async def generate_online_subqueries(
 
 
 async def schedule_query(
-    q: str, conversation_history: dict, user: KhojUser, query_images: List[str] = None
+    q: str, conversation_history: dict, user: KhojUser, query_images: List[str] = None, tracer: dict = {}
 ) -> Tuple[str, ...]:
     """
     Schedule the date, time to run the query. Assume the server timezone is UTC.
@@ -546,7 +553,7 @@ async def schedule_query(
     )
 
     raw_response = await send_message_to_model_wrapper(
-        crontime_prompt, query_images=query_images, response_type="json_object", user=user
+        crontime_prompt, query_images=query_images, response_type="json_object", user=user, tracer=tracer
     )
 
     # Validate that the response is a non-empty, JSON-serializable list
@@ -561,7 +568,7 @@ async def schedule_query(
 
 
 async def extract_relevant_info(
-    qs: set[str], corpus: str, user: KhojUser = None, agent: Agent = None
+    qs: set[str], corpus: str, user: KhojUser = None, agent: Agent = None, tracer: dict = {}
 ) -> Union[str, None]:
     """
     Extract relevant information for a given query from the target corpus
@@ -584,6 +591,7 @@ async def extract_relevant_info(
         extract_relevant_information,
         prompts.system_prompt_extract_relevant_information,
         user=user,
+        tracer=tracer,
     )
     return response.strip()
 
@@ -595,6 +603,7 @@ async def extract_relevant_summary(
     query_images: List[str] = None,
     user: KhojUser = None,
     agent: Agent = None,
+    tracer: dict = {},
 ) -> Union[str, None]:
     """
     Extract relevant information for a given query from the target corpus
@@ -622,10 +631,58 @@ async def extract_relevant_summary(
         prompts.system_prompt_extract_relevant_summary,
         user=user,
         query_images=query_images,
+        tracer=tracer,
     )
     return response.strip()
 
 
+async def generate_summary_from_files(
+    q: str,
+    user: KhojUser,
+    file_filters: List[str],
+    meta_log: dict,
+    query_images: List[str] = None,
+    agent: Agent = None,
+    send_status_func: Optional[Callable] = None,
+    tracer: dict = {},
+):
+    try:
+        file_object = None
+        if await EntryAdapters.aagent_has_entries(agent):
+            file_names = await EntryAdapters.aget_agent_entry_filepaths(agent)
+            if len(file_names) > 0:
+                file_object = await FileObjectAdapters.async_get_file_objects_by_name(None, file_names.pop(), agent)
+
+        if len(file_filters) > 0:
+            file_object = await FileObjectAdapters.async_get_file_objects_by_name(user, file_filters[0])
+
+        if len(file_object) == 0:
+            response_log = "Sorry, I couldn't find the full text of this file."
+            yield response_log
+            return
+        contextual_data = " ".join([file.raw_text for file in file_object])
+        if not q:
+            q = "Create a general summary of the file"
+        async for result in send_status_func(f"**Constructing Summary Using:** {file_object[0].file_name}"):
+            yield {ChatEvent.STATUS: result}
+
+        response = await extract_relevant_summary(
+            q,
+            contextual_data,
+            conversation_history=meta_log,
+            query_images=query_images,
+            user=user,
+            agent=agent,
+            tracer=tracer,
+        )
+
+        yield str(response)
+    except Exception as e:
+        response_log = "Error summarizing file. Please try again, or contact support."
+        logger.error(f"Error summarizing file for {user.email}: {e}", exc_info=True)
+        yield result
+
+
 async def generate_excalidraw_diagram(
     q: str,
     conversation_history: Dict[str, Any],
@@ -636,6 +693,7 @@ async def generate_excalidraw_diagram(
     user: KhojUser = None,
     agent: Agent = None,
     send_status_func: Optional[Callable] = None,
+    tracer: dict = {},
 ):
     if send_status_func:
         async for event in send_status_func("**Enhancing the Diagramming Prompt**"):
@@ -650,6 +708,7 @@ async def generate_excalidraw_diagram(
         query_images=query_images,
         user=user,
         agent=agent,
+        tracer=tracer,
     )
 
     if send_status_func:
@@ -660,6 +719,7 @@
         q=better_diagram_description_prompt,
         user=user,
         agent=agent,
+        tracer=tracer,
     )
 
     yield better_diagram_description_prompt, excalidraw_diagram_description
@@ -674,6 +734,7 @@ async def generate_better_diagram_description(
     query_images: List[str] = None,
     user: KhojUser = None,
     agent: Agent = None,
+    tracer: dict = {},
 ) -> str:
     """
     Generate a diagram description from the given query and context
@@ -711,7 +772,7 @@
 
     with timer("Chat actor: Generate better diagram description", logger):
         response = await send_message_to_model_wrapper(
-            improve_diagram_description_prompt, query_images=query_images, user=user
+            improve_diagram_description_prompt, query_images=query_images, user=user, tracer=tracer
         )
         response = response.strip()
         if response.startswith(('"', "'")) and response.endswith(('"', "'")):
@@ -724,6 +785,7 @@ async def generate_excalidraw_diagram_from_description(
     q: str,
     user: KhojUser = None,
     agent: Agent = None,
+    tracer: dict = {},
 ) -> str:
     personality_context = (
         prompts.personality_context.format(personality=agent.personality) if agent and agent.personality else ""
@@ -735,9 +797,10 @@
     )
 
     with timer("Chat actor: Generate excalidraw diagram", logger):
-        raw_response = await send_message_to_model_wrapper(query=excalidraw_diagram_generation, user=user)
-        raw_response = raw_response.strip()
-        raw_response = remove_json_codeblock(raw_response)
+        raw_response = await send_message_to_model_wrapper(
+            query=excalidraw_diagram_generation, user=user, tracer=tracer
+        )
+        raw_response = clean_json(raw_response)
         response: Dict[str, str] = json.loads(raw_response)
         if not response or not isinstance(response, List) or not isinstance(response[0], Dict):
             # TODO Some additional validation here that it's a valid Excalidraw diagram
@@ -756,6 +819,7 @@ async def generate_better_image_prompt(
     query_images: Optional[List[str]] = None,
     user: KhojUser = None,
     agent: Agent = None,
+    tracer: dict = {},
 ) -> str:
     """
     Generate a better image prompt from the given query
@@ -802,7 +866,9 @@
     )
 
     with timer("Chat actor: Generate contextual image prompt", logger):
-        response = await send_message_to_model_wrapper(image_prompt, query_images=query_images, user=user)
+        response = await send_message_to_model_wrapper(
+            image_prompt, query_images=query_images, user=user, tracer=tracer
+        )
         response = response.strip()
         if response.startswith(('"', "'")) and response.endswith(('"', "'")):
             response = response[1:-1]
@@ -811,11 +877,13 @@
 
 
 async def send_message_to_model_wrapper(
-    message: str,
+    query: str,
     system_message: str = "",
     response_type: str = "text",
     user: KhojUser = None,
     query_images: List[str] = None,
+    context: str = "",
+    tracer: dict = {},
 ):
     conversation_config: ChatModelOptions = await ConversationAdapters.aget_default_conversation_config(user)
     vision_available = conversation_config.vision_enabled
@@ -845,7 +913,8 @@ async def send_message_to_model_wrapper(
 
         loaded_model = state.offline_chat_processor_config.loaded_model
         truncated_messages = generate_chatml_messages_with_context(
-            user_message=message,
+            user_message=query,
+            context_message=context,
             system_message=system_message,
             model_name=chat_model,
             loaded_model=loaded_model,
@@ -862,6 +931,7 @@ async def send_message_to_model_wrapper(
             max_prompt_size=max_tokens,
             streaming=False,
             response_type=response_type,
+            tracer=tracer,
         )
 
     elif model_type == ChatModelOptions.ModelType.OPENAI:
@@ -869,7 +939,8 @@ async def send_message_to_model_wrapper(
         api_key = openai_chat_config.api_key
         api_base_url = openai_chat_config.api_base_url
         truncated_messages = generate_chatml_messages_with_context(
-            user_message=message,
+            user_message=query,
+            context_message=context,
             system_message=system_message,
             model_name=chat_model,
             max_prompt_size=max_tokens,
@@ -885,11 +956,13 @@ async def send_message_to_model_wrapper(
             model=chat_model,
             response_type=response_type,
             api_base_url=api_base_url,
+            tracer=tracer,
         )
     elif model_type == ChatModelOptions.ModelType.ANTHROPIC:
         api_key = conversation_config.openai_config.api_key
         truncated_messages = generate_chatml_messages_with_context(
-            user_message=message,
+            user_message=query,
+            context_message=context,
             system_message=system_message,
             model_name=chat_model,
             max_prompt_size=max_tokens,
@@ -903,11 +976,14 @@ async def send_message_to_model_wrapper(
             messages=truncated_messages,
             api_key=api_key,
             model=chat_model,
+            response_type=response_type,
+            tracer=tracer,
         )
     elif model_type == ChatModelOptions.ModelType.GOOGLE:
         api_key = conversation_config.openai_config.api_key
         truncated_messages = generate_chatml_messages_with_context(
-            user_message=message,
+            user_message=query,
+            context_message=context,
             system_message=system_message,
             model_name=chat_model,
             max_prompt_size=max_tokens,
@@ -918,7 +994,7 @@ async def send_message_to_model_wrapper(
         )
 
         return gemini_send_message_to_model(
-            messages=truncated_messages, api_key=api_key, model=chat_model, response_type=response_type
+            messages=truncated_messages, api_key=api_key, model=chat_model, response_type=response_type, tracer=tracer
         )
     else:
         raise HTTPException(status_code=500, detail="Invalid conversation config")
@@ -929,6 +1005,7 @@ def send_message_to_model_wrapper_sync(
     system_message: str = "",
     response_type: str = "text",
     user: KhojUser = None,
+    tracer: dict = {},
 ):
     conversation_config: ChatModelOptions = ConversationAdapters.get_default_conversation_config(user)
 
@@ -961,6 +1038,7 @@ def send_message_to_model_wrapper_sync(
             max_prompt_size=max_tokens,
             streaming=False,
             response_type=response_type,
+            tracer=tracer,
         )
 
     elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
@@ -975,7 +1053,11 @@ def send_message_to_model_wrapper_sync(
         )
 
         openai_response = send_message_to_model(
-            messages=truncated_messages, api_key=api_key, model=chat_model, response_type=response_type
+            messages=truncated_messages,
+            api_key=api_key,
+            model=chat_model,
+            response_type=response_type,
+            tracer=tracer,
         )
 
         return openai_response
@@ -995,6 +1077,8 @@ def send_message_to_model_wrapper_sync(
             messages=truncated_messages,
             api_key=api_key,
             model=chat_model,
+            response_type=response_type,
+            tracer=tracer,
         )
 
     elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
@@ -1013,6 +1097,7 @@ def send_message_to_model_wrapper_sync(
             api_key=api_key,
             model=chat_model,
             response_type=response_type,
+            tracer=tracer,
         )
     else:
         raise HTTPException(status_code=500, detail="Invalid conversation config")
@@ -1024,6 +1109,7 @@ def generate_chat_response(
     conversation: Conversation,
     compiled_references: List[Dict] = [],
     online_results: Dict[str, Dict] = {},
+    code_results: Dict[str, Dict] = {},
     inferred_queries: List[str] = [],
     conversation_commands: List[ConversationCommand] = [ConversationCommand.Default],
     user: KhojUser = None,
@@ -1031,7 +1117,10 @@ def generate_chat_response(
     conversation_id: str = None,
     location_data: LocationData = None,
     user_name: Optional[str] = None,
+    meta_research: str = "",
     query_images: Optional[List[str]] = None,
+    tracer: dict = {},
+    train_of_thought: List[Any] = [],
 ) -> Tuple[Union[ThreadedGenerator, Iterator[str]], Dict[str, str]]:
     # Initialize Variables
     chat_response = None
@@ -1039,6 +1128,9 @@ def generate_chat_response(
 
     metadata = {}
     agent = AgentAdapters.get_conversation_agent_by_id(conversation.agent.id) if conversation.agent else None
+    query_to_run = q
+    if meta_research:
+        query_to_run = f"AI Research: {meta_research} {q}"
     try:
         partial_completion = partial(
             save_to_conversation_log,
@@ -1047,10 +1139,13 @@ def generate_chat_response(
             meta_log=meta_log,
             compiled_references=compiled_references,
             online_results=online_results,
+            code_results=code_results,
             inferred_queries=inferred_queries,
             client_application=client_application,
             conversation_id=conversation_id,
             query_images=query_images,
+            tracer=tracer,
+            train_of_thought=train_of_thought,
         )
 
         conversation_config = ConversationAdapters.get_valid_conversation_config(user, conversation)
@@ -1064,9 +1159,9 @@ def generate_chat_response(
         if conversation_config.model_type == "offline":
             loaded_model = state.offline_chat_processor_config.loaded_model
             chat_response = converse_offline(
+                user_query=query_to_run,
                 references=compiled_references,
                 online_results=online_results,
-                user_query=q,
                 loaded_model=loaded_model,
                 conversation_log=meta_log,
                 completion_func=partial_completion,
@@ -1077,6 +1172,7 @@ def generate_chat_response(
                 location_data=location_data,
                 user_name=user_name,
                 agent=agent,
+                tracer=tracer,
             )
 
         elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
@@ -1085,9 +1181,10 @@ def generate_chat_response(
             chat_model = conversation_config.chat_model
             chat_response = converse(
                 compiled_references,
-                q,
+                query_to_run,
                 query_images=query_images,
                 online_results=online_results,
+                code_results=code_results,
                 conversation_log=meta_log,
                 model=chat_model,
                 api_key=api_key,
@@ -1100,15 +1197,17 @@ def generate_chat_response(
                 user_name=user_name,
                 agent=agent,
                 vision_available=vision_available,
+                tracer=tracer,
             )
 
         elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
             api_key = conversation_config.openai_config.api_key
             chat_response = converse_anthropic(
                 compiled_references,
-                q,
+                query_to_run,
                 query_images=query_images,
                 online_results=online_results,
+                code_results=code_results,
                 conversation_log=meta_log,
                 model=conversation_config.chat_model,
                 api_key=api_key,
@@ -1120,15 +1219,16 @@ def generate_chat_response(
                 user_name=user_name,
                 agent=agent,
                 vision_available=vision_available,
+                tracer=tracer,
             )
         elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
            api_key = conversation_config.openai_config.api_key
            chat_response = converse_gemini(
                 compiled_references,
-                q,
-                online_results,
-                meta_log,
+                query_to_run,
+                online_results,
+                code_results,
+                meta_log,
                 model=conversation_config.chat_model,
                 api_key=api_key,
                 completion_func=partial_completion,
@@ -1139,6 +1239,7 @@ def generate_chat_response(
                 user_name=user_name,
                 agent=agent,
                 vision_available=vision_available,
+                tracer=tracer,
             )
 
         metadata.update({"chat_model": conversation_config.chat_model})
@@ -1157,6 +1258,7 @@ class ChatRequestBody(BaseModel):
     stream: Optional[bool] = False
     title: Optional[str] = None
     conversation_id: Optional[str] = None
+    turn_id: Optional[str] = None
     city: Optional[str] = None
     region: Optional[str] = None
     country: Optional[str] = None
@@ -1166,6 +1268,17 @@ class ChatRequestBody(BaseModel):
     create_new: Optional[bool] = False
 
 
+class DeleteMessageRequestBody(BaseModel):
+    conversation_id: str
+    turn_id: str
+
+
+class FeedbackData(BaseModel):
+    uquery: str
+    kquery: str
+    sentiment: str
+
+
 class ApiUserRateLimiter:
     def __init__(self, requests: int, subscribed_requests: int, window: int, slug: str):
         self.requests = requests
@@ -1268,7 +1381,7 @@ class ConversationCommandRateLimiter:
         self.slug = slug
         self.trial_rate_limit = trial_rate_limit
         self.subscribed_rate_limit = subscribed_rate_limit
-        self.restricted_commands = [ConversationCommand.
+        self.restricted_commands = [ConversationCommand.Research]
 
     async def update_and_check_if_valid(self, request: Request, conversation_command: ConversationCommand):
         if state.billing_enabled is False:
@@ -1495,9 +1608,15 @@ def scheduled_chat(
 
 
 async def create_automation(
-    q: str, timezone: str, user: KhojUser, calling_url: URL, meta_log: dict = {}, conversation_id: str = None
+    q: str,
+    timezone: str,
+    user: KhojUser,
+    calling_url: URL,
+    meta_log: dict = {},
+    conversation_id: str = None,
+    tracer: dict = {},
 ):
-    crontime, query_to_run, subject = await schedule_query(q, meta_log, user)
+    crontime, query_to_run, subject = await schedule_query(q, meta_log, user, tracer=tracer)
     job = await schedule_automation(query_to_run, subject, crontime, timezone, q, user, calling_url, conversation_id)
     return job, crontime, query_to_run, subject
 
@@ -1575,14 +1694,6 @@ Manage your automations [here](/automations).
 """.strip()
 
 
-class ChatEvent(Enum):
-    START_LLM_RESPONSE = "start_llm_response"
-    END_LLM_RESPONSE = "end_llm_response"
-    MESSAGE = "message"
-    REFERENCES = "references"
-    STATUS = "status"
-
-
 class MessageProcessor:
     def __init__(self):
         self.references = {}