khoj 1.26.5.dev34__py3-none-any.whl → 1.26.5.dev43__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. khoj/database/adapters/__init__.py +13 -20
  2. khoj/database/admin.py +2 -0
  3. khoj/database/management/commands/change_default_model.py +182 -0
  4. khoj/database/migrations/0072_entry_search_model.py +24 -0
  5. khoj/database/models/__init__.py +2 -0
  6. khoj/interface/compiled/404/index.html +1 -1
  7. khoj/interface/compiled/_next/static/chunks/{3678-ef0d20e267e9f010.js → 7883-b1305ec254213afe.js} +2 -2
  8. khoj/interface/compiled/_next/static/chunks/app/agents/{page-2beaba7c9bb750bd.js → page-5ae1e540bb5be8a9.js} +1 -1
  9. khoj/interface/compiled/_next/static/chunks/app/automations/{page-9b5c77e0b0dd772c.js → page-774ae3e033f938cd.js} +1 -1
  10. khoj/interface/compiled/_next/static/chunks/app/chat/{page-151232d8417a1ea1.js → page-97f5b61aaf46d364.js} +1 -1
  11. khoj/interface/compiled/_next/static/chunks/app/factchecker/{page-798904432c2417c4.js → page-d82403db2866bad8.js} +1 -1
  12. khoj/interface/compiled/_next/static/chunks/app/{page-db4e38a5255af7ad.js → page-58357cd206c50a83.js} +1 -1
  13. khoj/interface/compiled/_next/static/chunks/app/search/{page-ab2995529ece3140.js → page-9b64f61caa5bd7f9.js} +1 -1
  14. khoj/interface/compiled/_next/static/chunks/app/settings/page-7a8c382af2a7e870.js +1 -0
  15. khoj/interface/compiled/_next/static/chunks/app/share/chat/{page-6a01e07fb244c10c.js → page-eb9e282691858f2e.js} +1 -1
  16. khoj/interface/compiled/_next/static/chunks/{webpack-313247d7eb764923.js → webpack-2c4ce09149d3279a.js} +1 -1
  17. khoj/interface/compiled/_next/static/css/4cae6c0e5c72fb2d.css +1 -0
  18. khoj/interface/compiled/_next/static/css/ddcc0cf73e062476.css +1 -0
  19. khoj/interface/compiled/agents/index.html +1 -1
  20. khoj/interface/compiled/agents/index.txt +2 -2
  21. khoj/interface/compiled/automations/index.html +1 -1
  22. khoj/interface/compiled/automations/index.txt +2 -2
  23. khoj/interface/compiled/chat/index.html +1 -1
  24. khoj/interface/compiled/chat/index.txt +2 -2
  25. khoj/interface/compiled/factchecker/index.html +1 -1
  26. khoj/interface/compiled/factchecker/index.txt +2 -2
  27. khoj/interface/compiled/index.html +1 -1
  28. khoj/interface/compiled/index.txt +2 -2
  29. khoj/interface/compiled/search/index.html +1 -1
  30. khoj/interface/compiled/search/index.txt +2 -2
  31. khoj/interface/compiled/settings/index.html +1 -1
  32. khoj/interface/compiled/settings/index.txt +2 -2
  33. khoj/interface/compiled/share/chat/index.html +1 -1
  34. khoj/interface/compiled/share/chat/index.txt +2 -2
  35. khoj/processor/content/text_to_entries.py +4 -2
  36. khoj/processor/conversation/anthropic/anthropic_chat.py +21 -21
  37. khoj/processor/conversation/anthropic/utils.py +51 -1
  38. khoj/processor/conversation/google/utils.py +2 -15
  39. khoj/processor/conversation/prompts.py +1 -1
  40. khoj/processor/conversation/utils.py +39 -1
  41. khoj/processor/image/generate.py +2 -1
  42. khoj/routers/api.py +5 -2
  43. khoj/routers/api_model.py +0 -33
  44. khoj/routers/helpers.py +12 -22
  45. khoj/search_type/text_search.py +6 -2
  46. {khoj-1.26.5.dev34.dist-info → khoj-1.26.5.dev43.dist-info}/METADATA +1 -1
  47. {khoj-1.26.5.dev34.dist-info → khoj-1.26.5.dev43.dist-info}/RECORD +52 -50
  48. khoj/interface/compiled/_next/static/chunks/app/settings/page-3e9cf5ed5ace4310.js +0 -1
  49. khoj/interface/compiled/_next/static/css/592ca99f5122e75a.css +0 -1
  50. khoj/interface/compiled/_next/static/css/b70402177a7c3207.css +0 -1
  51. /khoj/interface/compiled/_next/static/{7viHIza-WalEOzloM67l4 → FOVRQ-jS1N3UyX5waTycY}/_buildManifest.js +0 -0
  52. /khoj/interface/compiled/_next/static/{7viHIza-WalEOzloM67l4 → FOVRQ-jS1N3UyX5waTycY}/_ssgManifest.js +0 -0
  53. {khoj-1.26.5.dev34.dist-info → khoj-1.26.5.dev43.dist-info}/WHEEL +0 -0
  54. {khoj-1.26.5.dev34.dist-info → khoj-1.26.5.dev43.dist-info}/entry_points.txt +0 -0
  55. {khoj-1.26.5.dev34.dist-info → khoj-1.26.5.dev43.dist-info}/licenses/LICENSE +0 -0
khoj/processor/conversation/anthropic/anthropic_chat.py CHANGED
@@ -11,8 +11,12 @@ from khoj.processor.conversation import prompts
 from khoj.processor.conversation.anthropic.utils import (
     anthropic_chat_completion_with_backoff,
     anthropic_completion_with_backoff,
+    format_messages_for_anthropic,
+)
+from khoj.processor.conversation.utils import (
+    construct_structured_message,
+    generate_chatml_messages_with_context,
 )
-from khoj.processor.conversation.utils import generate_chatml_messages_with_context
 from khoj.utils.helpers import ConversationCommand, is_none_or_empty
 from khoj.utils.rawconfig import LocationData
 
@@ -27,6 +31,8 @@ def extract_questions_anthropic(
     temperature=0.7,
     location_data: LocationData = None,
     user: KhojUser = None,
+    query_images: Optional[list[str]] = None,
+    vision_enabled: bool = False,
     personality_context: Optional[str] = None,
 ):
     """
@@ -68,6 +74,13 @@ def extract_questions_anthropic(
         text=text,
     )
 
+    prompt = construct_structured_message(
+        message=prompt,
+        images=query_images,
+        model_type=ChatModelOptions.ModelType.ANTHROPIC,
+        vision_enabled=vision_enabled,
+    )
+
     messages = [ChatMessage(content=prompt, role="user")]
 
     response = anthropic_completion_with_backoff(
@@ -101,17 +114,7 @@ def anthropic_send_message_to_model(messages, api_key, model):
     """
     Send message to model
     """
-    # Anthropic requires the first message to be a 'user' message, and the system prompt is not to be sent in the messages parameter
-    system_prompt = None
-
-    if len(messages) == 1:
-        messages[0].role = "user"
-    else:
-        system_prompt = ""
-        for message in messages.copy():
-            if message.role == "system":
-                system_prompt += message.content
-                messages.remove(message)
+    messages, system_prompt = format_messages_for_anthropic(messages)
 
     # Get Response from GPT. Don't use response_type because Anthropic doesn't support it.
     return anthropic_completion_with_backoff(
@@ -127,7 +130,7 @@ def converse_anthropic(
     user_query,
     online_results: Optional[Dict[str, Dict]] = None,
     conversation_log={},
-    model: Optional[str] = "claude-instant-1.2",
+    model: Optional[str] = "claude-3-5-sonnet-20241022",
     api_key: Optional[str] = None,
     completion_func=None,
     conversation_commands=[ConversationCommand.Default],
@@ -136,6 +139,8 @@ def converse_anthropic(
     location_data: LocationData = None,
     user_name: str = None,
     agent: Agent = None,
+    query_images: Optional[list[str]] = None,
+    vision_available: bool = False,
 ):
     """
     Converse with user using Anthropic's Claude
@@ -189,17 +194,12 @@ def converse_anthropic(
         model_name=model,
         max_prompt_size=max_prompt_size,
         tokenizer_name=tokenizer_name,
+        query_images=query_images,
+        vision_enabled=vision_available,
         model_type=ChatModelOptions.ModelType.ANTHROPIC,
     )
 
-    if len(messages) > 1:
-        if messages[0].role == "assistant":
-            messages = messages[1:]
-
-    for message in messages.copy():
-        if message.role == "system":
-            system_prompt += message.content
-            messages.remove(message)
+    messages, system_prompt = format_messages_for_anthropic(messages, system_prompt)
 
     truncated_messages = "\n".join({f"{message.content[:40]}..." for message in messages})
     logger.debug(f"Conversation Context for Claude: {truncated_messages}")
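Taken together, these changes let converse_anthropic accept images end to end. A minimal sketch of the new call shape; the API key, image URL, and empty reference list are placeholders, and the real call site (see khoj/routers/helpers.py below) passes several more keyword arguments:

from khoj.processor.conversation.anthropic.anthropic_chat import converse_anthropic

# Placeholder values for illustration only; the response streams back via
# the threaded generator the function returns.
response_stream = converse_anthropic(
    [],  # compiled references
    "What is in this photo?",
    model="claude-3-5-sonnet-20241022",
    api_key="sk-ant-...",
    query_images=["https://example.com/photo.png"],
    vision_available=True,
)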
khoj/processor/conversation/anthropic/utils.py CHANGED
@@ -3,6 +3,7 @@ from threading import Thread
 from typing import Dict, List
 
 import anthropic
+from langchain.schema import ChatMessage
 from tenacity import (
     before_sleep_log,
     retry,
@@ -11,7 +12,8 @@ from tenacity import (
     wait_random_exponential,
 )
 
-from khoj.processor.conversation.utils import ThreadedGenerator
+from khoj.processor.conversation.utils import ThreadedGenerator, get_image_from_url
+from khoj.utils.helpers import is_none_or_empty
 
 logger = logging.getLogger(__name__)
 
@@ -115,3 +117,51 @@ def anthropic_llm_thread(
         logger.error(f"Error in anthropic_llm_thread: {e}", exc_info=True)
     finally:
         g.close()
+
+
+def format_messages_for_anthropic(messages: list[ChatMessage], system_prompt=None):
+    """
+    Format messages for Anthropic
+    """
+    # Extract system prompt
+    system_prompt = system_prompt or ""
+    for message in messages.copy():
+        if message.role == "system":
+            system_prompt += message.content
+            messages.remove(message)
+    system_prompt = None if is_none_or_empty(system_prompt) else system_prompt
+
+    # Anthropic requires the first message to be a 'user' message
+    if len(messages) == 1:
+        messages[0].role = "user"
+    elif len(messages) > 1 and messages[0].role == "assistant":
+        messages = messages[1:]
+
+    # Convert image urls to base64 encoded images in Anthropic message format
+    for message in messages:
+        if isinstance(message.content, list):
+            content = []
+            # Sort the content. Anthropic models prefer that text comes after images.
+            message.content.sort(key=lambda x: 0 if x["type"] == "image_url" else 1)
+            for idx, part in enumerate(message.content):
+                if part["type"] == "text":
+                    content.append({"type": "text", "text": part["text"]})
+                elif part["type"] == "image_url":
+                    image = get_image_from_url(part["image_url"]["url"], type="b64")
+                    # Prefix each image with text block enumerating the image number
+                    # This helps the model reference the image in its response. Recommended by Anthropic
+                    content.extend(
+                        [
+                            {
+                                "type": "text",
+                                "text": f"Image {idx + 1}:",
+                            },
+                            {
+                                "type": "image",
+                                "source": {"type": "base64", "media_type": image.type, "data": image.content},
+                            },
+                        ]
+                    )
+            message.content = content
+
+    return messages, system_prompt
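A minimal sketch of what the new helper does to a conversation, assuming a configured khoj environment; the image URL is a placeholder, so a real run would need a reachable image for the base64 fetch to succeed:

from langchain.schema import ChatMessage

from khoj.processor.conversation.anthropic.utils import format_messages_for_anthropic

messages = [
    ChatMessage(content="You are Khoj, a helpful assistant.", role="system"),
    ChatMessage(
        content=[
            {"type": "text", "text": "What is in this image?"},
            {"type": "image_url", "image_url": {"url": "https://example.com/photo.png"}},
        ],
        role="user",
    ),
]

messages, system_prompt = format_messages_for_anthropic(messages)
# system_prompt is now "You are Khoj, a helpful assistant." and is meant for
# Anthropic's top-level `system` parameter rather than the messages list.
# The user message content becomes, in order: an "Image 1:" text block, a
# base64 `image` block, then the original text block.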
khoj/processor/conversation/google/utils.py CHANGED
@@ -1,11 +1,8 @@
 import logging
 import random
-from io import BytesIO
 from threading import Thread
 
 import google.generativeai as genai
-import PIL.Image
-import requests
 from google.generativeai.types.answer_types import FinishReason
 from google.generativeai.types.generation_types import StopCandidateException
 from google.generativeai.types.safety_types import (
@@ -22,7 +19,7 @@ from tenacity import (
     wait_random_exponential,
 )
 
-from khoj.processor.conversation.utils import ThreadedGenerator
+from khoj.processor.conversation.utils import ThreadedGenerator, get_image_from_url
 from khoj.utils.helpers import is_none_or_empty
 
 logger = logging.getLogger(__name__)
@@ -207,7 +204,7 @@ def format_messages_for_gemini(messages: list[ChatMessage], system_prompt: str =
         if isinstance(message.content, list):
             # Convert image_urls to PIL.Image and place them at beginning of list (better for Gemini)
             message.content = [
-                get_image_from_url(item["image_url"]["url"]) if item["type"] == "image_url" else item["text"]
+                get_image_from_url(item["image_url"]["url"]).content if item["type"] == "image_url" else item["text"]
                 for item in sorted(message.content, key=lambda x: 0 if x["type"] == "image_url" else 1)
             ]
         elif isinstance(message.content, str):
@@ -220,13 +217,3 @@ def format_messages_for_gemini(messages: list[ChatMessage], system_prompt: str =
         messages[0].role = "user"
 
     return messages, system_prompt
-
-
-def get_image_from_url(image_url: str) -> PIL.Image:
-    try:
-        response = requests.get(image_url)
-        response.raise_for_status()  # Check if the request was successful
-        return PIL.Image.open(BytesIO(response.content))
-    except requests.exceptions.RequestException as e:
-        logger.error(f"Failed to get image from URL {image_url}: {e}")
-        return None
khoj/processor/conversation/prompts.py CHANGED
@@ -619,7 +619,7 @@ AI: It's currently 28°C and partly cloudy in Bali.
 Q: Share a painting using the weather for Bali every morning.
 Khoj: {{"output": "automation"}}
 
-Now it's your turn to pick the mode you would like to use to answer the user's question. Provide your response as a JSON.
+Now it's your turn to pick the mode you would like to use to answer the user's question. Provide your response as a JSON. Do not say anything else.
 
 Chat History:
 {chat_history}
khoj/processor/conversation/utils.py CHANGED
@@ -1,10 +1,16 @@
+import base64
 import logging
 import math
+import mimetypes
 import queue
+from dataclasses import dataclass
 from datetime import datetime
+from io import BytesIO
 from time import perf_counter
 from typing import Any, Dict, List, Optional
 
+import PIL.Image
+import requests
 import tiktoken
 from langchain.schema import ChatMessage
 from llama_cpp.llama import Llama
@@ -152,7 +158,11 @@ def construct_structured_message(message: str, images: list[str], model_type: st
     if not images or not vision_enabled:
         return message
 
-    if model_type in [ChatModelOptions.ModelType.OPENAI, ChatModelOptions.ModelType.GOOGLE]:
+    if model_type in [
+        ChatModelOptions.ModelType.OPENAI,
+        ChatModelOptions.ModelType.GOOGLE,
+        ChatModelOptions.ModelType.ANTHROPIC,
+    ]:
         return [
             {"type": "text", "text": message},
             *[{"type": "image_url", "image_url": {"url": image}} for image in images],
@@ -306,3 +316,31 @@ def reciprocal_conversation_to_chatml(message_pair):
 def remove_json_codeblock(response: str):
     """Remove any markdown json codeblock formatting if present. Useful for non schema enforceable models"""
     return response.removeprefix("```json").removesuffix("```")
+
+
+@dataclass
+class ImageWithType:
+    content: Any
+    type: str
+
+
+def get_image_from_url(image_url: str, type="pil"):
+    try:
+        response = requests.get(image_url)
+        response.raise_for_status()  # Check if the request was successful
+
+        # Get content type from response or infer from URL
+        content_type = response.headers.get("content-type") or mimetypes.guess_type(image_url)[0] or "image/webp"
+
+        # Convert image to desired format
+        if type == "b64":
+            image_data = base64.b64encode(response.content).decode("utf-8")
+        elif type == "pil":
+            image_data = PIL.Image.open(BytesIO(response.content))
+        else:
+            raise ValueError(f"Invalid image type: {type}")
+
+        return ImageWithType(content=image_data, type=content_type)
+    except requests.exceptions.RequestException as e:
+        logger.error(f"Failed to get image from URL {image_url}: {e}")
+        return ImageWithType(content=None, type=None)
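A quick sketch of the two return modes, using a placeholder URL; on a failed fetch both fields come back as None:

from khoj.processor.conversation.utils import get_image_from_url

# "b64" returns the image bytes base64-encoded plus the media type, which is
# what Anthropic's image blocks need; "pil" returns a PIL.Image for Gemini
# (hence the new `.content` accessor in google/utils.py above).
image = get_image_from_url("https://example.com/photo.png", type="b64")
print(image.type, image.content is not None)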
khoj/processor/image/generate.py CHANGED
@@ -204,9 +204,10 @@ def generate_image_with_replicate(
 
     # Raise exception if the image generation task fails
     if status != "succeeded":
+        error = get_prediction.get("error")
         if retry_count >= 10:
             raise requests.RequestException("Image generation timed out")
-        raise requests.RequestException(f"Image generation failed with status: {status}")
+        raise requests.RequestException(f"Image generation failed with status: {status}, message: {error}")
 
     # Get the generated image
     image_url = get_prediction["output"][0] if isinstance(get_prediction["output"], list) else get_prediction["output"]
khoj/routers/api.py CHANGED
@@ -25,8 +25,9 @@ from khoj.database.adapters import (
     AutomationAdapters,
     ConversationAdapters,
     EntryAdapters,
+    get_default_search_model,
+    get_user_default_search_model,
     get_user_photo,
-    get_user_search_model_or_default,
 )
 from khoj.database.models import (
     Agent,
@@ -149,7 +150,7 @@ async def execute_search(
     encoded_asymmetric_query = None
     if t != SearchType.Image:
         with timer("Encoding query took", logger=logger):
-            search_model = await sync_to_async(get_user_search_model_or_default)(user)
+            search_model = await sync_to_async(get_user_default_search_model)(user)
             encoded_asymmetric_query = state.embeddings_model[search_model.name].embed_query(defiltered_query)
 
     with concurrent.futures.ThreadPoolExecutor() as executor:
@@ -447,11 +448,13 @@ async def extract_references_and_questions(
         chat_model = conversation_config.chat_model
         inferred_queries = extract_questions_anthropic(
             defiltered_query,
+            query_images=query_images,
             model=chat_model,
             api_key=api_key,
             conversation_log=meta_log,
             location_data=location_data,
             user=user,
+            vision_enabled=vision_enabled,
             personality_context=personality_context,
         )
     elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
khoj/routers/api_model.py CHANGED
@@ -94,39 +94,6 @@ async def update_voice_model(
     return Response(status_code=202, content=json.dumps({"status": "ok"}))
 
 
-@api_model.post("/search", status_code=200)
-@requires(["authenticated"])
-async def update_search_model(
-    request: Request,
-    id: str,
-    client: Optional[str] = None,
-):
-    user = request.user.object
-
-    prev_config = await adapters.aget_user_search_model(user)
-    new_config = await adapters.aset_user_search_model(user, int(id))
-
-    if prev_config and int(id) != prev_config.id and new_config:
-        await EntryAdapters.adelete_all_entries(user)
-
-    if not prev_config:
-        # If the use was just using the default config, delete all the entries and set the new config.
-        await EntryAdapters.adelete_all_entries(user)
-
-    if new_config is None:
-        return {"status": "error", "message": "Model not found"}
-    else:
-        update_telemetry_state(
-            request=request,
-            telemetry_type="api",
-            api="set_search_model",
-            client=client,
-            metadata={"search_model": new_config.setting.name},
-        )
-
-    return {"status": "ok"}
-
-
 @api_model.post("/paint", status_code=200)
 @requires(["authenticated"])
 async def update_paint_model(
khoj/routers/helpers.py CHANGED
@@ -684,10 +684,7 @@ async def generate_better_diagram_description(
         prompts.personality_context.format(personality=agent.personality) if agent and agent.personality else ""
     )
 
-    if location_data:
-        location_prompt = prompts.user_location.format(location=f"{location_data}")
-    else:
-        location_prompt = "Unknown"
+    location = f"{location_data}" if location_data else "Unknown"
 
     user_references = "\n\n".join([f"# {item['compiled']}" for item in note_references])
 
@@ -705,7 +702,7 @@ async def generate_better_diagram_description(
     improve_diagram_description_prompt = prompts.improve_diagram_description_prompt.format(
         query=q,
         chat_history=chat_history,
-        location=location_prompt,
+        location=location,
         current_date=today_date,
         references=user_references,
         online_results=simplified_online_results,
@@ -770,10 +767,7 @@ async def generate_better_image_prompt(
     )
     model_type = model_type or TextToImageModelConfig.ModelType.OPENAI
 
-    if location_data:
-        location_prompt = prompts.user_location.format(location=f"{location_data}")
-    else:
-        location_prompt = "Unknown"
+    location = f"{location_data}" if location_data else "Unknown"
 
     user_references = "\n\n".join([f"# {item['compiled']}" for item in note_references])
 
@@ -790,7 +784,7 @@ async def generate_better_image_prompt(
         image_prompt = prompts.image_generation_improve_prompt_dalle.format(
             query=q,
             chat_history=conversation_history,
-            location=location_prompt,
+            location=location,
             current_date=today_date,
             references=user_references,
             online_results=simplified_online_results,
@@ -800,7 +794,7 @@ async def generate_better_image_prompt(
         image_prompt = prompts.image_generation_improve_prompt_sd.format(
             query=q,
             chat_history=conversation_history,
-            location=location_prompt,
+            location=location,
             current_date=today_date,
             references=user_references,
             online_results=simplified_online_results,
@@ -826,10 +820,13 @@ async def send_message_to_model_wrapper(
         conversation_config: ChatModelOptions = await ConversationAdapters.aget_default_conversation_config(user)
     vision_available = conversation_config.vision_enabled
     if not vision_available and query_images:
+        logger.warning(f"Vision is not enabled for default model: {conversation_config.chat_model}.")
         vision_enabled_config = await ConversationAdapters.aget_vision_enabled_config()
         if vision_enabled_config:
            conversation_config = vision_enabled_config
            vision_available = True
+    if vision_available and query_images:
+        logger.info(f"Using {conversation_config.chat_model} model to understand {len(query_images)} images.")
 
     subscribed = await ais_user_subscribed(user)
     chat_model = conversation_config.chat_model
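The two added log lines bracket the existing vision fallback. A condensed, standalone sketch of that decision, with khoj's adapter objects replaced by hypothetical stand-ins:

import logging

logger = logging.getLogger(__name__)


def pick_conversation_config(default_config, vision_enabled_config, query_images):
    """Condensed sketch of the vision fallback in send_message_to_model_wrapper."""
    conversation_config, vision_available = default_config, default_config.vision_enabled
    if not vision_available and query_images:
        # The default model cannot see the attached images; fall back to a
        # vision-enabled model if one is configured.
        logger.warning(f"Vision is not enabled for default model: {conversation_config.chat_model}.")
        if vision_enabled_config:
            conversation_config, vision_available = vision_enabled_config, True
    if vision_available and query_images:
        logger.info(f"Using {conversation_config.chat_model} model to understand {len(query_images)} images.")
    return conversation_config, vision_available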
@@ -1110,8 +1107,9 @@ def generate_chat_response(
             chat_response = converse_anthropic(
                 compiled_references,
                 q,
-                online_results,
-                meta_log,
+                query_images=query_images,
+                online_results=online_results,
+                conversation_log=meta_log,
                 model=conversation_config.chat_model,
                 api_key=api_key,
                 completion_func=partial_completion,
@@ -1121,6 +1119,7 @@ def generate_chat_response(
                 location_data=location_data,
                 user_name=user_name,
                 agent=agent,
+                vision_available=vision_available,
             )
         elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
             api_key = conversation_config.openai_config.api_key
@@ -1706,13 +1705,6 @@ def get_user_config(user: KhojUser, request: Request, is_detailed: bool = False)
     for chat_model in chat_models:
         chat_model_options.append({"name": chat_model.chat_model, "id": chat_model.id})
 
-    search_model_options = adapters.get_or_create_search_models().all()
-    all_search_model_options = list()
-    for search_model_option in search_model_options:
-        all_search_model_options.append({"name": search_model_option.name, "id": search_model_option.id})
-
-    current_search_model_option = adapters.get_user_search_model_or_default(user)
-
     selected_paint_model_config = ConversationAdapters.get_user_text_to_image_model_config(user)
     paint_model_options = ConversationAdapters.get_text_to_image_model_options().all()
     all_paint_model_options = list()
@@ -1745,8 +1737,6 @@ def get_user_config(user: KhojUser, request: Request, is_detailed: bool = False)
         "has_documents": has_documents,
         "notion_token": notion_token,
         # user model settings
-        "search_model_options": all_search_model_options,
-        "selected_search_model_config": current_search_model_option.id,
         "chat_model_options": chat_model_options,
         "selected_chat_model_config": selected_chat_model_config.id if selected_chat_model_config else None,
         "paint_model_options": all_paint_model_options,
khoj/search_type/text_search.py CHANGED
@@ -8,7 +8,11 @@ import torch
 from asgiref.sync import sync_to_async
 from sentence_transformers import util
 
-from khoj.database.adapters import EntryAdapters, get_user_search_model_or_default
+from khoj.database.adapters import (
+    EntryAdapters,
+    get_default_search_model,
+    get_user_default_search_model,
+)
 from khoj.database.models import Agent
 from khoj.database.models import Entry as DbEntry
 from khoj.database.models import KhojUser
@@ -110,7 +114,7 @@ async def query(
     file_type = search_type_to_embeddings_type[type.value]
 
     query = raw_query
-    search_model = await sync_to_async(get_user_search_model_or_default)(user)
+    search_model = await sync_to_async(get_user_default_search_model)(user)
     if not max_distance:
         if search_model.bi_encoder_confidence_threshold:
             max_distance = search_model.bi_encoder_confidence_threshold
{khoj-1.26.5.dev34.dist-info → khoj-1.26.5.dev43.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: khoj
-Version: 1.26.5.dev34
+Version: 1.26.5.dev43
 Summary: Your Second Brain
 Project-URL: Homepage, https://khoj.dev
 Project-URL: Documentation, https://docs.khoj.dev