khoj 1.26.5.dev32__py3-none-any.whl → 1.27.2.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. khoj/configure.py +2 -2
  2. khoj/database/adapters/__init__.py +41 -28
  3. khoj/database/admin.py +2 -0
  4. khoj/database/management/commands/change_default_model.py +186 -0
  5. khoj/database/migrations/0071_subscription_enabled_trial_at_and_more.py +32 -0
  6. khoj/database/migrations/0072_entry_search_model.py +24 -0
  7. khoj/database/models/__init__.py +4 -1
  8. khoj/interface/compiled/404/index.html +1 -1
  9. khoj/interface/compiled/_next/static/chunks/{3678-ef0d20e267e9f010.js → 7883-b1305ec254213afe.js} +2 -2
  10. khoj/interface/compiled/_next/static/chunks/app/settings/page-7a8c382af2a7e870.js +1 -0
  11. khoj/interface/compiled/_next/static/chunks/{webpack-97b668cf180c18d3.js → webpack-0a32754d38e6f6a5.js} +1 -1
  12. khoj/interface/compiled/_next/static/css/{ddcc0cf73e062476.css → 798b0de12852bd20.css} +1 -1
  13. khoj/interface/compiled/agents/index.html +1 -1
  14. khoj/interface/compiled/agents/index.txt +2 -2
  15. khoj/interface/compiled/automations/index.html +1 -1
  16. khoj/interface/compiled/automations/index.txt +1 -1
  17. khoj/interface/compiled/chat/index.html +1 -1
  18. khoj/interface/compiled/chat/index.txt +2 -2
  19. khoj/interface/compiled/factchecker/index.html +1 -1
  20. khoj/interface/compiled/factchecker/index.txt +2 -2
  21. khoj/interface/compiled/index.html +1 -1
  22. khoj/interface/compiled/index.txt +2 -2
  23. khoj/interface/compiled/search/index.html +1 -1
  24. khoj/interface/compiled/search/index.txt +2 -2
  25. khoj/interface/compiled/settings/index.html +1 -1
  26. khoj/interface/compiled/settings/index.txt +2 -2
  27. khoj/interface/compiled/share/chat/index.html +1 -1
  28. khoj/interface/compiled/share/chat/index.txt +2 -2
  29. khoj/processor/content/text_to_entries.py +4 -2
  30. khoj/processor/conversation/anthropic/anthropic_chat.py +21 -21
  31. khoj/processor/conversation/anthropic/utils.py +51 -1
  32. khoj/processor/conversation/google/utils.py +2 -15
  33. khoj/processor/conversation/prompts.py +1 -1
  34. khoj/processor/conversation/utils.py +39 -1
  35. khoj/processor/image/generate.py +2 -1
  36. khoj/routers/api.py +5 -2
  37. khoj/routers/api_model.py +0 -33
  38. khoj/routers/{subscription.py → api_subscription.py} +20 -2
  39. khoj/routers/helpers.py +22 -23
  40. khoj/search_type/text_search.py +6 -2
  41. khoj/utils/helpers.py +1 -1
  42. {khoj-1.26.5.dev32.dist-info → khoj-1.27.2.dev1.dist-info}/METADATA +1 -1
  43. {khoj-1.26.5.dev32.dist-info → khoj-1.27.2.dev1.dist-info}/RECORD +53 -50
  44. khoj/interface/compiled/_next/static/chunks/app/settings/page-989cf38b87b19427.js +0 -1
  45. /khoj/interface/compiled/_next/static/chunks/{1970-1d6d0c1b00b4f343.js → 1970-60c96aed937a4928.js} +0 -0
  46. /khoj/interface/compiled/_next/static/chunks/{3423-8e9c420574a9fbe3.js → 3423-0b533af8bf6ac218.js} +0 -0
  47. /khoj/interface/compiled/_next/static/chunks/{9417-759984ad62caa3dc.js → 9417-2ca87207387fc790.js} +0 -0
  48. /khoj/interface/compiled/_next/static/chunks/{9479-4b443fdcc99141c9.js → 9479-646dff2911c5d2e1.js} +0 -0
  49. /khoj/interface/compiled/_next/static/chunks/app/{page-58357cd206c50a83.js → page-bf90ab8e0431b578.js} +0 -0
  50. /khoj/interface/compiled/_next/static/{33lVd_MKsWtGKj4gb1bCi → hYOhRxnAiYj2W_ucQNk6-}/_buildManifest.js +0 -0
  51. /khoj/interface/compiled/_next/static/{33lVd_MKsWtGKj4gb1bCi → hYOhRxnAiYj2W_ucQNk6-}/_ssgManifest.js +0 -0
  52. {khoj-1.26.5.dev32.dist-info → khoj-1.27.2.dev1.dist-info}/WHEEL +0 -0
  53. {khoj-1.26.5.dev32.dist-info → khoj-1.27.2.dev1.dist-info}/entry_points.txt +0 -0
  54. {khoj-1.26.5.dev32.dist-info → khoj-1.27.2.dev1.dist-info}/licenses/LICENSE +0 -0
khoj/processor/conversation/anthropic/anthropic_chat.py CHANGED
@@ -11,8 +11,12 @@ from khoj.processor.conversation import prompts
 from khoj.processor.conversation.anthropic.utils import (
     anthropic_chat_completion_with_backoff,
     anthropic_completion_with_backoff,
+    format_messages_for_anthropic,
+)
+from khoj.processor.conversation.utils import (
+    construct_structured_message,
+    generate_chatml_messages_with_context,
 )
-from khoj.processor.conversation.utils import generate_chatml_messages_with_context
 from khoj.utils.helpers import ConversationCommand, is_none_or_empty
 from khoj.utils.rawconfig import LocationData

@@ -27,6 +31,8 @@ def extract_questions_anthropic(
     temperature=0.7,
     location_data: LocationData = None,
     user: KhojUser = None,
+    query_images: Optional[list[str]] = None,
+    vision_enabled: bool = False,
     personality_context: Optional[str] = None,
 ):
     """
@@ -68,6 +74,13 @@ def extract_questions_anthropic(
         text=text,
     )

+    prompt = construct_structured_message(
+        message=prompt,
+        images=query_images,
+        model_type=ChatModelOptions.ModelType.ANTHROPIC,
+        vision_enabled=vision_enabled,
+    )
+
     messages = [ChatMessage(content=prompt, role="user")]

     response = anthropic_completion_with_backoff(
@@ -101,17 +114,7 @@ def anthropic_send_message_to_model(messages, api_key, model):
     """
     Send message to model
     """
-    # Anthropic requires the first message to be a 'user' message, and the system prompt is not to be sent in the messages parameter
-    system_prompt = None
-
-    if len(messages) == 1:
-        messages[0].role = "user"
-    else:
-        system_prompt = ""
-        for message in messages.copy():
-            if message.role == "system":
-                system_prompt += message.content
-                messages.remove(message)
+    messages, system_prompt = format_messages_for_anthropic(messages)

     # Get Response from GPT. Don't use response_type because Anthropic doesn't support it.
     return anthropic_completion_with_backoff(
@@ -127,7 +130,7 @@ def converse_anthropic(
     user_query,
     online_results: Optional[Dict[str, Dict]] = None,
     conversation_log={},
-    model: Optional[str] = "claude-instant-1.2",
+    model: Optional[str] = "claude-3-5-sonnet-20241022",
     api_key: Optional[str] = None,
     completion_func=None,
     conversation_commands=[ConversationCommand.Default],
@@ -136,6 +139,8 @@ def converse_anthropic(
     location_data: LocationData = None,
     user_name: str = None,
     agent: Agent = None,
+    query_images: Optional[list[str]] = None,
+    vision_available: bool = False,
 ):
     """
     Converse with user using Anthropic's Claude
@@ -189,17 +194,12 @@ def converse_anthropic(
         model_name=model,
         max_prompt_size=max_prompt_size,
         tokenizer_name=tokenizer_name,
+        query_images=query_images,
+        vision_enabled=vision_available,
         model_type=ChatModelOptions.ModelType.ANTHROPIC,
     )

-    if len(messages) > 1:
-        if messages[0].role == "assistant":
-            messages = messages[1:]
-
-    for message in messages.copy():
-        if message.role == "system":
-            system_prompt += message.content
-            messages.remove(message)
+    messages, system_prompt = format_messages_for_anthropic(messages, system_prompt)

     truncated_messages = "\n".join({f"{message.content[:40]}..." for message in messages})
     logger.debug(f"Conversation Context for Claude: {truncated_messages}")
khoj/processor/conversation/anthropic/utils.py CHANGED
@@ -3,6 +3,7 @@ from threading import Thread
 from typing import Dict, List

 import anthropic
+from langchain.schema import ChatMessage
 from tenacity import (
     before_sleep_log,
     retry,
@@ -11,7 +12,8 @@ from tenacity import (
     wait_random_exponential,
 )

-from khoj.processor.conversation.utils import ThreadedGenerator
+from khoj.processor.conversation.utils import ThreadedGenerator, get_image_from_url
+from khoj.utils.helpers import is_none_or_empty

 logger = logging.getLogger(__name__)

@@ -115,3 +117,51 @@ def anthropic_llm_thread(
         logger.error(f"Error in anthropic_llm_thread: {e}", exc_info=True)
     finally:
         g.close()
+
+
+def format_messages_for_anthropic(messages: list[ChatMessage], system_prompt=None):
+    """
+    Format messages for Anthropic
+    """
+    # Extract system prompt
+    system_prompt = system_prompt or ""
+    for message in messages.copy():
+        if message.role == "system":
+            system_prompt += message.content
+            messages.remove(message)
+    system_prompt = None if is_none_or_empty(system_prompt) else system_prompt
+
+    # Anthropic requires the first message to be a 'user' message
+    if len(messages) == 1:
+        messages[0].role = "user"
+    elif len(messages) > 1 and messages[0].role == "assistant":
+        messages = messages[1:]
+
+    # Convert image urls to base64 encoded images in Anthropic message format
+    for message in messages:
+        if isinstance(message.content, list):
+            content = []
+            # Sort the content. Anthropic models prefer that text comes after images.
+            message.content.sort(key=lambda x: 0 if x["type"] == "image_url" else 1)
+            for idx, part in enumerate(message.content):
+                if part["type"] == "text":
+                    content.append({"type": "text", "text": part["text"]})
+                elif part["type"] == "image_url":
+                    image = get_image_from_url(part["image_url"]["url"], type="b64")
+                    # Prefix each image with text block enumerating the image number
+                    # This helps the model reference the image in its response. Recommended by Anthropic
+                    content.extend(
+                        [
+                            {
+                                "type": "text",
+                                "text": f"Image {idx + 1}:",
+                            },
+                            {
+                                "type": "image",
+                                "source": {"type": "base64", "media_type": image.type, "data": image.content},
+                            },
+                        ]
+                    )
+            message.content = content
+
+    return messages, system_prompt
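The new `format_messages_for_anthropic` helper consolidates the system-prompt extraction that `anthropic_chat.py` previously duplicated, and converts any `image_url` parts into Anthropic-style base64 `image` blocks. A minimal usage sketch, assuming history is built with langchain's `ChatMessage` as in the import above:

```python
from langchain.schema import ChatMessage

from khoj.processor.conversation.anthropic.utils import format_messages_for_anthropic

history = [
    ChatMessage(role="system", content="You are Khoj, a helpful personal assistant."),
    ChatMessage(role="user", content="Summarize my notes on alpine touring."),
]

messages, system_prompt = format_messages_for_anthropic(history)
# system_prompt -> "You are Khoj, a helpful personal assistant."
# messages      -> just the user turn; list-valued content with image_url parts would be
#                  rewritten into "Image N:" text blocks followed by base64 image blocks.
```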
khoj/processor/conversation/google/utils.py CHANGED
@@ -1,11 +1,8 @@
 import logging
 import random
-from io import BytesIO
 from threading import Thread

 import google.generativeai as genai
-import PIL.Image
-import requests
 from google.generativeai.types.answer_types import FinishReason
 from google.generativeai.types.generation_types import StopCandidateException
 from google.generativeai.types.safety_types import (
@@ -22,7 +19,7 @@ from tenacity import (
     wait_random_exponential,
 )

-from khoj.processor.conversation.utils import ThreadedGenerator
+from khoj.processor.conversation.utils import ThreadedGenerator, get_image_from_url
 from khoj.utils.helpers import is_none_or_empty

 logger = logging.getLogger(__name__)
@@ -207,7 +204,7 @@ def format_messages_for_gemini(messages: list[ChatMessage], system_prompt: str =
         if isinstance(message.content, list):
             # Convert image_urls to PIL.Image and place them at beginning of list (better for Gemini)
             message.content = [
-                get_image_from_url(item["image_url"]["url"]) if item["type"] == "image_url" else item["text"]
+                get_image_from_url(item["image_url"]["url"]).content if item["type"] == "image_url" else item["text"]
                 for item in sorted(message.content, key=lambda x: 0 if x["type"] == "image_url" else 1)
             ]
         elif isinstance(message.content, str):
@@ -220,13 +217,3 @@ def format_messages_for_gemini(messages: list[ChatMessage], system_prompt: str =
         messages[0].role = "user"

     return messages, system_prompt
-
-
-def get_image_from_url(image_url: str) -> PIL.Image:
-    try:
-        response = requests.get(image_url)
-        response.raise_for_status()  # Check if the request was successful
-        return PIL.Image.open(BytesIO(response.content))
-    except requests.exceptions.RequestException as e:
-        logger.error(f"Failed to get image from URL {image_url}: {e}")
-        return None
khoj/processor/conversation/prompts.py CHANGED
@@ -619,7 +619,7 @@ AI: It's currently 28°C and partly cloudy in Bali.
 Q: Share a painting using the weather for Bali every morning.
 Khoj: {{"output": "automation"}}

-Now it's your turn to pick the mode you would like to use to answer the user's question. Provide your response as a JSON.
+Now it's your turn to pick the mode you would like to use to answer the user's question. Provide your response as a JSON. Do not say anything else.

 Chat History:
 {chat_history}
khoj/processor/conversation/utils.py CHANGED
@@ -1,10 +1,16 @@
+import base64
 import logging
 import math
+import mimetypes
 import queue
+from dataclasses import dataclass
 from datetime import datetime
+from io import BytesIO
 from time import perf_counter
 from typing import Any, Dict, List, Optional

+import PIL.Image
+import requests
 import tiktoken
 from langchain.schema import ChatMessage
 from llama_cpp.llama import Llama
@@ -152,7 +158,11 @@ def construct_structured_message(message: str, images: list[str], model_type: st
     if not images or not vision_enabled:
         return message

-    if model_type in [ChatModelOptions.ModelType.OPENAI, ChatModelOptions.ModelType.GOOGLE]:
+    if model_type in [
+        ChatModelOptions.ModelType.OPENAI,
+        ChatModelOptions.ModelType.GOOGLE,
+        ChatModelOptions.ModelType.ANTHROPIC,
+    ]:
         return [
             {"type": "text", "text": message},
             *[{"type": "image_url", "image_url": {"url": image}} for image in images],
@@ -306,3 +316,31 @@ def reciprocal_conversation_to_chatml(message_pair):
 def remove_json_codeblock(response: str):
     """Remove any markdown json codeblock formatting if present. Useful for non schema enforceable models"""
     return response.removeprefix("```json").removesuffix("```")
+
+
+@dataclass
+class ImageWithType:
+    content: Any
+    type: str
+
+
+def get_image_from_url(image_url: str, type="pil"):
+    try:
+        response = requests.get(image_url)
+        response.raise_for_status()  # Check if the request was successful
+
+        # Get content type from response or infer from URL
+        content_type = response.headers.get("content-type") or mimetypes.guess_type(image_url)[0] or "image/webp"
+
+        # Convert image to desired format
+        if type == "b64":
+            image_data = base64.b64encode(response.content).decode("utf-8")
+        elif type == "pil":
+            image_data = PIL.Image.open(BytesIO(response.content))
+        else:
+            raise ValueError(f"Invalid image type: {type}")
+
+        return ImageWithType(content=image_data, type=content_type)
+    except requests.exceptions.RequestException as e:
+        logger.error(f"Failed to get image from URL {image_url}: {e}")
+        return ImageWithType(content=None, type=None)
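`get_image_from_url` moves into the shared conversation utils and now returns an `ImageWithType` wrapper rather than a bare `PIL.Image`, so each provider picks the representation it needs. A short sketch with an illustrative URL:

```python
from khoj.processor.conversation.utils import get_image_from_url

# Gemini path: the default type="pil" keeps a PIL.Image in .content
pil_image = get_image_from_url("https://example.com/dog.png").content

# Anthropic path: type="b64" yields a base64 string plus its media type,
# ready to slot into an Anthropic "image" content block
image = get_image_from_url("https://example.com/dog.png", type="b64")
image_block = {
    "type": "image",
    "source": {"type": "base64", "media_type": image.type, "data": image.content},
}
```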
khoj/processor/image/generate.py CHANGED
@@ -204,9 +204,10 @@ def generate_image_with_replicate(

     # Raise exception if the image generation task fails
     if status != "succeeded":
+        error = get_prediction.get("error")
         if retry_count >= 10:
             raise requests.RequestException("Image generation timed out")
-        raise requests.RequestException(f"Image generation failed with status: {status}")
+        raise requests.RequestException(f"Image generation failed with status: {status}, message: {error}")

     # Get the generated image
     image_url = get_prediction["output"][0] if isinstance(get_prediction["output"], list) else get_prediction["output"]
khoj/routers/api.py CHANGED
@@ -25,8 +25,9 @@ from khoj.database.adapters (
     AutomationAdapters,
     ConversationAdapters,
     EntryAdapters,
+    get_default_search_model,
+    get_user_default_search_model,
     get_user_photo,
-    get_user_search_model_or_default,
 )
 from khoj.database.models import (
     Agent,
@@ -149,7 +150,7 @@ async def execute_search(
     encoded_asymmetric_query = None
     if t != SearchType.Image:
         with timer("Encoding query took", logger=logger):
-            search_model = await sync_to_async(get_user_search_model_or_default)(user)
+            search_model = await sync_to_async(get_user_default_search_model)(user)
             encoded_asymmetric_query = state.embeddings_model[search_model.name].embed_query(defiltered_query)

     with concurrent.futures.ThreadPoolExecutor() as executor:
@@ -447,11 +448,13 @@ async def extract_references_and_questions(
         chat_model = conversation_config.chat_model
         inferred_queries = extract_questions_anthropic(
             defiltered_query,
+            query_images=query_images,
             model=chat_model,
             api_key=api_key,
             conversation_log=meta_log,
             location_data=location_data,
             user=user,
+            vision_enabled=vision_enabled,
             personality_context=personality_context,
         )
     elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
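Both `execute_search` above and `text_search.query` further down switch to the renamed adapter. A sketch of the call pattern, mirroring the hunk above (the wrapper function and query string are illustrative, not part of the diff):

```python
from asgiref.sync import sync_to_async

from khoj.database.adapters import get_user_default_search_model
from khoj.utils import state


async def encode_query(user, query: str):
    # Renamed from get_user_search_model_or_default; per-user search model selection
    # goes away along with the endpoint removed from api_model.py below.
    search_model = await sync_to_async(get_user_default_search_model)(user)
    return state.embeddings_model[search_model.name].embed_query(query)
```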
khoj/routers/api_model.py CHANGED
@@ -94,39 +94,6 @@ async def update_voice_model(
     return Response(status_code=202, content=json.dumps({"status": "ok"}))


-@api_model.post("/search", status_code=200)
-@requires(["authenticated"])
-async def update_search_model(
-    request: Request,
-    id: str,
-    client: Optional[str] = None,
-):
-    user = request.user.object
-
-    prev_config = await adapters.aget_user_search_model(user)
-    new_config = await adapters.aset_user_search_model(user, int(id))
-
-    if prev_config and int(id) != prev_config.id and new_config:
-        await EntryAdapters.adelete_all_entries(user)
-
-    if not prev_config:
-        # If the use was just using the default config, delete all the entries and set the new config.
-        await EntryAdapters.adelete_all_entries(user)
-
-    if new_config is None:
-        return {"status": "error", "message": "Model not found"}
-    else:
-        update_telemetry_state(
-            request=request,
-            telemetry_type="api",
-            api="set_search_model",
-            client=client,
-            metadata={"search_model": new_config.setting.name},
-        )
-
-    return {"status": "ok"}
-
-
 @api_model.post("/paint", status_code=200)
 @requires(["authenticated"])
 async def update_paint_model(
khoj/routers/{subscription.py → api_subscription.py} CHANGED
@@ -1,12 +1,14 @@
+import json
 import logging
 import os
 from datetime import datetime, timezone

 from asgiref.sync import sync_to_async
-from fastapi import APIRouter, Request
+from fastapi import APIRouter, Request, Response
 from starlette.authentication import requires

 from khoj.database import adapters
+from khoj.database.models import KhojUser, Subscription
 from khoj.routers.helpers import update_telemetry_state
 from khoj.utils import state

@@ -73,7 +75,7 @@ async def subscribe(request: Request):
     elif event_type in {"customer.subscription.deleted"}:
         # Reset the user to trial state
         user, is_new = await adapters.set_user_subscription(
-            customer_email, is_recurring=False, renewal_date=False, type="trial"
+            customer_email, is_recurring=False, renewal_date=False, type=Subscription.Type.TRIAL
         )
         success = user is not None

@@ -116,3 +118,19 @@ async def update_subscription(request: Request, email: str, operation: str):
         return {"success": False, "message": "No subscription found that is set to cancel"}

     return {"success": False, "message": "Invalid operation"}
+
+
+@subscription_router.post("/trial", response_class=Response)
+@requires(["authenticated"])
+async def start_trial(request: Request) -> Response:
+    user: KhojUser = request.user.object
+
+    # Start a trial for the user
+    updated_subscription = await adapters.astart_trial_subscription(user)
+
+    # Return trial status as a JSON response
+    return Response(
+        content=json.dumps({"trial_enabled": updated_subscription is not None}),
+        media_type="application/json",
+        status_code=200,
+    )
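The new `/trial` route starts a free trial for the authenticated user and reports whether it was enabled. A hedged client-side sketch; the host and the router's mount prefix are assumptions, so adjust the URL to the actual deployment:

```python
import requests

KHOJ_HOST = "https://app.khoj.dev"  # assumed host; the subscription_router mount prefix is also assumed

response = requests.post(
    f"{KHOJ_HOST}/api/subscription/trial",
    cookies={"session": "<authenticated session cookie>"},  # the route requires an authenticated user
)
print(response.json())  # {"trial_enabled": true} when a trial was started, false otherwise
```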
khoj/routers/helpers.py CHANGED
@@ -38,6 +38,7 @@ from starlette.requests import URL

 from khoj.database import adapters
 from khoj.database.adapters import (
+    LENGTH_OF_FREE_TRIAL,
     AgentAdapters,
     AutomationAdapters,
     ConversationAdapters,
@@ -683,10 +684,7 @@ async def generate_better_diagram_description(
         prompts.personality_context.format(personality=agent.personality) if agent and agent.personality else ""
     )

-    if location_data:
-        location_prompt = prompts.user_location.format(location=f"{location_data}")
-    else:
-        location_prompt = "Unknown"
+    location = f"{location_data}" if location_data else "Unknown"

     user_references = "\n\n".join([f"# {item['compiled']}" for item in note_references])

@@ -704,7 +702,7 @@ async def generate_better_diagram_description(
     improve_diagram_description_prompt = prompts.improve_diagram_description_prompt.format(
         query=q,
         chat_history=chat_history,
-        location=location_prompt,
+        location=location,
         current_date=today_date,
         references=user_references,
         online_results=simplified_online_results,
@@ -769,10 +767,7 @@ async def generate_better_image_prompt(
     )
     model_type = model_type or TextToImageModelConfig.ModelType.OPENAI

-    if location_data:
-        location_prompt = prompts.user_location.format(location=f"{location_data}")
-    else:
-        location_prompt = "Unknown"
+    location = f"{location_data}" if location_data else "Unknown"

     user_references = "\n\n".join([f"# {item['compiled']}" for item in note_references])

@@ -789,7 +784,7 @@
         image_prompt = prompts.image_generation_improve_prompt_dalle.format(
             query=q,
             chat_history=conversation_history,
-            location=location_prompt,
+            location=location,
             current_date=today_date,
             references=user_references,
             online_results=simplified_online_results,
@@ -799,7 +794,7 @@
         image_prompt = prompts.image_generation_improve_prompt_sd.format(
             query=q,
             chat_history=conversation_history,
-            location=location_prompt,
+            location=location,
             current_date=today_date,
             references=user_references,
             online_results=simplified_online_results,
@@ -825,10 +820,13 @@ async def send_message_to_model_wrapper(
     conversation_config: ChatModelOptions = await ConversationAdapters.aget_default_conversation_config(user)
     vision_available = conversation_config.vision_enabled
     if not vision_available and query_images:
+        logger.warning(f"Vision is not enabled for default model: {conversation_config.chat_model}.")
        vision_enabled_config = await ConversationAdapters.aget_vision_enabled_config()
         if vision_enabled_config:
             conversation_config = vision_enabled_config
             vision_available = True
+    if vision_available and query_images:
+        logger.info(f"Using {conversation_config.chat_model} model to understand {len(query_images)} images.")

     subscribed = await ais_user_subscribed(user)
     chat_model = conversation_config.chat_model
@@ -1109,8 +1107,9 @@ def generate_chat_response(
             chat_response = converse_anthropic(
                 compiled_references,
                 q,
-                online_results,
-                meta_log,
+                query_images=query_images,
+                online_results=online_results,
+                conversation_log=meta_log,
                 model=conversation_config.chat_model,
                 api_key=api_key,
                 completion_func=partial_completion,
@@ -1120,6 +1119,7 @@ def generate_chat_response(
                 location_data=location_data,
                 user_name=user_name,
                 agent=agent,
+                vision_available=vision_available,
             )
         elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
             api_key = conversation_config.openai_config.api_key
@@ -1673,10 +1673,16 @@ def get_user_config(user: KhojUser, request: Request, is_detailed: bool = False)

     user_subscription_state = get_user_subscription_state(user.email)
     user_subscription = adapters.get_user_subscription(user.email)
+
     subscription_renewal_date = (
         user_subscription.renewal_date.strftime("%d %b %Y")
         if user_subscription and user_subscription.renewal_date
-        else (user_subscription.created_at + timedelta(days=7)).strftime("%d %b %Y")
+        else None
+    )
+    subscription_enabled_trial_at = (
+        user_subscription.enabled_trial_at.strftime("%d %b %Y")
+        if user_subscription and user_subscription.enabled_trial_at
+        else None
     )
     given_name = get_user_name(user)

@@ -1699,13 +1705,6 @@ def get_user_config(user: KhojUser, request: Request, is_detailed: bool = False)
     for chat_model in chat_models:
         chat_model_options.append({"name": chat_model.chat_model, "id": chat_model.id})

-    search_model_options = adapters.get_or_create_search_models().all()
-    all_search_model_options = list()
-    for search_model_option in search_model_options:
-        all_search_model_options.append({"name": search_model_option.name, "id": search_model_option.id})
-
-    current_search_model_option = adapters.get_user_search_model_or_default(user)
-
     selected_paint_model_config = ConversationAdapters.get_user_text_to_image_model_config(user)
     paint_model_options = ConversationAdapters.get_text_to_image_model_options().all()
     all_paint_model_options = list()
@@ -1738,8 +1737,6 @@ def get_user_config(user: KhojUser, request: Request, is_detailed: bool = False)
         "has_documents": has_documents,
         "notion_token": notion_token,
         # user model settings
-        "search_model_options": all_search_model_options,
-        "selected_search_model_config": current_search_model_option.id,
         "chat_model_options": chat_model_options,
         "selected_chat_model_config": selected_chat_model_config.id if selected_chat_model_config else None,
         "paint_model_options": all_paint_model_options,
@@ -1749,6 +1746,7 @@ def get_user_config(user: KhojUser, request: Request, is_detailed: bool = False)
         # user billing info
         "subscription_state": user_subscription_state,
         "subscription_renewal_date": subscription_renewal_date,
+        "subscription_enabled_trial_at": subscription_enabled_trial_at,
         # server settings
         "khoj_cloud_subscription_url": os.getenv("KHOJ_CLOUD_SUBSCRIPTION_URL"),
         "billing_enabled": state.billing_enabled,
@@ -1757,6 +1755,7 @@ def get_user_config(user: KhojUser, request: Request, is_detailed: bool = False)
         "khoj_version": state.khoj_version,
         "anonymous_mode": state.anonymous_mode,
         "notion_oauth_url": notion_oauth_url,
+        "length_of_free_trial": LENGTH_OF_FREE_TRIAL,
     }

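`get_user_config` accordingly drops the search-model settings and surfaces the new trial fields for the settings UI. A sketch of the affected slice of the payload, with illustrative values:

```python
from khoj.database.adapters import LENGTH_OF_FREE_TRIAL

# Field names come from the hunks above; the values shown are illustrative.
user_config_billing_fields = {
    "subscription_state": "trial",
    "subscription_renewal_date": None,             # no longer synthesized as created_at + 7 days
    "subscription_enabled_trial_at": "01 Nov 2024",
    "length_of_free_trial": LENGTH_OF_FREE_TRIAL,  # trial-length constant, also new in this release
}
```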
 
khoj/search_type/text_search.py CHANGED
@@ -8,7 +8,11 @@ import torch
 from asgiref.sync import sync_to_async
 from sentence_transformers import util

-from khoj.database.adapters import EntryAdapters, get_user_search_model_or_default
+from khoj.database.adapters import (
+    EntryAdapters,
+    get_default_search_model,
+    get_user_default_search_model,
+)
 from khoj.database.models import Agent
 from khoj.database.models import Entry as DbEntry
 from khoj.database.models import KhojUser
@@ -110,7 +114,7 @@ async def query(
     file_type = search_type_to_embeddings_type[type.value]

     query = raw_query
-    search_model = await sync_to_async(get_user_search_model_or_default)(user)
+    search_model = await sync_to_async(get_user_default_search_model)(user)
     if not max_distance:
         if search_model.bi_encoder_confidence_threshold:
             max_distance = search_model.bi_encoder_confidence_threshold
khoj/utils/helpers.py CHANGED
@@ -352,7 +352,7 @@ tool_descriptions_for_llm = {
 }

 mode_descriptions_for_llm = {
-    ConversationCommand.Image: "Use this if the user is requesting you to create a new picture based on their description.",
+    ConversationCommand.Image: "Use this if you are confident the user is requesting you to create a new picture based on their description.",
     ConversationCommand.Automation: "Use this if you are confident the user is requesting a response at a scheduled date, time and frequency",
     ConversationCommand.Text: "Use this if a normal text response would be sufficient for accurately responding to the query.",
     ConversationCommand.Diagram: "Use this if the user is requesting a visual representation that requires primitives like lines, rectangles, and text.",
{khoj-1.26.5.dev32.dist-info → khoj-1.27.2.dev1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: khoj
-Version: 1.26.5.dev32
+Version: 1.27.2.dev1
 Summary: Your Second Brain
 Project-URL: Homepage, https://khoj.dev
 Project-URL: Documentation, https://docs.khoj.dev