khoj 1.23.1.dev1__py3-none-any.whl → 1.23.4.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. khoj/database/adapters/__init__.py +8 -8
  2. khoj/database/migrations/0063_conversation_temp_id.py +36 -0
  3. khoj/database/migrations/0064_remove_conversation_temp_id_alter_conversation_id.py +86 -0
  4. khoj/database/models/__init__.py +1 -0
  5. khoj/interface/compiled/404/index.html +1 -1
  6. khoj/interface/compiled/_next/static/chunks/1603-d643510c2c0b8871.js +1 -0
  7. khoj/interface/compiled/_next/static/chunks/{1906-1747a36c336df02c.js → 4051-3dc2df557ccb5213.js} +2 -2
  8. khoj/interface/compiled/_next/static/chunks/8423-62ac6c832be2461b.js +1 -0
  9. khoj/interface/compiled/_next/static/chunks/9178-421e47df97ff0213.js +1 -0
  10. khoj/interface/compiled/_next/static/chunks/app/agents/page-f8ad4d2944dbcf91.js +1 -0
  11. khoj/interface/compiled/_next/static/chunks/app/automations/{page-3f4b6ff0261e19b7.js → page-cc875a656df43713.js} +1 -1
  12. khoj/interface/compiled/_next/static/chunks/app/chat/page-3e75b0e0aa3aaaf5.js +1 -0
  13. khoj/interface/compiled/_next/static/chunks/app/factchecker/{page-828cf3c5b8e3af79.js → page-bb320ff7d4dee716.js} +1 -1
  14. khoj/interface/compiled/_next/static/chunks/app/{page-2f423a763e2ee0c7.js → page-60193524cf570002.js} +1 -1
  15. khoj/interface/compiled/_next/static/chunks/app/search/page-4bceb5b0df9cfd66.js +1 -0
  16. khoj/interface/compiled/_next/static/chunks/app/settings/page-532ed8b778a0b40d.js +1 -0
  17. khoj/interface/compiled/_next/static/chunks/app/share/chat/page-b5e63aabfd573dba.js +1 -0
  18. khoj/interface/compiled/_next/static/chunks/{webpack-61a553b6ff44f97c.js → webpack-de28762883e5816d.js} +1 -1
  19. khoj/interface/compiled/_next/static/css/1105696872e3f20c.css +25 -0
  20. khoj/interface/compiled/_next/static/css/37a313cb39403a84.css +1 -0
  21. khoj/interface/compiled/_next/static/css/6bde1f2045622ef7.css +1 -0
  22. khoj/interface/compiled/_next/static/css/{a3530ec58b0b660f.css → ab57702ed2b98214.css} +1 -1
  23. khoj/interface/compiled/_next/static/css/{92c48eece0b102b9.css → e41ec62af8ee4e38.css} +1 -1
  24. khoj/interface/compiled/agents/index.html +1 -1
  25. khoj/interface/compiled/agents/index.txt +2 -2
  26. khoj/interface/compiled/automations/index.html +1 -1
  27. khoj/interface/compiled/automations/index.txt +2 -2
  28. khoj/interface/compiled/chat/index.html +1 -1
  29. khoj/interface/compiled/chat/index.txt +2 -2
  30. khoj/interface/compiled/factchecker/index.html +1 -1
  31. khoj/interface/compiled/factchecker/index.txt +2 -2
  32. khoj/interface/compiled/index.html +1 -1
  33. khoj/interface/compiled/index.txt +2 -2
  34. khoj/interface/compiled/search/index.html +1 -1
  35. khoj/interface/compiled/search/index.txt +2 -2
  36. khoj/interface/compiled/settings/index.html +1 -1
  37. khoj/interface/compiled/settings/index.txt +2 -2
  38. khoj/interface/compiled/share/chat/index.html +1 -1
  39. khoj/interface/compiled/share/chat/index.txt +2 -2
  40. khoj/interface/email/magic_link.html +1 -1
  41. khoj/interface/email/task.html +31 -34
  42. khoj/interface/email/welcome.html +82 -53
  43. khoj/main.py +1 -1
  44. khoj/processor/content/images/image_to_entries.py +6 -4
  45. khoj/processor/conversation/google/utils.py +106 -7
  46. khoj/processor/conversation/utils.py +13 -8
  47. khoj/routers/api.py +1 -1
  48. khoj/routers/api_chat.py +13 -15
  49. khoj/routers/helpers.py +12 -7
  50. khoj/utils/cli.py +6 -0
  51. khoj/utils/constants.py +9 -2
  52. khoj/utils/initialization.py +158 -71
  53. {khoj-1.23.1.dev1.dist-info → khoj-1.23.4.dev1.dist-info}/METADATA +2 -2
  54. {khoj-1.23.1.dev1.dist-info → khoj-1.23.4.dev1.dist-info}/RECORD +60 -58
  55. khoj/interface/compiled/_next/static/chunks/1603-fb2d80ae73990df3.js +0 -1
  56. khoj/interface/compiled/_next/static/chunks/8423-14fc72aec9104ce9.js +0 -1
  57. khoj/interface/compiled/_next/static/chunks/9178-c153fc402c970365.js +0 -1
  58. khoj/interface/compiled/_next/static/chunks/app/agents/page-989a824c640bc532.js +0 -1
  59. khoj/interface/compiled/_next/static/chunks/app/chat/page-cc71b18feddf80d6.js +0 -1
  60. khoj/interface/compiled/_next/static/chunks/app/search/page-dcd385f03255ef36.js +0 -1
  61. khoj/interface/compiled/_next/static/chunks/app/settings/page-ddcd51147d18c694.js +0 -1
  62. khoj/interface/compiled/_next/static/chunks/app/share/chat/page-a84001b4724b5463.js +0 -1
  63. khoj/interface/compiled/_next/static/css/2272c73fc7a3b571.css +0 -1
  64. khoj/interface/compiled/_next/static/css/3e49e5ee49c6bda1.css +0 -25
  65. khoj/interface/compiled/_next/static/css/553f9cdcc7a2bcd6.css +0 -1
  66. /khoj/interface/compiled/_next/static/chunks/{7023-52c1be60135eb057.js → 7023-1074a582ec989284.js} +0 -0
  67. /khoj/interface/compiled/_next/static/{A47_BQNcqhjWCuNh_iSec → u496AO3jlFBPp2apVnMkP}/_buildManifest.js +0 -0
  68. /khoj/interface/compiled/_next/static/{A47_BQNcqhjWCuNh_iSec → u496AO3jlFBPp2apVnMkP}/_ssgManifest.js +0 -0
  69. {khoj-1.23.1.dev1.dist-info → khoj-1.23.4.dev1.dist-info}/WHEEL +0 -0
  70. {khoj-1.23.1.dev1.dist-info → khoj-1.23.4.dev1.dist-info}/entry_points.txt +0 -0
  71. {khoj-1.23.1.dev1.dist-info → khoj-1.23.4.dev1.dist-info}/licenses/LICENSE +0 -0
khoj/routers/helpers.py CHANGED
@@ -21,7 +21,7 @@ from typing import (
21
21
  Tuple,
22
22
  Union,
23
23
  )
24
- from urllib.parse import parse_qs, urljoin, urlparse
24
+ from urllib.parse import parse_qs, quote, urljoin, urlparse
25
25
 
26
26
  import cron_descriptor
27
27
  import pytz
@@ -632,6 +632,7 @@ async def send_message_to_model_wrapper(
632
632
  messages=truncated_messages,
633
633
  loaded_model=loaded_model,
634
634
  model=chat_model,
635
+ max_prompt_size=max_tokens,
635
636
  streaming=False,
636
637
  response_type=response_type,
637
638
  )
@@ -721,6 +722,7 @@ def send_message_to_model_wrapper_sync(
721
722
  system_message=system_message,
722
723
  model_name=chat_model,
723
724
  loaded_model=loaded_model,
725
+ max_prompt_size=max_tokens,
724
726
  vision_enabled=vision_available,
725
727
  model_type=conversation_config.model_type,
726
728
  )
@@ -729,6 +731,7 @@ def send_message_to_model_wrapper_sync(
729
731
  messages=truncated_messages,
730
732
  loaded_model=loaded_model,
731
733
  model=chat_model,
734
+ max_prompt_size=max_tokens,
732
735
  streaming=False,
733
736
  response_type=response_type,
734
737
  )
@@ -739,6 +742,7 @@ def send_message_to_model_wrapper_sync(
739
742
  user_message=message,
740
743
  system_message=system_message,
741
744
  model_name=chat_model,
745
+ max_prompt_size=max_tokens,
742
746
  vision_enabled=vision_available,
743
747
  model_type=conversation_config.model_type,
744
748
  )
@@ -795,7 +799,7 @@ def generate_chat_response(
795
799
  conversation_commands: List[ConversationCommand] = [ConversationCommand.Default],
796
800
  user: KhojUser = None,
797
801
  client_application: ClientApplication = None,
798
- conversation_id: int = None,
802
+ conversation_id: str = None,
799
803
  location_data: LocationData = None,
800
804
  user_name: Optional[str] = None,
801
805
  uploaded_image_url: Optional[str] = None,
@@ -1098,7 +1102,7 @@ def scheduled_chat(
1098
1102
  user: KhojUser,
1099
1103
  calling_url: URL,
1100
1104
  job_id: str = None,
1101
- conversation_id: int = None,
1105
+ conversation_id: str = None,
1102
1106
  ):
1103
1107
  logger.info(f"Processing scheduled_chat: {query_to_run}")
1104
1108
  if job_id:
@@ -1127,7 +1131,8 @@ def scheduled_chat(
1127
1131
 
1128
1132
  # Replace the original conversation_id with the conversation_id
1129
1133
  if conversation_id:
1130
- query_dict["conversation_id"] = [conversation_id]
1134
+ # encode the conversation_id to avoid any issues with special characters
1135
+ query_dict["conversation_id"] = [quote(conversation_id)]
1131
1136
 
1132
1137
  # Restructure the original query_dict into a valid JSON payload for the chat API
1133
1138
  json_payload = {key: values[0] for key, values in query_dict.items()}
@@ -1181,7 +1186,7 @@ def scheduled_chat(
1181
1186
 
1182
1187
 
1183
1188
  async def create_automation(
1184
- q: str, timezone: str, user: KhojUser, calling_url: URL, meta_log: dict = {}, conversation_id: int = None
1189
+ q: str, timezone: str, user: KhojUser, calling_url: URL, meta_log: dict = {}, conversation_id: str = None
1185
1190
  ):
1186
1191
  crontime, query_to_run, subject = await schedule_query(q, meta_log)
1187
1192
  job = await schedule_automation(query_to_run, subject, crontime, timezone, q, user, calling_url, conversation_id)
@@ -1196,7 +1201,7 @@ async def schedule_automation(
1196
1201
  scheduling_request: str,
1197
1202
  user: KhojUser,
1198
1203
  calling_url: URL,
1199
- conversation_id: int,
1204
+ conversation_id: str,
1200
1205
  ):
1201
1206
  # Disable minute level automation recurrence
1202
1207
  minute_value = crontime.split(" ")[0]
@@ -1214,7 +1219,7 @@ async def schedule_automation(
1214
1219
  "scheduling_request": scheduling_request,
1215
1220
  "subject": subject,
1216
1221
  "crontime": crontime,
1217
- "conversation_id": conversation_id,
1222
+ "conversation_id": str(conversation_id),
1218
1223
  }
1219
1224
  )
1220
1225
  query_id = hashlib.md5(f"{query_to_run}_{crontime}".encode("utf-8")).hexdigest()
khoj/utils/cli.py CHANGED
@@ -50,6 +50,12 @@ def cli(args=None):
50
50
  default=False,
51
51
  help="Run Khoj in anonymous mode. This does not require any login for connecting users.",
52
52
  )
53
+ parser.add_argument(
54
+ "--non-interactive",
55
+ action="store_true",
56
+ default=False,
57
+ help="Start Khoj in non-interactive mode. Assumes interactive shell unavailable for config. E.g when run via Docker.",
58
+ )
53
59
 
54
60
  args, remaining_args = parser.parse_known_args(args)
55
61
 
khoj/utils/constants.py CHANGED
@@ -8,8 +8,15 @@ empty_escape_sequences = "\n|\r|\t| "
8
8
  app_env_filepath = "~/.khoj/env"
9
9
  telemetry_server = "https://khoj.beta.haletic.com/v1/telemetry"
10
10
  content_directory = "~/.khoj/content/"
11
- default_offline_chat_model = "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF"
12
- default_online_chat_model = "gpt-4o-mini"
11
+ default_offline_chat_models = [
12
+ "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF",
13
+ "bartowski/gemma-2-9b-it-GGUF",
14
+ "bartowski/gemma-2-2b-it-GGUF",
15
+ "bartowski/Phi-3.5-mini-instruct-GGUF",
16
+ ]
17
+ default_openai_chat_models = ["gpt-4o-mini", "gpt-4o"]
18
+ default_gemini_chat_models = ["gemini-1.5-flash", "gemini-1.5-pro"]
19
+ default_anthropic_chat_models = ["claude-3-5-sonnet-20240620", "claude-3-opus-20240229"]
13
20
 
14
21
  empty_config = {
15
22
  "search-type": {
@@ -1,25 +1,37 @@
1
1
  import logging
2
2
  import os
3
+ from typing import Tuple
3
4
 
4
5
  from khoj.database.adapters import ConversationAdapters
5
6
  from khoj.database.models import (
6
7
  ChatModelOptions,
7
8
  KhojUser,
8
9
  OpenAIProcessorConversationConfig,
10
+ ServerChatSettings,
9
11
  SpeechToTextModelOptions,
10
12
  TextToImageModelConfig,
11
13
  )
12
14
  from khoj.processor.conversation.utils import model_to_prompt_size, model_to_tokenizer
13
- from khoj.utils.constants import default_offline_chat_model, default_online_chat_model
15
+ from khoj.utils.constants import (
16
+ default_anthropic_chat_models,
17
+ default_gemini_chat_models,
18
+ default_offline_chat_models,
19
+ default_openai_chat_models,
20
+ )
14
21
 
15
22
  logger = logging.getLogger(__name__)
16
23
 
17
24
 
18
- def initialization():
25
+ def initialization(interactive: bool = True):
19
26
  def _create_admin_user():
20
27
  logger.info(
21
28
  "👩‍✈️ Setting up admin user. These credentials will allow you to configure your server at /server/admin."
22
29
  )
30
+ if not interactive and (not os.getenv("KHOJ_ADMIN_EMAIL") or not os.getenv("KHOJ_ADMIN_PASSWORD")):
31
+ logger.error(
32
+ "🚨 Admin user cannot be created. Please set the KHOJ_ADMIN_EMAIL, KHOJ_ADMIN_PASSWORD environment variables or start server in interactive mode."
33
+ )
34
+ exit(1)
23
35
  email_addr = os.getenv("KHOJ_ADMIN_EMAIL") or input("Email: ")
24
36
  password = os.getenv("KHOJ_ADMIN_PASSWORD") or input("Password: ")
25
37
  admin_user = KhojUser.objects.create_superuser(email=email_addr, username=email_addr, password=password)
@@ -27,87 +39,103 @@ def initialization():
27
39
 
28
40
  def _create_chat_configuration():
29
41
  logger.info(
30
- "🗣️ Configure chat models available to your server. You can always update these at /server/admin using the credentials of your admin account"
42
+ "🗣️ Configure chat models available to your server. You can always update these at /server/admin using your admin account"
31
43
  )
32
44
 
33
- try:
34
- use_offline_model = input("Use offline chat model? (y/n): ")
35
- if use_offline_model == "y":
36
- logger.info("🗣️ Setting up offline chat model")
37
-
38
- offline_chat_model = input(
39
- f"Enter the offline chat model you want to use. See HuggingFace for available GGUF models (default: {default_offline_chat_model}): "
40
- )
41
- if offline_chat_model == "":
42
- ChatModelOptions.objects.create(
43
- chat_model=default_offline_chat_model, model_type=ChatModelOptions.ModelType.OFFLINE
44
- )
45
- else:
46
- default_max_tokens = model_to_prompt_size.get(offline_chat_model, 2000)
47
- max_tokens = input(
48
- f"Enter the maximum number of tokens to use for the offline chat model (default {default_max_tokens}):"
49
- )
50
- max_tokens = max_tokens or default_max_tokens
51
-
52
- default_tokenizer = model_to_tokenizer.get(
53
- offline_chat_model, "hf-internal-testing/llama-tokenizer"
54
- )
55
- tokenizer = input(
56
- f"Enter the tokenizer to use for the offline chat model (default: {default_tokenizer}):"
57
- )
58
- tokenizer = tokenizer or default_tokenizer
59
-
60
- ChatModelOptions.objects.create(
61
- chat_model=offline_chat_model,
62
- model_type=ChatModelOptions.ModelType.OFFLINE,
63
- max_prompt_size=max_tokens,
64
- tokenizer=tokenizer,
65
- )
66
- except ModuleNotFoundError as e:
67
- logger.warning("Offline models are not supported on this device.")
68
-
69
- use_openai_model = input("Use OpenAI models? (y/n): ")
70
- if use_openai_model == "y":
71
- logger.info("🗣️ Setting up your OpenAI configuration")
72
- api_key = input("Enter your OpenAI API key: ")
73
- OpenAIProcessorConversationConfig.objects.create(api_key=api_key)
74
-
75
- openai_chat_model = input(
76
- f"Enter the OpenAI chat model you want to use (default: {default_online_chat_model}): "
77
- )
78
- openai_chat_model = openai_chat_model or default_online_chat_model
79
-
80
- default_max_tokens = model_to_prompt_size.get(openai_chat_model, 2000)
81
- max_tokens = input(
82
- f"Enter the maximum number of tokens to use for the OpenAI chat model (default: {default_max_tokens}): "
83
- )
84
- max_tokens = max_tokens or default_max_tokens
85
- ChatModelOptions.objects.create(
86
- chat_model=openai_chat_model, model_type=ChatModelOptions.ModelType.OPENAI, max_prompt_size=max_tokens
87
- )
45
+ # Set up OpenAI's online chat models
46
+ openai_configured, openai_provider = _setup_chat_model_provider(
47
+ ChatModelOptions.ModelType.OPENAI,
48
+ default_openai_chat_models,
49
+ default_api_key=os.getenv("OPENAI_API_KEY"),
50
+ vision_enabled=True,
51
+ is_offline=False,
52
+ interactive=interactive,
53
+ )
88
54
 
55
+ # Setup OpenAI speech to text model
56
+ if openai_configured:
89
57
  default_speech2text_model = "whisper-1"
90
- openai_speech2text_model = input(
91
- f"Enter the OpenAI speech to text model you want to use (default: {default_speech2text_model}): "
92
- )
93
- openai_speech2text_model = openai_speech2text_model or default_speech2text_model
58
+ if interactive:
59
+ openai_speech2text_model = input(
60
+ f"Enter the OpenAI speech to text model you want to use (default: {default_speech2text_model}): "
61
+ )
62
+ openai_speech2text_model = openai_speech2text_model or default_speech2text_model
63
+ else:
64
+ openai_speech2text_model = default_speech2text_model
94
65
  SpeechToTextModelOptions.objects.create(
95
66
  model_name=openai_speech2text_model, model_type=SpeechToTextModelOptions.ModelType.OPENAI
96
67
  )
97
68
 
69
+ # Setup OpenAI text to image model
70
+ if openai_configured:
98
71
  default_text_to_image_model = "dall-e-3"
99
- openai_text_to_image_model = input(
100
- f"Enter the OpenAI text to image model you want to use (default: {default_text_to_image_model}): "
101
- )
102
- openai_speech2text_model = openai_text_to_image_model or default_text_to_image_model
72
+ if interactive:
73
+ openai_text_to_image_model = input(
74
+ f"Enter the OpenAI text to image model you want to use (default: {default_text_to_image_model}): "
75
+ )
76
+ openai_text_to_image_model = openai_text_to_image_model or default_text_to_image_model
77
+ else:
78
+ openai_text_to_image_model = default_text_to_image_model
103
79
  TextToImageModelConfig.objects.create(
104
- model_name=openai_text_to_image_model, model_type=TextToImageModelConfig.ModelType.OPENAI
80
+ model_name=openai_text_to_image_model,
81
+ model_type=TextToImageModelConfig.ModelType.OPENAI,
82
+ openai_config=openai_provider,
105
83
  )
106
84
 
107
- if use_offline_model == "y" or use_openai_model == "y":
108
- logger.info("🗣️ Chat model configuration complete")
85
+ # Set up Google's Gemini online chat models
86
+ _setup_chat_model_provider(
87
+ ChatModelOptions.ModelType.GOOGLE,
88
+ default_gemini_chat_models,
89
+ default_api_key=os.getenv("GEMINI_API_KEY"),
90
+ vision_enabled=False,
91
+ is_offline=False,
92
+ interactive=interactive,
93
+ provider_name="Google Gemini",
94
+ )
109
95
 
110
- use_offline_speech2text_model = input("Use offline speech to text model? (y/n): ")
96
+ # Set up Anthropic's online chat models
97
+ _setup_chat_model_provider(
98
+ ChatModelOptions.ModelType.ANTHROPIC,
99
+ default_anthropic_chat_models,
100
+ default_api_key=os.getenv("ANTHROPIC_API_KEY"),
101
+ vision_enabled=False,
102
+ is_offline=False,
103
+ interactive=interactive,
104
+ )
105
+
106
+ # Set up offline chat models
107
+ _setup_chat_model_provider(
108
+ ChatModelOptions.ModelType.OFFLINE,
109
+ default_offline_chat_models,
110
+ default_api_key=None,
111
+ vision_enabled=False,
112
+ is_offline=True,
113
+ interactive=interactive,
114
+ )
115
+
116
+ # Explicitly set default chat model
117
+ chat_models_configured = ChatModelOptions.objects.count()
118
+ if chat_models_configured > 0:
119
+ default_chat_model_name = ChatModelOptions.objects.first().chat_model
120
+ # If there are multiple chat models, ask the user to choose the default chat model
121
+ if chat_models_configured > 1 and interactive:
122
+ user_chat_model_name = input(
123
+ f"Enter the default chat model to use (default: {default_chat_model_name}): "
124
+ )
125
+ else:
126
+ user_chat_model_name = None
127
+
128
+ # If the user's choice is valid, set it as the default chat model
129
+ if user_chat_model_name and ChatModelOptions.objects.filter(chat_model=user_chat_model_name).exists():
130
+ default_chat_model_name = user_chat_model_name
131
+
132
+ # Create a server chat settings object with the default chat model
133
+ default_chat_model = ChatModelOptions.objects.filter(chat_model=default_chat_model_name).first()
134
+ ServerChatSettings.objects.create(chat_default=default_chat_model)
135
+ logger.info("🗣️ Chat model configuration complete")
136
+
137
+ # Set up offline speech to text model
138
+ use_offline_speech2text_model = "n" if not interactive else input("Use offline speech to text model? (y/n): ")
111
139
  if use_offline_speech2text_model == "y":
112
140
  logger.info("🗣️ Setting up offline speech to text model")
113
141
  # Delete any existing speech to text model options. There can only be one.
@@ -124,6 +152,64 @@ def initialization():
124
152
 
125
153
  logger.info(f"🗣️ Offline speech to text model configured to {offline_speech2text_model}")
126
154
 
155
+ def _setup_chat_model_provider(
156
+ model_type: ChatModelOptions.ModelType,
157
+ default_chat_models: list,
158
+ default_api_key: str,
159
+ interactive: bool,
160
+ vision_enabled: bool = False,
161
+ is_offline: bool = False,
162
+ provider_name: str = None,
163
+ ) -> Tuple[bool, OpenAIProcessorConversationConfig]:
164
+ supported_vision_models = ["gpt-4o-mini", "gpt-4o"]
165
+ provider_name = provider_name or model_type.name.capitalize()
166
+ default_use_model = {True: "y", False: "n"}[default_api_key is not None or is_offline]
167
+ use_model_provider = (
168
+ default_use_model if not interactive else input(f"Add {provider_name} chat models? (y/n): ")
169
+ )
170
+
171
+ if use_model_provider != "y":
172
+ return False, None
173
+
174
+ logger.info(f"️💬 Setting up your {provider_name} chat configuration")
175
+
176
+ chat_model_provider = None
177
+ if not is_offline:
178
+ if interactive:
179
+ user_api_key = input(f"Enter your {provider_name} API key (default: {default_api_key}): ")
180
+ api_key = user_api_key if user_api_key != "" else default_api_key
181
+ else:
182
+ api_key = default_api_key
183
+ chat_model_provider = OpenAIProcessorConversationConfig.objects.create(api_key=api_key, name=provider_name)
184
+
185
+ if interactive:
186
+ chat_model_names = input(
187
+ f"Enter the {provider_name} chat models you want to use (default: {','.join(default_chat_models)}): "
188
+ )
189
+ chat_models = chat_model_names.split(",") if chat_model_names != "" else default_chat_models
190
+ chat_models = [model.strip() for model in chat_models]
191
+ else:
192
+ chat_models = default_chat_models
193
+
194
+ for chat_model in chat_models:
195
+ default_max_tokens = model_to_prompt_size.get(chat_model)
196
+ default_tokenizer = model_to_tokenizer.get(chat_model)
197
+ vision_enabled = vision_enabled and chat_model in supported_vision_models
198
+
199
+ chat_model_options = {
200
+ "chat_model": chat_model,
201
+ "model_type": model_type,
202
+ "max_prompt_size": default_max_tokens,
203
+ "vision_enabled": vision_enabled,
204
+ "tokenizer": default_tokenizer,
205
+ "openai_config": chat_model_provider,
206
+ }
207
+
208
+ ChatModelOptions.objects.create(**chat_model_options)
209
+
210
+ logger.info(f"🗣️ {provider_name} chat model configuration complete")
211
+ return True, chat_model_provider
212
+
127
213
  admin_user = KhojUser.objects.filter(is_staff=True).first()
128
214
  if admin_user is None:
129
215
  while True:
@@ -139,7 +225,8 @@ def initialization():
139
225
  try:
140
226
  _create_chat_configuration()
141
227
  break
142
- # Some environments don't support interactive input. We catch the exception and return if that's the case. The admin can still configure their settings from the admin page.
228
+ # Some environments don't support interactive input. We catch the exception and return if that's the case.
229
+ # The admin can still configure their settings from the admin page.
143
230
  except EOFError:
144
231
  return
145
232
  except Exception as e:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: khoj
3
- Version: 1.23.1.dev1
3
+ Version: 1.23.4.dev1
4
4
  Summary: Your Second Brain
5
5
  Project-URL: Homepage, https://khoj.dev
6
6
  Project-URL: Documentation, https://docs.khoj.dev
@@ -61,7 +61,7 @@ Requires-Dist: pymupdf>=1.23.5
61
61
  Requires-Dist: python-multipart>=0.0.7
62
62
  Requires-Dist: pytz~=2024.1
63
63
  Requires-Dist: pyyaml~=6.0
64
- Requires-Dist: rapidocr-onnxruntime==1.3.22
64
+ Requires-Dist: rapidocr-onnxruntime==1.3.24
65
65
  Requires-Dist: requests>=2.26.0
66
66
  Requires-Dist: rich>=13.3.1
67
67
  Requires-Dist: schedule==1.1.0