khoj 1.30.11.dev64__py3-none-any.whl → 1.32.3.dev34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (146)
  1. khoj/configure.py +4 -2
  2. khoj/database/adapters/__init__.py +67 -58
  3. khoj/database/admin.py +9 -9
  4. khoj/database/migrations/0077_chatmodel_alter_agent_chat_model_and_more.py +62 -0
  5. khoj/database/migrations/0078_khojuser_email_verification_code_expiry.py +17 -0
  6. khoj/database/models/__init__.py +9 -8
  7. khoj/interface/compiled/404/index.html +1 -1
  8. khoj/interface/compiled/_next/static/chunks/182-8cd8b17d40e6e989.js +20 -0
  9. khoj/interface/compiled/_next/static/chunks/1915-605f698f2573cfd4.js +1 -0
  10. khoj/interface/compiled/_next/static/chunks/2117-9886e6a0232dc093.js +2 -0
  11. khoj/interface/compiled/_next/static/chunks/2581-455000f8aeb08fc3.js +1 -0
  12. khoj/interface/compiled/_next/static/chunks/3175-b2e522f8ca392f7e.js +3 -0
  13. khoj/interface/compiled/_next/static/chunks/3727.dcea8f2193111552.js +1 -0
  14. khoj/interface/compiled/_next/static/chunks/3789-a09e37a819171a9d.js +1 -0
  15. khoj/interface/compiled/_next/static/chunks/4124-0baa32400521e909.js +1 -0
  16. khoj/interface/compiled/_next/static/chunks/4357-03ea130575287c27.js +1 -0
  17. khoj/interface/compiled/_next/static/chunks/5243-f7f0a2a6e1ac5d28.js +1 -0
  18. khoj/interface/compiled/_next/static/chunks/5427-3e7360c8e6ac9728.js +1 -0
  19. khoj/interface/compiled/_next/static/chunks/{1279-4cb23143aa2c0228.js → 5473-b1cf56dedac6577a.js} +1 -1
  20. khoj/interface/compiled/_next/static/chunks/5477-c5d7eabee28a789a.js +1 -0
  21. khoj/interface/compiled/_next/static/chunks/6065-64db9ad305ba0bcd.js +1 -0
  22. khoj/interface/compiled/_next/static/chunks/8667-d3e5bc726e4ff4e3.js +1 -0
  23. khoj/interface/compiled/_next/static/chunks/9259-27d1ff42af9a43e0.js +1 -0
  24. khoj/interface/compiled/_next/static/chunks/94ca1967.1d9b42d929a1ee8c.js +1 -0
  25. khoj/interface/compiled/_next/static/chunks/{1210.ef7a0f9a7e43da1d.js → 9597.83583248dfbf6e73.js} +1 -1
  26. khoj/interface/compiled/_next/static/chunks/964ecbae.51d6faf8801d15e6.js +1 -0
  27. khoj/interface/compiled/_next/static/chunks/9665-1ab5c8c667b74dca.js +1 -0
  28. khoj/interface/compiled/_next/static/chunks/app/_not-found/{page-cfba071f5a657256.js → page-a834eddae3e235df.js} +1 -1
  29. khoj/interface/compiled/_next/static/chunks/app/agents/layout-e00fb81dca656a10.js +1 -0
  30. khoj/interface/compiled/_next/static/chunks/app/agents/page-ab5ebe4efba9b582.js +1 -0
  31. khoj/interface/compiled/_next/static/chunks/app/automations/{layout-7f1b79a2c67af0b4.js → layout-1fe1537449f43496.js} +1 -1
  32. khoj/interface/compiled/_next/static/chunks/app/automations/page-37d56a7bbfd307df.js +1 -0
  33. khoj/interface/compiled/_next/static/chunks/app/chat/layout-33934fc2d6ae6838.js +1 -0
  34. khoj/interface/compiled/_next/static/chunks/app/chat/page-a0b61f10b0bf6dd5.js +1 -0
  35. khoj/interface/compiled/_next/static/chunks/app/layout-30e7fda7262713ce.js +1 -0
  36. khoj/interface/compiled/_next/static/chunks/app/page-33a3375b1414d1bd.js +1 -0
  37. khoj/interface/compiled/_next/static/chunks/app/search/layout-c02531d586972d7d.js +1 -0
  38. khoj/interface/compiled/_next/static/chunks/app/search/page-bbbfda90fa03c5be.js +1 -0
  39. khoj/interface/compiled/_next/static/chunks/app/settings/layout-d09d6510a45cd4bd.js +1 -0
  40. khoj/interface/compiled/_next/static/chunks/app/settings/page-430db6215e48aea2.js +1 -0
  41. khoj/interface/compiled/_next/static/chunks/app/share/chat/layout-e8e5db7830bf3f47.js +1 -0
  42. khoj/interface/compiled/_next/static/chunks/app/share/chat/page-02dc1f2e2a41e522.js +1 -0
  43. khoj/interface/compiled/_next/static/chunks/d3ac728e-44ebd2a0c99b12a0.js +1 -0
  44. khoj/interface/compiled/_next/static/chunks/{fd9d1056-2e6c8140e79afc3b.js → fd9d1056-4482b99a36fd1673.js} +1 -1
  45. khoj/interface/compiled/_next/static/chunks/main-app-de1f09df97a3cfc7.js +1 -0
  46. khoj/interface/compiled/_next/static/chunks/main-db4bfac6b0a8d00b.js +1 -0
  47. khoj/interface/compiled/_next/static/chunks/pages/{_app-f870474a17b7f2fd.js → _app-3c9ca398d360b709.js} +1 -1
  48. khoj/interface/compiled/_next/static/chunks/pages/{_error-c66a4e8afc46f17b.js → _error-cf5ca766ac8f493f.js} +1 -1
  49. khoj/interface/compiled/_next/static/chunks/polyfills-42372ed130431b0a.js +1 -0
  50. khoj/interface/compiled/_next/static/chunks/webpack-b0a1b08bb62bdc15.js +1 -0
  51. khoj/interface/compiled/_next/static/css/0f04760e76bba6c1.css +25 -0
  52. khoj/interface/compiled/_next/static/css/37a73b87f02df402.css +1 -0
  53. khoj/interface/compiled/_next/static/css/8e6a3ca11a60b189.css +1 -0
  54. khoj/interface/compiled/_next/static/css/9c164d9727dd8092.css +1 -0
  55. khoj/interface/compiled/_next/static/css/c3acbadc30537d04.css +1 -0
  56. khoj/interface/compiled/_next/static/css/dac88c17aaee5fcf.css +1 -0
  57. khoj/interface/compiled/_next/static/css/df4b47a2d0d85eae.css +1 -0
  58. khoj/interface/compiled/_next/static/css/e546bf5cc4914244.css +1 -0
  59. khoj/interface/compiled/_next/static/mqcIHpVqVWkmBuN0npYHA/_buildManifest.js +1 -0
  60. khoj/interface/compiled/agents/index.html +1 -1
  61. khoj/interface/compiled/agents/index.txt +6 -6
  62. khoj/interface/compiled/automations/index.html +1 -1
  63. khoj/interface/compiled/automations/index.txt +7 -7
  64. khoj/interface/compiled/chat/index.html +1 -1
  65. khoj/interface/compiled/chat/index.txt +6 -6
  66. khoj/interface/compiled/index.html +1 -1
  67. khoj/interface/compiled/index.txt +6 -6
  68. khoj/interface/compiled/search/index.html +1 -1
  69. khoj/interface/compiled/search/index.txt +6 -6
  70. khoj/interface/compiled/settings/index.html +1 -1
  71. khoj/interface/compiled/settings/index.txt +8 -8
  72. khoj/interface/compiled/share/chat/index.html +1 -1
  73. khoj/interface/compiled/share/chat/index.txt +6 -6
  74. khoj/interface/email/magic_link.html +36 -13
  75. khoj/main.py +1 -1
  76. khoj/migrations/migrate_server_pg.py +7 -7
  77. khoj/processor/conversation/anthropic/anthropic_chat.py +5 -7
  78. khoj/processor/conversation/google/gemini_chat.py +5 -7
  79. khoj/processor/conversation/google/utils.py +0 -1
  80. khoj/processor/conversation/offline/chat_model.py +15 -14
  81. khoj/processor/conversation/openai/gpt.py +7 -9
  82. khoj/processor/conversation/openai/utils.py +31 -17
  83. khoj/processor/conversation/prompts.py +65 -49
  84. khoj/processor/conversation/utils.py +46 -44
  85. khoj/processor/tools/online_search.py +49 -2
  86. khoj/routers/api.py +22 -27
  87. khoj/routers/api_agents.py +4 -4
  88. khoj/routers/api_chat.py +33 -13
  89. khoj/routers/api_model.py +4 -4
  90. khoj/routers/auth.py +108 -7
  91. khoj/routers/email.py +10 -14
  92. khoj/routers/helpers.py +187 -143
  93. khoj/routers/web_client.py +1 -1
  94. khoj/utils/constants.py +1 -1
  95. khoj/utils/helpers.py +5 -3
  96. khoj/utils/initialization.py +28 -26
  97. {khoj-1.30.11.dev64.dist-info → khoj-1.32.3.dev34.dist-info}/METADATA +7 -7
  98. {khoj-1.30.11.dev64.dist-info → khoj-1.32.3.dev34.dist-info}/RECORD +102 -99
  99. {khoj-1.30.11.dev64.dist-info → khoj-1.32.3.dev34.dist-info}/WHEEL +1 -1
  100. khoj/interface/compiled/_next/static/67DcUiU9MqkM1fhksWunh/_buildManifest.js +0 -1
  101. khoj/interface/compiled/_next/static/chunks/1459.690bf20e7d7b7090.js +0 -1
  102. khoj/interface/compiled/_next/static/chunks/1603-13cef426e0e650ec.js +0 -1
  103. khoj/interface/compiled/_next/static/chunks/1970-1b63ac1497b03a10.js +0 -1
  104. khoj/interface/compiled/_next/static/chunks/2646-92ba433951d02d52.js +0 -20
  105. khoj/interface/compiled/_next/static/chunks/3072-be830e4f8412b9d2.js +0 -1
  106. khoj/interface/compiled/_next/static/chunks/3463-081c031e873b7966.js +0 -3
  107. khoj/interface/compiled/_next/static/chunks/3690-51312931ba1eae30.js +0 -1
  108. khoj/interface/compiled/_next/static/chunks/3717-b46079dbe9f55694.js +0 -1
  109. khoj/interface/compiled/_next/static/chunks/4504-62ac13e7d94c52f9.js +0 -1
  110. khoj/interface/compiled/_next/static/chunks/4602-460621c3241e0d13.js +0 -1
  111. khoj/interface/compiled/_next/static/chunks/4752-554a3db270186ce3.js +0 -1
  112. khoj/interface/compiled/_next/static/chunks/5512-7cc62049bbe60e11.js +0 -1
  113. khoj/interface/compiled/_next/static/chunks/5538-0ea2d3944ca051e1.js +0 -1
  114. khoj/interface/compiled/_next/static/chunks/7023-e8de2bded4df6539.js +0 -2
  115. khoj/interface/compiled/_next/static/chunks/7592-a09c39a38e60634b.js +0 -1
  116. khoj/interface/compiled/_next/static/chunks/8423-1dda16bc56236523.js +0 -1
  117. khoj/interface/compiled/_next/static/chunks/94ca1967.5584df65931cfe83.js +0 -1
  118. khoj/interface/compiled/_next/static/chunks/964ecbae.ea4eab2a3a835ffe.js +0 -1
  119. khoj/interface/compiled/_next/static/chunks/app/agents/layout-1878cc328ea380bd.js +0 -1
  120. khoj/interface/compiled/_next/static/chunks/app/agents/page-8eead7920b0ff92a.js +0 -1
  121. khoj/interface/compiled/_next/static/chunks/app/automations/page-b5800b5286306140.js +0 -1
  122. khoj/interface/compiled/_next/static/chunks/app/chat/layout-9219a85f3477e722.js +0 -1
  123. khoj/interface/compiled/_next/static/chunks/app/chat/page-d7d2ab93e519f0b2.js +0 -1
  124. khoj/interface/compiled/_next/static/chunks/app/layout-6310c57b674dd6f5.js +0 -1
  125. khoj/interface/compiled/_next/static/chunks/app/page-3c32ad5472f75965.js +0 -1
  126. khoj/interface/compiled/_next/static/chunks/app/search/layout-2ca475462c0b2176.js +0 -1
  127. khoj/interface/compiled/_next/static/chunks/app/search/page-faa998c71eb7ca8e.js +0 -1
  128. khoj/interface/compiled/_next/static/chunks/app/settings/layout-f285795bc3154b8c.js +0 -1
  129. khoj/interface/compiled/_next/static/chunks/app/settings/page-cbe7f56b1f87d77a.js +0 -1
  130. khoj/interface/compiled/_next/static/chunks/app/share/chat/layout-592e8c470f2c2084.js +0 -1
  131. khoj/interface/compiled/_next/static/chunks/app/share/chat/page-cd5757199539bbf2.js +0 -1
  132. khoj/interface/compiled/_next/static/chunks/d3ac728e-a9e3522eef9b6b28.js +0 -1
  133. khoj/interface/compiled/_next/static/chunks/main-1ea5c2e0fdef4626.js +0 -1
  134. khoj/interface/compiled/_next/static/chunks/main-app-6d6ee3495efe03d4.js +0 -1
  135. khoj/interface/compiled/_next/static/chunks/polyfills-78c92fac7aa8fdd8.js +0 -1
  136. khoj/interface/compiled/_next/static/chunks/webpack-616f0694bfe6f6c1.js +0 -1
  137. khoj/interface/compiled/_next/static/css/1f293605f2871853.css +0 -1
  138. khoj/interface/compiled/_next/static/css/3c34171b174cc381.css +0 -25
  139. khoj/interface/compiled/_next/static/css/3cf13271869a4aeb.css +0 -1
  140. khoj/interface/compiled/_next/static/css/592ca99f5122e75a.css +0 -1
  141. khoj/interface/compiled/_next/static/css/5a400c87d295e68a.css +0 -1
  142. khoj/interface/compiled/_next/static/css/80bd6301fc657983.css +0 -1
  143. khoj/interface/compiled/_next/static/css/9c4221ae0779cc04.css +0 -1
  144. /khoj/interface/compiled/_next/static/{67DcUiU9MqkM1fhksWunh → mqcIHpVqVWkmBuN0npYHA}/_ssgManifest.js +0 -0
  145. {khoj-1.30.11.dev64.dist-info → khoj-1.32.3.dev34.dist-info}/entry_points.txt +0 -0
  146. {khoj-1.30.11.dev64.dist-info → khoj-1.32.3.dev34.dist-info}/licenses/LICENSE +0 -0
khoj/routers/helpers.py CHANGED
@@ -49,6 +49,7 @@ from khoj.database.adapters import (
     ais_user_subscribed,
     create_khoj_token,
     get_khoj_tokens,
+    get_user_by_email,
     get_user_name,
     get_user_notion_config,
     get_user_subscription_state,
@@ -56,7 +57,7 @@ from khoj.database.adapters import (
 )
 from khoj.database.models import (
     Agent,
-    ChatModelOptions,
+    ChatModel,
     ClientApplication,
     Conversation,
     GithubConfig,
@@ -88,7 +89,10 @@ from khoj.processor.conversation.offline.chat_model import (
     converse_offline,
     send_message_to_model_offline,
 )
-from khoj.processor.conversation.openai.gpt import converse, send_message_to_model
+from khoj.processor.conversation.openai.gpt import (
+    converse_openai,
+    send_message_to_model,
+)
 from khoj.processor.conversation.utils import (
     ChatEvent,
     ThreadedGenerator,
@@ -130,40 +134,40 @@ def is_query_empty(query: str) -> bool:
     return is_none_or_empty(query.strip())


-def validate_conversation_config(user: KhojUser):
-    default_config = ConversationAdapters.get_default_conversation_config(user)
+def validate_chat_model(user: KhojUser):
+    default_chat_model = ConversationAdapters.get_default_chat_model(user)

-    if default_config is None:
+    if default_chat_model is None:
         raise HTTPException(status_code=500, detail="Contact the server administrator to add a chat model.")

-    if default_config.model_type == "openai" and not default_config.ai_model_api:
+    if default_chat_model.model_type == "openai" and not default_chat_model.ai_model_api:
         raise HTTPException(status_code=500, detail="Contact the server administrator to add a chat model.")


 async def is_ready_to_chat(user: KhojUser):
-    user_conversation_config = await ConversationAdapters.aget_user_conversation_config(user)
-    if user_conversation_config == None:
-        user_conversation_config = await ConversationAdapters.aget_default_conversation_config(user)
+    user_chat_model = await ConversationAdapters.aget_user_chat_model(user)
+    if user_chat_model == None:
+        user_chat_model = await ConversationAdapters.aget_default_chat_model(user)

-    if user_conversation_config and user_conversation_config.model_type == ChatModelOptions.ModelType.OFFLINE:
-        chat_model = user_conversation_config.chat_model
-        max_tokens = user_conversation_config.max_prompt_size
+    if user_chat_model and user_chat_model.model_type == ChatModel.ModelType.OFFLINE:
+        chat_model_name = user_chat_model.name
+        max_tokens = user_chat_model.max_prompt_size
         if state.offline_chat_processor_config is None:
             logger.info("Loading Offline Chat Model...")
-            state.offline_chat_processor_config = OfflineChatProcessorModel(chat_model, max_tokens)
+            state.offline_chat_processor_config = OfflineChatProcessorModel(chat_model_name, max_tokens)
         return True

     if (
-        user_conversation_config
+        user_chat_model
         and (
-            user_conversation_config.model_type
+            user_chat_model.model_type
             in [
-                ChatModelOptions.ModelType.OPENAI,
-                ChatModelOptions.ModelType.ANTHROPIC,
-                ChatModelOptions.ModelType.GOOGLE,
+                ChatModel.ModelType.OPENAI,
+                ChatModel.ModelType.ANTHROPIC,
+                ChatModel.ModelType.GOOGLE,
             ]
         )
-        and user_conversation_config.ai_model_api
+        and user_chat_model.ai_model_api
     ):
         return True

@@ -227,7 +231,7 @@ def get_next_url(request: Request) -> str:
     return urljoin(str(request.base_url).rstrip("/"), next_path)


-def get_conversation_command(query: str, any_references: bool = False) -> ConversationCommand:
+def get_conversation_command(query: str) -> ConversationCommand:
     if query.startswith("/notes"):
         return ConversationCommand.Notes
     elif query.startswith("/help"):
@@ -250,9 +254,6 @@ def get_conversation_command(query: str, any_references: bool = False) -> ConversationCommand:
         return ConversationCommand.Code
     elif query.startswith("/research"):
         return ConversationCommand.Research
-    # If no relevant notes found for the given query
-    elif not any_references:
-        return ConversationCommand.General
     else:
         return ConversationCommand.Default

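Note: with the `any_references` parameter and its `General` fallback removed, command selection now depends only on the slash prefix, so a query without one always maps to `ConversationCommand.Default`. A minimal sketch of the new dispatch behavior, using a stub enum in place of khoj's actual `ConversationCommand` (the member values here are assumptions):

    from enum import Enum

    # Stub standing in for khoj.utils.helpers.ConversationCommand; member
    # names mirror the ones visible in this diff, values are illustrative.
    class ConversationCommand(str, Enum):
        Notes = "notes"
        Research = "research"
        Default = "default"

    def get_conversation_command(query: str) -> ConversationCommand:
        # Dispatch purely on the slash-command prefix; no reference-based fallback.
        if query.startswith("/notes"):
            return ConversationCommand.Notes
        elif query.startswith("/research"):
            return ConversationCommand.Research
        else:
            return ConversationCommand.Default

    assert get_conversation_command("/research how do LLMs work") == ConversationCommand.Research
    # Pre-1.32, a plain query could map to General when no notes matched; now it is always Default.
    assert get_conversation_command("plain query") == ConversationCommand.Default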
@@ -404,42 +405,39 @@ async def aget_data_sources_and_output_format(
         response = clean_json(response)
         response = json.loads(response)

-        selected_sources = [q.strip() for q in response.get("source", []) if q.strip()]
-        selected_output = response.get("output", "text").strip()  # Default to text output
+        chosen_sources = [s.strip() for s in response.get("source", []) if s.strip()]
+        chosen_output = response.get("output", "text").strip()  # Default to text output

-        if not isinstance(selected_sources, list) or not selected_sources or len(selected_sources) == 0:
+        if is_none_or_empty(chosen_sources) or not isinstance(chosen_sources, list):
             raise ValueError(
-                f"Invalid response for determining relevant tools: {selected_sources}. Raw Response: {response}"
+                f"Invalid response for determining relevant tools: {chosen_sources}. Raw Response: {response}"
             )

-        result: Dict = {"sources": [], "output": None if not is_task else ConversationCommand.AutomatedTask}
-        for selected_source in selected_sources:
-            # Add a double check to verify it's in the agent list, because the LLM sometimes gets confused by the tool options.
-            if (
-                selected_source in source_options.keys()
-                and isinstance(result["sources"], list)
-                and (len(agent_sources) == 0 or selected_source in agent_sources)
-            ):
-                # Check whether the tool exists as a valid ConversationCommand
-                result["sources"].append(ConversationCommand(selected_source))
-
-        # Add a double check to verify it's in the agent list, because the LLM sometimes gets confused by the tool options.
-        if selected_output in output_options.keys() and (len(agent_outputs) == 0 or selected_output in agent_outputs):
-            # Check whether the tool exists as a valid ConversationCommand
-            result["output"] = ConversationCommand(selected_output)
-
-        if is_none_or_empty(result):
+        output_mode = ConversationCommand.Text
+        # Verify selected output mode is enabled for the agent, as the LLM can sometimes get confused by the tool options.
+        if chosen_output in output_options.keys() and (len(agent_outputs) == 0 or chosen_output in agent_outputs):
+            # Ensure that the chosen output mode exists as a valid ConversationCommand
+            output_mode = ConversationCommand(chosen_output)
+
+        data_sources = []
+        # Verify selected data sources are enabled for the agent, as the LLM can sometimes get confused by the tool options.
+        for chosen_source in chosen_sources:
+            # Ensure that the chosen data source exists as a valid ConversationCommand
+            if chosen_source in source_options.keys() and (len(agent_sources) == 0 or chosen_source in agent_sources):
+                data_sources.append(ConversationCommand(chosen_source))
+
+        # Fallback to default sources if the inferred data sources are unset or invalid
+        if is_none_or_empty(data_sources):
             if len(agent_sources) == 0:
-                result = {"sources": [ConversationCommand.Default], "output": ConversationCommand.Text}
+                data_sources = [ConversationCommand.Default]
             else:
-                result = {"sources": [ConversationCommand.General], "output": ConversationCommand.Text}
+                data_sources = [ConversationCommand.General]
     except Exception as e:
         logger.error(f"Invalid response for determining relevant tools: {response}. Error: {e}", exc_info=True)
-        sources = agent_sources if len(agent_sources) > 0 else [ConversationCommand.Default]
-        output = agent_outputs[0] if len(agent_outputs) > 0 else ConversationCommand.Text
-        result = {"sources": sources, "output": output}
+        data_sources = agent_sources if len(agent_sources) > 0 else [ConversationCommand.Default]
+        output_mode = agent_outputs[0] if len(agent_outputs) > 0 else ConversationCommand.Text

-    return result
+    return {"sources": data_sources, "output": output_mode}


 async def infer_webpage_urls(
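Note: the rewrite above splits the single `result` dict into separately validated `data_sources` and `output_mode`, but keeps the same defensive pattern for consuming LLM tool choices: check each choice against an allowlist (all valid options, intersected with what the agent enables) and fall back to a safe default when nothing survives. A self-contained sketch of that pattern, with hypothetical option names:

    from typing import Dict, List

    def validate_llm_choices(
        chosen_sources: List[str],
        source_options: Dict[str, str],  # all globally valid data sources
        agent_sources: List[str],        # sources this agent enables; empty means "all"
    ) -> List[str]:
        # Keep only choices that are both globally valid and enabled for the
        # agent, since the LLM can hallucinate or confuse tool names.
        valid = [
            s for s in chosen_sources
            if s in source_options and (len(agent_sources) == 0 or s in agent_sources)
        ]
        # Fall back to a default when nothing valid survives.
        return valid or ["default"]

    # e.g. the LLM picked a tool this agent does not enable:
    print(validate_llm_choices(["web_search"], {"notes": "", "web_search": ""}, ["notes"]))
    # -> ['default']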
@@ -751,7 +749,7 @@ async def generate_excalidraw_diagram(
         )
     except Exception as e:
         logger.error(f"Error generating Excalidraw diagram for {user.email}: {e}", exc_info=True)
-        yield None, None
+        yield better_diagram_description_prompt, None
         return

     scratchpad = excalidraw_diagram_description.get("scratchpad")
@@ -939,120 +937,124 @@ async def send_message_to_model_wrapper(
     query_files: str = None,
     tracer: dict = {},
 ):
-    conversation_config: ChatModelOptions = await ConversationAdapters.aget_default_conversation_config(user)
-    vision_available = conversation_config.vision_enabled
+    chat_model: ChatModel = await ConversationAdapters.aget_default_chat_model(user)
+    vision_available = chat_model.vision_enabled
     if not vision_available and query_images:
-        logger.warning(f"Vision is not enabled for default model: {conversation_config.chat_model}.")
+        logger.warning(f"Vision is not enabled for default model: {chat_model.name}.")
         vision_enabled_config = await ConversationAdapters.aget_vision_enabled_config()
         if vision_enabled_config:
-            conversation_config = vision_enabled_config
+            chat_model = vision_enabled_config
             vision_available = True
     if vision_available and query_images:
-        logger.info(f"Using {conversation_config.chat_model} model to understand {len(query_images)} images.")
+        logger.info(f"Using {chat_model.name} model to understand {len(query_images)} images.")

     subscribed = await ais_user_subscribed(user)
-    chat_model = conversation_config.chat_model
+    chat_model_name = chat_model.name
     max_tokens = (
-        conversation_config.subscribed_max_prompt_size
-        if subscribed and conversation_config.subscribed_max_prompt_size
-        else conversation_config.max_prompt_size
+        chat_model.subscribed_max_prompt_size
+        if subscribed and chat_model.subscribed_max_prompt_size
+        else chat_model.max_prompt_size
     )
-    tokenizer = conversation_config.tokenizer
-    model_type = conversation_config.model_type
-    vision_available = conversation_config.vision_enabled
+    tokenizer = chat_model.tokenizer
+    model_type = chat_model.model_type
+    vision_available = chat_model.vision_enabled

-    if model_type == ChatModelOptions.ModelType.OFFLINE:
+    if model_type == ChatModel.ModelType.OFFLINE:
         if state.offline_chat_processor_config is None or state.offline_chat_processor_config.loaded_model is None:
-            state.offline_chat_processor_config = OfflineChatProcessorModel(chat_model, max_tokens)
+            state.offline_chat_processor_config = OfflineChatProcessorModel(chat_model_name, max_tokens)

         loaded_model = state.offline_chat_processor_config.loaded_model
         truncated_messages = generate_chatml_messages_with_context(
             user_message=query,
             context_message=context,
             system_message=system_message,
-            model_name=chat_model,
+            model_name=chat_model_name,
             loaded_model=loaded_model,
             tokenizer_name=tokenizer,
             max_prompt_size=max_tokens,
             vision_enabled=vision_available,
-            model_type=conversation_config.model_type,
+            model_type=chat_model.model_type,
             query_files=query_files,
         )

         return send_message_to_model_offline(
             messages=truncated_messages,
             loaded_model=loaded_model,
-            model=chat_model,
+            model_name=chat_model_name,
             max_prompt_size=max_tokens,
             streaming=False,
             response_type=response_type,
             tracer=tracer,
         )

-    elif model_type == ChatModelOptions.ModelType.OPENAI:
-        openai_chat_config = conversation_config.ai_model_api
+    elif model_type == ChatModel.ModelType.OPENAI:
+        openai_chat_config = chat_model.ai_model_api
         api_key = openai_chat_config.api_key
         api_base_url = openai_chat_config.api_base_url
         truncated_messages = generate_chatml_messages_with_context(
             user_message=query,
             context_message=context,
             system_message=system_message,
-            model_name=chat_model,
+            model_name=chat_model_name,
             max_prompt_size=max_tokens,
             tokenizer_name=tokenizer,
             vision_enabled=vision_available,
             query_images=query_images,
-            model_type=conversation_config.model_type,
+            model_type=chat_model.model_type,
             query_files=query_files,
         )

         return send_message_to_model(
             messages=truncated_messages,
             api_key=api_key,
-            model=chat_model,
+            model=chat_model_name,
             response_type=response_type,
             api_base_url=api_base_url,
             tracer=tracer,
         )
-    elif model_type == ChatModelOptions.ModelType.ANTHROPIC:
-        api_key = conversation_config.ai_model_api.api_key
+    elif model_type == ChatModel.ModelType.ANTHROPIC:
+        api_key = chat_model.ai_model_api.api_key
         truncated_messages = generate_chatml_messages_with_context(
             user_message=query,
             context_message=context,
             system_message=system_message,
-            model_name=chat_model,
+            model_name=chat_model_name,
             max_prompt_size=max_tokens,
             tokenizer_name=tokenizer,
             vision_enabled=vision_available,
             query_images=query_images,
-            model_type=conversation_config.model_type,
+            model_type=chat_model.model_type,
             query_files=query_files,
         )

         return anthropic_send_message_to_model(
             messages=truncated_messages,
             api_key=api_key,
-            model=chat_model,
+            model=chat_model_name,
             response_type=response_type,
             tracer=tracer,
         )
-    elif model_type == ChatModelOptions.ModelType.GOOGLE:
-        api_key = conversation_config.ai_model_api.api_key
+    elif model_type == ChatModel.ModelType.GOOGLE:
+        api_key = chat_model.ai_model_api.api_key
         truncated_messages = generate_chatml_messages_with_context(
             user_message=query,
             context_message=context,
             system_message=system_message,
-            model_name=chat_model,
+            model_name=chat_model_name,
             max_prompt_size=max_tokens,
             tokenizer_name=tokenizer,
             vision_enabled=vision_available,
             query_images=query_images,
-            model_type=conversation_config.model_type,
+            model_type=chat_model.model_type,
             query_files=query_files,
         )

         return gemini_send_message_to_model(
-            messages=truncated_messages, api_key=api_key, model=chat_model, response_type=response_type, tracer=tracer
+            messages=truncated_messages,
+            api_key=api_key,
+            model=chat_model_name,
+            response_type=response_type,
+            tracer=tracer,
         )
     else:
         raise HTTPException(status_code=500, detail="Invalid conversation config")
@@ -1066,99 +1068,99 @@ def send_message_to_model_wrapper_sync(
     query_files: str = "",
     tracer: dict = {},
 ):
-    conversation_config: ChatModelOptions = ConversationAdapters.get_default_conversation_config(user)
+    chat_model: ChatModel = ConversationAdapters.get_default_chat_model(user)

-    if conversation_config is None:
+    if chat_model is None:
         raise HTTPException(status_code=500, detail="Contact the server administrator to set a default chat model.")

-    chat_model = conversation_config.chat_model
-    max_tokens = conversation_config.max_prompt_size
-    vision_available = conversation_config.vision_enabled
+    chat_model_name = chat_model.name
+    max_tokens = chat_model.max_prompt_size
+    vision_available = chat_model.vision_enabled

-    if conversation_config.model_type == ChatModelOptions.ModelType.OFFLINE:
+    if chat_model.model_type == ChatModel.ModelType.OFFLINE:
         if state.offline_chat_processor_config is None or state.offline_chat_processor_config.loaded_model is None:
-            state.offline_chat_processor_config = OfflineChatProcessorModel(chat_model, max_tokens)
+            state.offline_chat_processor_config = OfflineChatProcessorModel(chat_model_name, max_tokens)

         loaded_model = state.offline_chat_processor_config.loaded_model
         truncated_messages = generate_chatml_messages_with_context(
             user_message=message,
             system_message=system_message,
-            model_name=chat_model,
+            model_name=chat_model_name,
             loaded_model=loaded_model,
             max_prompt_size=max_tokens,
             vision_enabled=vision_available,
-            model_type=conversation_config.model_type,
+            model_type=chat_model.model_type,
             query_files=query_files,
         )

         return send_message_to_model_offline(
             messages=truncated_messages,
             loaded_model=loaded_model,
-            model=chat_model,
+            model_name=chat_model_name,
             max_prompt_size=max_tokens,
             streaming=False,
             response_type=response_type,
             tracer=tracer,
         )

-    elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
-        api_key = conversation_config.ai_model_api.api_key
+    elif chat_model.model_type == ChatModel.ModelType.OPENAI:
+        api_key = chat_model.ai_model_api.api_key
         truncated_messages = generate_chatml_messages_with_context(
             user_message=message,
             system_message=system_message,
-            model_name=chat_model,
+            model_name=chat_model_name,
             max_prompt_size=max_tokens,
             vision_enabled=vision_available,
-            model_type=conversation_config.model_type,
+            model_type=chat_model.model_type,
             query_files=query_files,
         )

         openai_response = send_message_to_model(
             messages=truncated_messages,
             api_key=api_key,
-            model=chat_model,
+            model=chat_model_name,
             response_type=response_type,
             tracer=tracer,
         )

         return openai_response

-    elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
-        api_key = conversation_config.ai_model_api.api_key
+    elif chat_model.model_type == ChatModel.ModelType.ANTHROPIC:
+        api_key = chat_model.ai_model_api.api_key
         truncated_messages = generate_chatml_messages_with_context(
             user_message=message,
             system_message=system_message,
-            model_name=chat_model,
+            model_name=chat_model_name,
             max_prompt_size=max_tokens,
             vision_enabled=vision_available,
-            model_type=conversation_config.model_type,
+            model_type=chat_model.model_type,
             query_files=query_files,
         )

         return anthropic_send_message_to_model(
             messages=truncated_messages,
             api_key=api_key,
-            model=chat_model,
+            model=chat_model_name,
             response_type=response_type,
             tracer=tracer,
         )

-    elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
-        api_key = conversation_config.ai_model_api.api_key
+    elif chat_model.model_type == ChatModel.ModelType.GOOGLE:
+        api_key = chat_model.ai_model_api.api_key
         truncated_messages = generate_chatml_messages_with_context(
             user_message=message,
             system_message=system_message,
-            model_name=chat_model,
+            model_name=chat_model_name,
             max_prompt_size=max_tokens,
             vision_enabled=vision_available,
-            model_type=conversation_config.model_type,
+            model_type=chat_model.model_type,
             query_files=query_files,
         )

         return gemini_send_message_to_model(
             messages=truncated_messages,
             api_key=api_key,
-            model=chat_model,
+            model=chat_model_name,
             response_type=response_type,
             tracer=tracer,
         )
@@ -1189,6 +1191,7 @@ def generate_chat_response(
     raw_generated_files: List[FileAttachment] = [],
     generated_excalidraw_diagram: str = None,
     program_execution_context: List[str] = [],
+    generated_asset_results: Dict[str, Dict] = {},
     tracer: dict = {},
 ) -> Tuple[Union[ThreadedGenerator, Iterator[str]], Dict[str, str]]:
     # Initialize Variables
@@ -1225,15 +1228,15 @@ def generate_chat_response(
         online_results = {}
         code_results = {}

-        conversation_config = ConversationAdapters.get_valid_conversation_config(user, conversation)
-        vision_available = conversation_config.vision_enabled
+        chat_model = ConversationAdapters.get_valid_chat_model(user, conversation)
+        vision_available = chat_model.vision_enabled
         if not vision_available and query_images:
             vision_enabled_config = ConversationAdapters.get_vision_enabled_config()
             if vision_enabled_config:
-                conversation_config = vision_enabled_config
+                chat_model = vision_enabled_config
                 vision_available = True

-        if conversation_config.model_type == "offline":
+        if chat_model.model_type == "offline":
             loaded_model = state.offline_chat_processor_config.loaded_model
             chat_response = converse_offline(
                 user_query=query_to_run,
@@ -1243,49 +1246,49 @@ def generate_chat_response(
                 conversation_log=meta_log,
                 completion_func=partial_completion,
                 conversation_commands=conversation_commands,
-                model=conversation_config.chat_model,
-                max_prompt_size=conversation_config.max_prompt_size,
-                tokenizer_name=conversation_config.tokenizer,
+                model_name=chat_model.name,
+                max_prompt_size=chat_model.max_prompt_size,
+                tokenizer_name=chat_model.tokenizer,
                 location_data=location_data,
                 user_name=user_name,
                 agent=agent,
                 query_files=query_files,
                 generated_files=raw_generated_files,
+                generated_asset_results=generated_asset_results,
                 tracer=tracer,
             )

-        elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
-            openai_chat_config = conversation_config.ai_model_api
+        elif chat_model.model_type == ChatModel.ModelType.OPENAI:
+            openai_chat_config = chat_model.ai_model_api
             api_key = openai_chat_config.api_key
-            chat_model = conversation_config.chat_model
-            chat_response = converse(
+            chat_model_name = chat_model.name
+            chat_response = converse_openai(
                 compiled_references,
                 query_to_run,
                 query_images=query_images,
                 online_results=online_results,
                 code_results=code_results,
                 conversation_log=meta_log,
-                model=chat_model,
+                model=chat_model_name,
                 api_key=api_key,
                 api_base_url=openai_chat_config.api_base_url,
                 completion_func=partial_completion,
                 conversation_commands=conversation_commands,
-                max_prompt_size=conversation_config.max_prompt_size,
-                tokenizer_name=conversation_config.tokenizer,
+                max_prompt_size=chat_model.max_prompt_size,
+                tokenizer_name=chat_model.tokenizer,
                 location_data=location_data,
                 user_name=user_name,
                 agent=agent,
                 vision_available=vision_available,
                 query_files=query_files,
                 generated_files=raw_generated_files,
-                generated_images=generated_images,
-                generated_excalidraw_diagram=generated_excalidraw_diagram,
+                generated_asset_results=generated_asset_results,
                 program_execution_context=program_execution_context,
                 tracer=tracer,
             )

-        elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
-            api_key = conversation_config.ai_model_api.api_key
+        elif chat_model.model_type == ChatModel.ModelType.ANTHROPIC:
+            api_key = chat_model.ai_model_api.api_key
             chat_response = converse_anthropic(
                 compiled_references,
                 query_to_run,
@@ -1293,37 +1296,36 @@ def generate_chat_response(
                 online_results=online_results,
                 code_results=code_results,
                 conversation_log=meta_log,
-                model=conversation_config.chat_model,
+                model=chat_model.name,
                 api_key=api_key,
                 completion_func=partial_completion,
                 conversation_commands=conversation_commands,
-                max_prompt_size=conversation_config.max_prompt_size,
-                tokenizer_name=conversation_config.tokenizer,
+                max_prompt_size=chat_model.max_prompt_size,
+                tokenizer_name=chat_model.tokenizer,
                 location_data=location_data,
                 user_name=user_name,
                 agent=agent,
                 vision_available=vision_available,
                 query_files=query_files,
                 generated_files=raw_generated_files,
-                generated_images=generated_images,
-                generated_excalidraw_diagram=generated_excalidraw_diagram,
+                generated_asset_results=generated_asset_results,
                 program_execution_context=program_execution_context,
                 tracer=tracer,
             )
-        elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
-            api_key = conversation_config.ai_model_api.api_key
+        elif chat_model.model_type == ChatModel.ModelType.GOOGLE:
+            api_key = chat_model.ai_model_api.api_key
             chat_response = converse_gemini(
                 compiled_references,
                 query_to_run,
                 online_results,
                 code_results,
                 meta_log,
-                model=conversation_config.chat_model,
+                model=chat_model.name,
                 api_key=api_key,
                 completion_func=partial_completion,
                 conversation_commands=conversation_commands,
-                max_prompt_size=conversation_config.max_prompt_size,
-                tokenizer_name=conversation_config.tokenizer,
+                max_prompt_size=chat_model.max_prompt_size,
+                tokenizer_name=chat_model.tokenizer,
                 location_data=location_data,
                 user_name=user_name,
                 agent=agent,
@@ -1331,13 +1333,12 @@ def generate_chat_response(
                 vision_available=vision_available,
                 query_files=query_files,
                 generated_files=raw_generated_files,
-                generated_images=generated_images,
-                generated_excalidraw_diagram=generated_excalidraw_diagram,
+                generated_asset_results=generated_asset_results,
                 program_execution_context=program_execution_context,
                 tracer=tracer,
             )

-        metadata.update({"chat_model": conversation_config.chat_model})
+        metadata.update({"chat_model": chat_model.name})

     except Exception as e:
         logger.error(e, exc_info=True)
@@ -1357,6 +1358,49 @@ class FeedbackData(BaseModel):
     sentiment: str


+class EmailVerificationApiRateLimiter:
+    def __init__(self, requests: int, window: int, slug: str):
+        self.requests = requests
+        self.window = window
+        self.slug = slug
+
+    def __call__(self, request: Request):
+        # Rate limiting disabled if billing is disabled
+        if state.billing_enabled is False:
+            return
+
+        # Extract the email query parameter
+        email = request.query_params.get("email")
+
+        if email:
+            logger.info(f"Email query parameter: {email}")
+
+        user: KhojUser = get_user_by_email(email)
+
+        if not user:
+            raise HTTPException(
+                status_code=404,
+                detail="User not found.",
+            )
+
+        # Remove requests outside of the time window
+        cutoff = datetime.now(tz=timezone.utc) - timedelta(seconds=self.window)
+        count_requests = UserRequests.objects.filter(user=user, created_at__gte=cutoff, slug=self.slug).count()
+
+        # Check if the user has exceeded the rate limit
+        if count_requests >= self.requests:
+            logger.info(
+                f"Rate limit: {count_requests}/{self.requests} requests not allowed in {self.window} seconds for email: {email}."
+            )
+            raise HTTPException(
+                status_code=429,
+                detail="Ran out of login attempts",
+            )
+
+        # Add the current request to the db
+        UserRequests.objects.create(user=user, slug=self.slug)
+
+
 class ApiUserRateLimiter:
     def __init__(self, requests: int, subscribed_requests: int, window: int, slug: str):
         self.requests = requests
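Note: the new `EmailVerificationApiRateLimiter` counts a user's recent `UserRequests` rows inside a sliding time window and raises HTTP 429 once the limit is hit. It is wired up elsewhere in this release (see `khoj/routers/auth.py` in the file list above). The sketch below shows how such a callable limiter is typically attached as a FastAPI dependency; the route path, limits, and slug here are assumptions, not khoj's actual values:

    from fastapi import APIRouter, Depends, Request

    router = APIRouter()

    # Hypothetical wiring: path, limits, and slug are illustrative only.
    @router.get("/auth/magic")
    async def login_magic_link(
        request: Request,
        rate_limited: None = Depends(EmailVerificationApiRateLimiter(requests=5, window=300, slug="magic_link")),
    ):
        # FastAPI invokes EmailVerificationApiRateLimiter.__call__(request)
        # before this handler runs; a 429 raised by the limiter
        # short-circuits the request.
        ...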
@@ -1636,7 +1680,7 @@ def scheduled_chat(
         last_run_time = datetime.strptime(last_run_time, "%Y-%m-%d %I:%M %p %Z").replace(tzinfo=timezone.utc)

         # If the last run time was within the last 6 hours, don't run it again. This helps avoid multithreading issues and rate limits.
-        if (datetime.now(timezone.utc) - last_run_time).total_seconds() < 21600:
+        if (datetime.now(timezone.utc) - last_run_time).total_seconds() < 6 * 60 * 60:
            logger.info(f"Skipping scheduled chat {job_id} as the next run time is in the future.")
            return

@@ -1937,13 +1981,13 @@ def get_user_config(user: KhojUser, request: Request, is_detailed: bool = False)
     current_notion_config = get_user_notion_config(user)
     notion_token = current_notion_config.token if current_notion_config else ""

-    selected_chat_model_config = ConversationAdapters.get_conversation_config(
+    selected_chat_model_config = ConversationAdapters.get_chat_model(
         user
-    ) or ConversationAdapters.get_default_conversation_config(user)
+    ) or ConversationAdapters.get_default_chat_model(user)
     chat_models = ConversationAdapters.get_conversation_processor_options().all()
     chat_model_options = list()
     for chat_model in chat_models:
-        chat_model_options.append({"name": chat_model.chat_model, "id": chat_model.id})
+        chat_model_options.append({"name": chat_model.name, "id": chat_model.id})

     selected_paint_model_config = ConversationAdapters.get_user_text_to_image_model_config(user)
     paint_model_options = ConversationAdapters.get_text_to_image_model_options().all()