khoj 1.30.11.dev13__py3-none-any.whl → 1.30.11.dev56__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. khoj/app/settings.py +21 -0
  2. khoj/configure.py +3 -3
  3. khoj/database/adapters/__init__.py +15 -15
  4. khoj/database/admin.py +50 -40
  5. khoj/database/migrations/0075_migrate_generated_assets_and_validate.py +85 -0
  6. khoj/database/migrations/0076_rename_openaiprocessorconversationconfig_aimodelapi_and_more.py +26 -0
  7. khoj/database/models/__init__.py +171 -42
  8. khoj/interface/compiled/404/index.html +1 -1
  9. khoj/interface/compiled/_next/static/chunks/1603-e40aadd1e56ab030.js +1 -0
  10. khoj/interface/compiled/_next/static/chunks/5538-0ea2d3944ca051e1.js +1 -0
  11. khoj/interface/compiled/_next/static/chunks/app/agents/layout-1878cc328ea380bd.js +1 -0
  12. khoj/interface/compiled/_next/static/chunks/app/agents/{page-8eead7920b0ff92a.js → page-f5c0801b27a8e95e.js} +1 -1
  13. khoj/interface/compiled/_next/static/chunks/app/automations/layout-7f1b79a2c67af0b4.js +1 -0
  14. khoj/interface/compiled/_next/static/chunks/app/automations/{page-b5800b5286306140.js → page-8691f6c09a0acd44.js} +1 -1
  15. khoj/interface/compiled/_next/static/chunks/app/chat/layout-9219a85f3477e722.js +1 -0
  16. khoj/interface/compiled/_next/static/chunks/app/chat/{page-d7d2ab93e519f0b2.js → page-135d56dd4263e40d.js} +1 -1
  17. khoj/interface/compiled/_next/static/chunks/app/layout-6310c57b674dd6f5.js +1 -0
  18. khoj/interface/compiled/_next/static/chunks/app/{page-3c32ad5472f75965.js → page-e79ace822d51557b.js} +1 -1
  19. khoj/interface/compiled/_next/static/chunks/app/search/{page-faa998c71eb7ca8e.js → page-e8b578d155550386.js} +1 -1
  20. khoj/interface/compiled/_next/static/chunks/app/settings/layout-f285795bc3154b8c.js +1 -0
  21. khoj/interface/compiled/_next/static/chunks/app/settings/{page-cbe7f56b1f87d77a.js → page-b6c835050c970be7.js} +1 -1
  22. khoj/interface/compiled/_next/static/chunks/app/share/chat/layout-6f4879fbbf8b90f7.js +1 -0
  23. khoj/interface/compiled/_next/static/chunks/app/share/chat/{page-cd5757199539bbf2.js → page-635635e4fb39fe29.js} +1 -1
  24. khoj/interface/compiled/_next/static/chunks/{webpack-3a2dfd74acf6e193.js → webpack-5203c3872078c10c.js} +1 -1
  25. khoj/interface/compiled/_next/static/css/{bedf49fbfc598358.css → 089de1d8526b96e9.css} +1 -1
  26. khoj/interface/compiled/_next/static/css/4cae6c0e5c72fb2d.css +1 -0
  27. khoj/interface/compiled/_next/static/css/edd3abaf11580924.css +1 -0
  28. khoj/interface/compiled/_next/static/media/1d8a05b60287ae6c-s.p.woff2 +0 -0
  29. khoj/interface/compiled/_next/static/media/6f22fce21a7c433c-s.woff2 +0 -0
  30. khoj/interface/compiled/_next/static/media/77c207b095007c34-s.p.woff2 +0 -0
  31. khoj/interface/compiled/_next/static/media/82ef96de0e8f4d8c-s.p.woff2 +0 -0
  32. khoj/interface/compiled/_next/static/media/a6ecd16fa044d500-s.p.woff2 +0 -0
  33. khoj/interface/compiled/_next/static/media/bd82c78e5b7b3fe9-s.p.woff2 +0 -0
  34. khoj/interface/compiled/_next/static/media/c32c8052c071fc42-s.woff2 +0 -0
  35. khoj/interface/compiled/_next/static/media/c4250770ab8708b6-s.p.woff2 +0 -0
  36. khoj/interface/compiled/agents/index.html +1 -1
  37. khoj/interface/compiled/agents/index.txt +2 -2
  38. khoj/interface/compiled/assets/icons/khoj_lantern.svg +100 -0
  39. khoj/interface/compiled/assets/icons/khoj_lantern_128x128_dark.png +0 -0
  40. khoj/interface/compiled/automations/index.html +1 -1
  41. khoj/interface/compiled/automations/index.txt +3 -3
  42. khoj/interface/compiled/chat/index.html +1 -1
  43. khoj/interface/compiled/chat/index.txt +2 -2
  44. khoj/interface/compiled/index.html +1 -1
  45. khoj/interface/compiled/index.txt +2 -2
  46. khoj/interface/compiled/search/index.html +1 -1
  47. khoj/interface/compiled/search/index.txt +2 -2
  48. khoj/interface/compiled/settings/index.html +1 -1
  49. khoj/interface/compiled/settings/index.txt +5 -4
  50. khoj/interface/compiled/share/chat/index.html +1 -1
  51. khoj/interface/compiled/share/chat/index.txt +2 -2
  52. khoj/migrations/migrate_server_pg.py +3 -9
  53. khoj/processor/conversation/anthropic/anthropic_chat.py +11 -3
  54. khoj/processor/conversation/google/gemini_chat.py +11 -3
  55. khoj/processor/conversation/offline/chat_model.py +6 -2
  56. khoj/processor/conversation/openai/gpt.py +10 -2
  57. khoj/processor/conversation/openai/utils.py +1 -6
  58. khoj/processor/conversation/prompts.py +18 -0
  59. khoj/processor/conversation/utils.py +82 -26
  60. khoj/processor/image/generate.py +12 -15
  61. khoj/routers/api.py +5 -5
  62. khoj/routers/api_chat.py +49 -98
  63. khoj/routers/helpers.py +52 -12
  64. khoj/utils/initialization.py +10 -12
  65. {khoj-1.30.11.dev13.dist-info → khoj-1.30.11.dev56.dist-info}/METADATA +2 -1
  66. {khoj-1.30.11.dev13.dist-info → khoj-1.30.11.dev56.dist-info}/RECORD +71 -67
  67. khoj/interface/compiled/_next/static/chunks/1603-c68d44bc4ae6039a.js +0 -1
  68. khoj/interface/compiled/_next/static/chunks/5538-e5f3c9f4d67a64b9.js +0 -1
  69. khoj/interface/compiled/_next/static/chunks/app/agents/layout-f2ea2b26fc0e78b1.js +0 -1
  70. khoj/interface/compiled/_next/static/chunks/app/automations/layout-f1050c1f20a3af67.js +0 -1
  71. khoj/interface/compiled/_next/static/chunks/app/chat/layout-1072c3b0ab136e74.js +0 -1
  72. khoj/interface/compiled/_next/static/chunks/app/layout-72ec1be8afd0b1ab.js +0 -1
  73. khoj/interface/compiled/_next/static/chunks/app/settings/layout-fe8a2f65ccafd142.js +0 -1
  74. khoj/interface/compiled/_next/static/chunks/app/share/chat/layout-dc97434f0354a74e.js +0 -1
  75. khoj/interface/compiled/_next/static/css/2d097a35da6bfe8d.css +0 -1
  76. khoj/interface/compiled/_next/static/css/80bd6301fc657983.css +0 -1
  77. khoj/interface/compiled/_next/static/media/5455839c73f146e7-s.p.woff2 +0 -0
  78. khoj/interface/compiled/_next/static/media/5984b96ba4822821-s.p.woff2 +0 -0
  79. khoj/interface/compiled/_next/static/media/684adc3dde1b03f1-s.p.woff2 +0 -0
  80. khoj/interface/compiled/_next/static/media/82e3b9a1bdaf0c26-s.p.woff2 +0 -0
  81. khoj/interface/compiled/_next/static/media/8d1ea331386a0db8-s.p.woff2 +0 -0
  82. khoj/interface/compiled/_next/static/media/91475f6526542a4f-s.woff2 +0 -0
  83. khoj/interface/compiled/_next/static/media/b98b13dbc1c3b59c-s.p.woff2 +0 -0
  84. khoj/interface/compiled/_next/static/media/c824d7a20139e39d-s.woff2 +0 -0
  85. /khoj/interface/compiled/_next/static/{K_WyVARSz0loPVvwOW1gg → J7Vqh1vjCleYuVLeTaJL6}/_buildManifest.js +0 -0
  86. /khoj/interface/compiled/_next/static/{K_WyVARSz0loPVvwOW1gg → J7Vqh1vjCleYuVLeTaJL6}/_ssgManifest.js +0 -0
  87. {khoj-1.30.11.dev13.dist-info → khoj-1.30.11.dev56.dist-info}/WHEEL +0 -0
  88. {khoj-1.30.11.dev13.dist-info → khoj-1.30.11.dev56.dist-info}/entry_points.txt +0 -0
  89. {khoj-1.30.11.dev13.dist-info → khoj-1.30.11.dev56.dist-info}/licenses/LICENSE +0 -0
khoj/routers/api_chat.py CHANGED
@@ -77,6 +77,7 @@ from khoj.utils.helpers import (
 )
 from khoj.utils.rawconfig import (
     ChatRequestBody,
+    FileAttachment,
     FileFilterRequest,
     FilesFilterRequest,
     LocationData,
@@ -770,6 +771,11 @@ async def chat(
 file_filters = conversation.file_filters if conversation and conversation.file_filters else []
 attached_file_context = gather_raw_query_files(query_files)

+generated_images: List[str] = []
+generated_files: List[FileAttachment] = []
+generated_excalidraw_diagram: str = None
+program_execution_context: List[str] = []
+
 if conversation_commands == [ConversationCommand.Default] or is_automated_task:
     chosen_io = await aget_data_sources_and_output_format(
         q,
@@ -875,21 +881,17 @@ async def chat(
 async for result in send_llm_response(response, tracer.get("usage")):
     yield result

-await sync_to_async(save_to_conversation_log)(
-    q,
-    response_log,
-    user,
-    meta_log,
-    user_message_time,
-    intent_type="summarize",
-    client_application=request.user.client_app,
-    conversation_id=conversation_id,
-    query_images=uploaded_images,
-    train_of_thought=train_of_thought,
-    raw_query_files=raw_query_files,
-    tracer=tracer,
+summarized_document = FileAttachment(
+    name="Summarized Document",
+    content=response_log,
+    type="text/plain",
+    size=len(response_log.encode("utf-8")),
 )
-return
+
+async for result in send_event(ChatEvent.GENERATED_ASSETS, {"files": [summarized_document.model_dump()]}):
+    yield result
+
+generated_files.append(summarized_document)

 custom_filters = []
 if conversation_commands == [ConversationCommand.Help]:
@@ -1078,6 +1080,7 @@ async def chat(
 async for result in send_event(ChatEvent.STATUS, f"**Ran code snippets**: {len(code_results)}"):
     yield result
 except ValueError as e:
+    program_execution_context.append(f"Failed to run code")
     logger.warning(
         f"Failed to use code tool: {e}. Attempting to respond without code results",
         exc_info=True,
@@ -1115,51 +1118,28 @@ async def chat(
 if isinstance(result, dict) and ChatEvent.STATUS in result:
     yield result[ChatEvent.STATUS]
 else:
-    generated_image, status_code, improved_image_prompt, intent_type = result
+    generated_image, status_code, improved_image_prompt = result

+    inferred_queries.append(improved_image_prompt)
     if generated_image is None or status_code != 200:
-        content_obj = {
-            "content-type": "application/json",
-            "intentType": intent_type,
-            "detail": improved_image_prompt,
-            "image": None,
-        }
-        async for result in send_llm_response(json.dumps(content_obj), tracer.get("usage")):
+        program_execution_context.append(f"Failed to generate image with {improved_image_prompt}")
+        async for result in send_event(ChatEvent.STATUS, f"Failed to generate image"):
             yield result
-        return
+    else:
+        generated_images.append(generated_image)

-    await sync_to_async(save_to_conversation_log)(
-        q,
-        generated_image,
-        user,
-        meta_log,
-        user_message_time,
-        intent_type=intent_type,
-        inferred_queries=[improved_image_prompt],
-        client_application=request.user.client_app,
-        conversation_id=conversation_id,
-        compiled_references=compiled_references,
-        online_results=online_results,
-        code_results=code_results,
-        query_images=uploaded_images,
-        train_of_thought=train_of_thought,
-        raw_query_files=raw_query_files,
-        tracer=tracer,
-    )
-    content_obj = {
-        "intentType": intent_type,
-        "inferredQueries": [improved_image_prompt],
-        "image": generated_image,
-    }
-    async for result in send_llm_response(json.dumps(content_obj), tracer.get("usage")):
-        yield result
-    return
+    async for result in send_event(
+        ChatEvent.GENERATED_ASSETS,
+        {
+            "images": [generated_image],
+        },
+    ):
+        yield result

 if ConversationCommand.Diagram in conversation_commands:
     async for result in send_event(ChatEvent.STATUS, f"Creating diagram"):
         yield result

-    intent_type = "excalidraw"
     inferred_queries = []
     diagram_description = ""

@@ -1183,62 +1163,29 @@ async def chat(
 if better_diagram_description_prompt and excalidraw_diagram_description:
     inferred_queries.append(better_diagram_description_prompt)
     diagram_description = excalidraw_diagram_description
+
+    generated_excalidraw_diagram = diagram_description
+
+    async for result in send_event(
+        ChatEvent.GENERATED_ASSETS,
+        {
+            "excalidrawDiagram": excalidraw_diagram_description,
+        },
+    ):
+        yield result
 else:
     error_message = "Failed to generate diagram. Please try again later."
-    async for result in send_llm_response(error_message, tracer.get("usage")):
-        yield result
-
-    await sync_to_async(save_to_conversation_log)(
-        q,
-        error_message,
-        user,
-        meta_log,
-        user_message_time,
-        inferred_queries=[better_diagram_description_prompt],
-        client_application=request.user.client_app,
-        conversation_id=conversation_id,
-        compiled_references=compiled_references,
-        online_results=online_results,
-        code_results=code_results,
-        query_images=uploaded_images,
-        train_of_thought=train_of_thought,
-        raw_query_files=raw_query_files,
-        tracer=tracer,
+    program_execution_context.append(
+        f"AI attempted to programmatically generate a diagram but failed due to a program issue. Generally, it is able to do so, but encountered a system issue this time. AI can suggest text description or rendering of the diagram or user can try again with a simpler prompt."
     )
-    return
-
-    content_obj = {
-        "intentType": intent_type,
-        "inferredQueries": inferred_queries,
-        "image": diagram_description,
-    }

-    await sync_to_async(save_to_conversation_log)(
-        q,
-        excalidraw_diagram_description,
-        user,
-        meta_log,
-        user_message_time,
-        intent_type="excalidraw",
-        inferred_queries=[better_diagram_description_prompt],
-        client_application=request.user.client_app,
-        conversation_id=conversation_id,
-        compiled_references=compiled_references,
-        online_results=online_results,
-        code_results=code_results,
-        query_images=uploaded_images,
-        train_of_thought=train_of_thought,
-        raw_query_files=raw_query_files,
-        tracer=tracer,
-    )
-
-    async for result in send_llm_response(json.dumps(content_obj), tracer.get("usage")):
-        yield result
-    return
+    async for result in send_event(ChatEvent.STATUS, error_message):
+        yield result

 ## Generate Text Output
 async for result in send_event(ChatEvent.STATUS, f"**Generating a well-informed response**"):
     yield result
+
 llm_response, chat_metadata = await agenerate_chat_response(
     defiltered_query,
     meta_log,
@@ -1258,6 +1205,10 @@ async def chat(
     train_of_thought,
     attached_file_context,
     raw_query_files,
+    generated_images,
+    generated_files,
+    generated_excalidraw_diagram,
+    program_execution_context,
     tracer,
 )

khoj/routers/helpers.py CHANGED
@@ -136,7 +136,7 @@ def validate_conversation_config(user: KhojUser):
 if default_config is None:
     raise HTTPException(status_code=500, detail="Contact the server administrator to add a chat model.")

-if default_config.model_type == "openai" and not default_config.openai_config:
+if default_config.model_type == "openai" and not default_config.ai_model_api:
     raise HTTPException(status_code=500, detail="Contact the server administrator to add a chat model.")


@@ -163,7 +163,7 @@ async def is_ready_to_chat(user: KhojUser):
     ChatModelOptions.ModelType.GOOGLE,
     ]
 )
-and user_conversation_config.openai_config
+and user_conversation_config.ai_model_api
 ):
     return True

@@ -990,7 +990,7 @@ async def send_message_to_model_wrapper(
 )

 elif model_type == ChatModelOptions.ModelType.OPENAI:
-    openai_chat_config = conversation_config.openai_config
+    openai_chat_config = conversation_config.ai_model_api
     api_key = openai_chat_config.api_key
     api_base_url = openai_chat_config.api_base_url
     truncated_messages = generate_chatml_messages_with_context(
@@ -1015,7 +1015,7 @@ async def send_message_to_model_wrapper(
     tracer=tracer,
 )
 elif model_type == ChatModelOptions.ModelType.ANTHROPIC:
-    api_key = conversation_config.openai_config.api_key
+    api_key = conversation_config.ai_model_api.api_key
     truncated_messages = generate_chatml_messages_with_context(
         user_message=query,
         context_message=context,
@@ -1037,7 +1037,7 @@ async def send_message_to_model_wrapper(
     tracer=tracer,
 )
 elif model_type == ChatModelOptions.ModelType.GOOGLE:
-    api_key = conversation_config.openai_config.api_key
+    api_key = conversation_config.ai_model_api.api_key
     truncated_messages = generate_chatml_messages_with_context(
         user_message=query,
         context_message=context,
@@ -1102,7 +1102,7 @@ def send_message_to_model_wrapper_sync(
 )

 elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
-    api_key = conversation_config.openai_config.api_key
+    api_key = conversation_config.ai_model_api.api_key
     truncated_messages = generate_chatml_messages_with_context(
         user_message=message,
         system_message=system_message,
@@ -1124,7 +1124,7 @@ def send_message_to_model_wrapper_sync(
     return openai_response

 elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
-    api_key = conversation_config.openai_config.api_key
+    api_key = conversation_config.ai_model_api.api_key
     truncated_messages = generate_chatml_messages_with_context(
         user_message=message,
         system_message=system_message,
@@ -1144,7 +1144,7 @@ def send_message_to_model_wrapper_sync(
 )

 elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
-    api_key = conversation_config.openai_config.api_key
+    api_key = conversation_config.ai_model_api.api_key
     truncated_messages = generate_chatml_messages_with_context(
         user_message=message,
         system_message=system_message,
@@ -1185,6 +1185,10 @@ def generate_chat_response(
     train_of_thought: List[Any] = [],
     query_files: str = None,
     raw_query_files: List[FileAttachment] = None,
+    generated_images: List[str] = None,
+    raw_generated_files: List[FileAttachment] = [],
+    generated_excalidraw_diagram: str = None,
+    program_execution_context: List[str] = [],
     tracer: dict = {},
 ) -> Tuple[Union[ThreadedGenerator, Iterator[str]], Dict[str, str]]:
     # Initialize Variables
@@ -1208,6 +1212,9 @@ def generate_chat_response(
     query_images=query_images,
     train_of_thought=train_of_thought,
     raw_query_files=raw_query_files,
+    generated_images=generated_images,
+    raw_generated_files=raw_generated_files,
+    generated_excalidraw_diagram=generated_excalidraw_diagram,
     tracer=tracer,
 )

@@ -1243,11 +1250,12 @@ def generate_chat_response(
     user_name=user_name,
     agent=agent,
     query_files=query_files,
+    generated_files=raw_generated_files,
     tracer=tracer,
 )

 elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
-    openai_chat_config = conversation_config.openai_config
+    openai_chat_config = conversation_config.ai_model_api
     api_key = openai_chat_config.api_key
     chat_model = conversation_config.chat_model
     chat_response = converse(
@@ -1269,11 +1277,15 @@ def generate_chat_response(
     agent=agent,
     vision_available=vision_available,
     query_files=query_files,
+    generated_files=raw_generated_files,
+    generated_images=generated_images,
+    generated_excalidraw_diagram=generated_excalidraw_diagram,
+    program_execution_context=program_execution_context,
     tracer=tracer,
 )

 elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
-    api_key = conversation_config.openai_config.api_key
+    api_key = conversation_config.ai_model_api.api_key
     chat_response = converse_anthropic(
         compiled_references,
         query_to_run,
@@ -1292,10 +1304,14 @@ def generate_chat_response(
     agent=agent,
     vision_available=vision_available,
     query_files=query_files,
+    generated_files=raw_generated_files,
+    generated_images=generated_images,
+    generated_excalidraw_diagram=generated_excalidraw_diagram,
+    program_execution_context=program_execution_context,
     tracer=tracer,
 )
 elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
-    api_key = conversation_config.openai_config.api_key
+    api_key = conversation_config.ai_model_api.api_key
     chat_response = converse_gemini(
         compiled_references,
         query_to_run,
@@ -1314,6 +1330,10 @@ def generate_chat_response(
     query_images=query_images,
     vision_available=vision_available,
     query_files=query_files,
+    generated_files=raw_generated_files,
+    generated_images=generated_images,
+    generated_excalidraw_diagram=generated_excalidraw_diagram,
+    program_execution_context=program_execution_context,
     tracer=tracer,
 )

@@ -1785,6 +1805,9 @@ class MessageProcessor:
     self.references = {}
     self.usage = {}
     self.raw_response = ""
+    self.generated_images = []
+    self.generated_files = []
+    self.generated_excalidraw_diagram = []

 def convert_message_chunk_to_json(self, raw_chunk: str) -> Dict[str, Any]:
     if raw_chunk.startswith("{") and raw_chunk.endswith("}"):
@@ -1823,6 +1846,16 @@ class MessageProcessor:
         self.raw_response += chunk_data
     else:
         self.raw_response += chunk_data
+elif chunk_type == ChatEvent.GENERATED_ASSETS:
+    chunk_data = chunk["data"]
+    if isinstance(chunk_data, dict):
+        for key in chunk_data:
+            if key == "images":
+                self.generated_images = chunk_data[key]
+            elif key == "files":
+                self.generated_files = chunk_data[key]
+            elif key == "excalidrawDiagram":
+                self.generated_excalidraw_diagram = chunk_data[key]

 def handle_json_response(self, json_data: Dict[str, str]) -> str | Dict[str, str]:
     if "image" in json_data or "details" in json_data:
@@ -1853,7 +1886,14 @@ async def read_chat_stream(response_iterator: AsyncGenerator[str, None]) -> Dict
 if buffer:
     processor.process_message_chunk(buffer)

-return {"response": processor.raw_response, "references": processor.references, "usage": processor.usage}
+return {
+    "response": processor.raw_response,
+    "references": processor.references,
+    "usage": processor.usage,
+    "images": processor.generated_images,
+    "files": processor.generated_files,
+    "excalidrawDiagram": processor.generated_excalidraw_diagram,
+}


 def get_user_config(user: KhojUser, request: Request, is_detailed: bool = False):
khoj/utils/initialization.py CHANGED
@@ -6,9 +6,9 @@ import openai

 from khoj.database.adapters import ConversationAdapters
 from khoj.database.models import (
+    AiModelApi,
     ChatModelOptions,
     KhojUser,
-    OpenAIProcessorConversationConfig,
     SpeechToTextModelOptions,
     TextToImageModelConfig,
 )
@@ -98,7 +98,7 @@ def initialization(interactive: bool = True):
 TextToImageModelConfig.objects.create(
     model_name=openai_text_to_image_model,
     model_type=TextToImageModelConfig.ModelType.OPENAI,
-    openai_config=openai_provider,
+    ai_model_api=openai_provider,
 )

 # Set up Google's Gemini online chat models
@@ -177,7 +177,7 @@ def initialization(interactive: bool = True):
     vision_enabled: bool = False,
     is_offline: bool = False,
     provider_name: str = None,
-) -> Tuple[bool, OpenAIProcessorConversationConfig]:
+) -> Tuple[bool, AiModelApi]:
     supported_vision_models = (
         default_openai_chat_models + default_anthropic_chat_models + default_gemini_chat_models
     )
@@ -192,16 +192,14 @@ def initialization(interactive: bool = True):

 logger.info(f"️💬 Setting up your {provider_name} chat configuration")

-chat_provider = None
+ai_model_api = None
 if not is_offline:
     if interactive:
         user_api_key = input(f"Enter your {provider_name} API key (default: {default_api_key}): ")
         api_key = user_api_key if user_api_key != "" else default_api_key
     else:
         api_key = default_api_key
-    chat_provider = OpenAIProcessorConversationConfig.objects.create(
-        api_key=api_key, name=provider_name, api_base_url=api_base_url
-    )
+    ai_model_api = AiModelApi.objects.create(api_key=api_key, name=provider_name, api_base_url=api_base_url)

 if interactive:
     chat_model_names = input(
@@ -223,19 +221,19 @@ def initialization(interactive: bool = True):
     "max_prompt_size": default_max_tokens,
     "vision_enabled": vision_enabled,
     "tokenizer": default_tokenizer,
-    "openai_config": chat_provider,
+    "ai_model_api": ai_model_api,
 }

 ChatModelOptions.objects.create(**chat_model_options)

 logger.info(f"🗣️ {provider_name} chat model configuration complete")
-return True, chat_provider
+return True, ai_model_api

 def _update_chat_model_options():
     """Update available chat models for OpenAI-compatible APIs"""
     try:
         # Get OpenAI configs with custom base URLs
-        custom_configs = OpenAIProcessorConversationConfig.objects.exclude(api_base_url__isnull=True)
+        custom_configs = AiModelApi.objects.exclude(api_base_url__isnull=True)

         for config in custom_configs:
             try:
@@ -247,7 +245,7 @@ def initialization(interactive: bool = True):

 # Get existing chat model options for this config
 existing_models = ChatModelOptions.objects.filter(
-    openai_config=config, model_type=ChatModelOptions.ModelType.OPENAI
+    ai_model_api=config, model_type=ChatModelOptions.ModelType.OPENAI
 )

 # Add new models
@@ -259,7 +257,7 @@ def initialization(interactive: bool = True):
     max_prompt_size=model_to_prompt_size.get(model),
     vision_enabled=model in default_openai_chat_models,
     tokenizer=model_to_tokenizer.get(model),
-    openai_config=config,
+    ai_model_api=config,
 )

 # Remove models that are no longer available
{khoj-1.30.11.dev13.dist-info → khoj-1.30.11.dev56.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: khoj
-Version: 1.30.11.dev13
+Version: 1.30.11.dev56
 Summary: Your Second Brain
 Project-URL: Homepage, https://khoj.dev
 Project-URL: Documentation, https://docs.khoj.dev
@@ -31,6 +31,7 @@ Requires-Dist: dateparser>=1.1.1
 Requires-Dist: defusedxml==0.7.1
 Requires-Dist: django-apscheduler==0.6.2
 Requires-Dist: django-phonenumber-field==7.3.0
+Requires-Dist: django-unfold==0.42.0
 Requires-Dist: django==5.0.9
 Requires-Dist: docx2txt==0.8
 Requires-Dist: einops==0.8.0