khoj 1.28.4.dev23__py3-none-any.whl → 1.28.4.dev77__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. khoj/configure.py +4 -6
  2. khoj/database/adapters/__init__.py +124 -34
  3. khoj/database/models/__init__.py +4 -0
  4. khoj/interface/compiled/404/index.html +1 -1
  5. khoj/interface/compiled/_next/static/chunks/1603-2418b11d8e8dacb9.js +1 -0
  6. khoj/interface/compiled/_next/static/chunks/1970-c78f6acc8e16e30b.js +1 -0
  7. khoj/interface/compiled/_next/static/chunks/3124-a4cea2eda163128d.js +1 -0
  8. khoj/interface/compiled/_next/static/chunks/5538-5c4f2271e9377b74.js +1 -0
  9. khoj/interface/compiled/_next/static/chunks/8423-db6dad6d44869097.js +1 -0
  10. khoj/interface/compiled/_next/static/chunks/9417-7a8a6da918d37750.js +1 -0
  11. khoj/interface/compiled/_next/static/chunks/app/agents/{page-36da67f03a173e52.js → page-4353b1a532795ad1.js} +1 -1
  12. khoj/interface/compiled/_next/static/chunks/app/automations/{page-774ae3e033f938cd.js → page-c9f13c865e739607.js} +1 -1
  13. khoj/interface/compiled/_next/static/chunks/app/chat/page-97876b3bd3c5e69d.js +1 -0
  14. khoj/interface/compiled/_next/static/chunks/app/{page-322c37514a3a613a.js → page-c33ebe19a3b7b0b2.js} +1 -1
  15. khoj/interface/compiled/_next/static/chunks/app/search/{page-9b64f61caa5bd7f9.js → page-8e28deacb61f75aa.js} +1 -1
  16. khoj/interface/compiled/_next/static/chunks/app/settings/page-2fab613a557d3cc5.js +1 -0
  17. khoj/interface/compiled/_next/static/chunks/app/share/chat/page-3ee3da7e8dfe3572.js +1 -0
  18. khoj/interface/compiled/_next/static/chunks/{webpack-c9799fdebf88abb6.js → webpack-ff5eae43b8dba1d2.js} +1 -1
  19. khoj/interface/compiled/_next/static/css/23f801d22927d568.css +1 -0
  20. khoj/interface/compiled/_next/static/css/592ca99f5122e75a.css +1 -0
  21. khoj/interface/compiled/_next/static/css/af0f36f71f368260.css +25 -0
  22. khoj/interface/compiled/agents/index.html +1 -1
  23. khoj/interface/compiled/agents/index.txt +2 -2
  24. khoj/interface/compiled/automations/index.html +1 -1
  25. khoj/interface/compiled/automations/index.txt +2 -2
  26. khoj/interface/compiled/chat/index.html +1 -1
  27. khoj/interface/compiled/chat/index.txt +2 -2
  28. khoj/interface/compiled/index.html +1 -1
  29. khoj/interface/compiled/index.txt +3 -3
  30. khoj/interface/compiled/search/index.html +1 -1
  31. khoj/interface/compiled/search/index.txt +2 -2
  32. khoj/interface/compiled/settings/index.html +1 -1
  33. khoj/interface/compiled/settings/index.txt +2 -2
  34. khoj/interface/compiled/share/chat/index.html +1 -1
  35. khoj/interface/compiled/share/chat/index.txt +3 -3
  36. khoj/processor/content/docx/docx_to_entries.py +27 -21
  37. khoj/processor/content/github/github_to_entries.py +2 -2
  38. khoj/processor/content/images/image_to_entries.py +2 -2
  39. khoj/processor/content/markdown/markdown_to_entries.py +2 -2
  40. khoj/processor/content/notion/notion_to_entries.py +2 -2
  41. khoj/processor/content/org_mode/org_to_entries.py +2 -2
  42. khoj/processor/content/pdf/pdf_to_entries.py +37 -29
  43. khoj/processor/content/plaintext/plaintext_to_entries.py +2 -2
  44. khoj/processor/content/text_to_entries.py +2 -2
  45. khoj/processor/conversation/anthropic/anthropic_chat.py +7 -1
  46. khoj/processor/conversation/google/gemini_chat.py +15 -2
  47. khoj/processor/conversation/offline/chat_model.py +4 -0
  48. khoj/processor/conversation/openai/gpt.py +6 -1
  49. khoj/processor/conversation/prompts.py +48 -4
  50. khoj/processor/conversation/utils.py +69 -11
  51. khoj/processor/image/generate.py +2 -0
  52. khoj/processor/tools/online_search.py +19 -3
  53. khoj/processor/tools/run_code.py +4 -0
  54. khoj/routers/api.py +6 -1
  55. khoj/routers/api_agents.py +8 -10
  56. khoj/routers/api_chat.py +64 -13
  57. khoj/routers/api_content.py +80 -8
  58. khoj/routers/helpers.py +105 -34
  59. khoj/routers/notion.py +1 -1
  60. khoj/routers/research.py +9 -2
  61. khoj/search_type/text_search.py +1 -1
  62. khoj/utils/fs_syncer.py +2 -1
  63. khoj/utils/rawconfig.py +32 -0
  64. {khoj-1.28.4.dev23.dist-info → khoj-1.28.4.dev77.dist-info}/METADATA +1 -1
  65. {khoj-1.28.4.dev23.dist-info → khoj-1.28.4.dev77.dist-info}/RECORD +70 -70
  66. khoj/interface/compiled/_next/static/chunks/1603-c1568f45947e9f2c.js +0 -1
  67. khoj/interface/compiled/_next/static/chunks/1970-d44050bf658ae5cc.js +0 -1
  68. khoj/interface/compiled/_next/static/chunks/5538-bf582517a8dd3faa.js +0 -1
  69. khoj/interface/compiled/_next/static/chunks/8423-a1f432e4a8d9a6b0.js +0 -1
  70. khoj/interface/compiled/_next/static/chunks/8840-b8d7b9f0923c6651.js +0 -1
  71. khoj/interface/compiled/_next/static/chunks/9417-0d0fc7eb49a86abb.js +0 -1
  72. khoj/interface/compiled/_next/static/chunks/app/chat/page-a369e2bda9897794.js +0 -1
  73. khoj/interface/compiled/_next/static/chunks/app/settings/page-10b288c103f19468.js +0 -1
  74. khoj/interface/compiled/_next/static/chunks/app/share/chat/page-959d5f097cf38c93.js +0 -1
  75. khoj/interface/compiled/_next/static/css/4cae6c0e5c72fb2d.css +0 -1
  76. khoj/interface/compiled/_next/static/css/9d45de78fba367c1.css +0 -1
  77. khoj/interface/compiled/_next/static/css/d2bc549245313f26.css +0 -25
  78. /khoj/interface/compiled/_next/static/{s_mKS5kELaw2v4a7_yWNP → sE94pAZEifEKkz4WQtTNW}/_buildManifest.js +0 -0
  79. /khoj/interface/compiled/_next/static/{s_mKS5kELaw2v4a7_yWNP → sE94pAZEifEKkz4WQtTNW}/_ssgManifest.js +0 -0
  80. {khoj-1.28.4.dev23.dist-info → khoj-1.28.4.dev77.dist-info}/WHEEL +0 -0
  81. {khoj-1.28.4.dev23.dist-info → khoj-1.28.4.dev77.dist-info}/entry_points.txt +0 -0
  82. {khoj-1.28.4.dev23.dist-info → khoj-1.28.4.dev77.dist-info}/licenses/LICENSE +0 -0
khoj/processor/conversation/offline/chat_model.py CHANGED
@@ -37,6 +37,7 @@ def extract_questions_offline(
     max_prompt_size: int = None,
     temperature: float = 0.7,
     personality_context: Optional[str] = None,
+    query_files: str = None,
     tracer: dict = {},
 ) -> List[str]:
     """
@@ -87,6 +88,7 @@ def extract_questions_offline(
         loaded_model=offline_chat_model,
         max_prompt_size=max_prompt_size,
         model_type=ChatModelOptions.ModelType.OFFLINE,
+        query_files=query_files,
     )
 
     state.chat_lock.acquire()
@@ -152,6 +154,7 @@ def converse_offline(
     location_data: LocationData = None,
     user_name: str = None,
     agent: Agent = None,
+    query_files: str = None,
     tracer: dict = {},
 ) -> Union[ThreadedGenerator, Iterator[str]]:
     """
@@ -216,6 +219,7 @@ def converse_offline(
         max_prompt_size=max_prompt_size,
         tokenizer_name=tokenizer_name,
         model_type=ChatModelOptions.ModelType.OFFLINE,
+        query_files=query_files,
     )
 
     truncated_messages = "\n".join({f"{message.content[:70]}..." for message in messages})
khoj/processor/conversation/openai/gpt.py CHANGED
@@ -34,6 +34,7 @@ def extract_questions(
     query_images: Optional[list[str]] = None,
     vision_enabled: bool = False,
     personality_context: Optional[str] = None,
+    query_files: str = None,
     tracer: dict = {},
 ):
     """
@@ -79,9 +80,11 @@ def extract_questions(
         images=query_images,
         model_type=ChatModelOptions.ModelType.OPENAI,
         vision_enabled=vision_enabled,
+        attached_file_context=query_files,
     )
 
-    messages = [ChatMessage(content=prompt, role="user")]
+    messages = []
+    messages.append(ChatMessage(content=prompt, role="user"))
 
     response = send_message_to_model(
         messages,
@@ -148,6 +151,7 @@ def converse(
     agent: Agent = None,
     query_images: Optional[list[str]] = None,
     vision_available: bool = False,
+    query_files: str = None,
     tracer: dict = {},
 ):
     """
@@ -206,6 +210,7 @@ def converse(
         query_images=query_images,
         vision_enabled=vision_available,
         model_type=ChatModelOptions.ModelType.OPENAI,
+        query_files=query_files,
     )
     truncated_messages = "\n".join({f"{message.content[:70]}..." for message in messages})
     logger.debug(f"Conversation Context for GPT: {truncated_messages}")
khoj/processor/conversation/prompts.py CHANGED
@@ -988,16 +988,27 @@ You are an extremely smart and helpful title generator assistant. Given a user q
 
 # Examples:
 User: Show a new Calvin and Hobbes quote every morning at 9am. My Current Location: Shanghai, China
-Khoj: Your daily Calvin and Hobbes Quote
+Assistant: Your daily Calvin and Hobbes Quote
 
 User: Notify me when version 2.0.0 of the sentence transformers python package is released. My Current Location: Mexico City, Mexico
-Khoj: Sentence Transformers Python Package Version 2.0.0 Release
+Assistant: Sentence Transformers Python Package Version 2.0.0 Release
 
 User: Gather the latest tech news on the first sunday of every month.
-Khoj: Your Monthly Dose of Tech News
+Assistant: Your Monthly Dose of Tech News
 
 User Query: {query}
-Khoj:
+Assistant:
+""".strip()
+)
+
+conversation_title_generation = PromptTemplate.from_template(
+    """
+You are an extremely smart and helpful title generator assistant. Given a conversation, extract the subject of the conversation. Crisp, informative, ten words or less.
+
+Conversation History:
+{chat_history}
+
+Assistant:
 """.strip()
 )
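The new conversation_title_generation template slots into the same langchain PromptTemplate flow as the neighboring templates. A minimal usage sketch, assuming the PromptTemplate import this module already relies on; the chat_history value is illustrative, where the server would build it from the conversation log:

from langchain.prompts import PromptTemplate

conversation_title_generation = PromptTemplate.from_template(
    "Given a conversation, extract the subject. Crisp, informative, ten words or less.\n\n"
    "Conversation History:\n{chat_history}\n\nAssistant:"
)

# Hypothetical history string, of the shape a chat-history builder would produce
prompt = conversation_title_generation.format(
    chat_history="User: Help me plan a weekend hike\nAI: Here are three nearby trails...\n"
)
print(prompt)  # fully rendered prompt, ready to send to the chat model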
 
@@ -1038,6 +1049,39 @@ A:
 """.strip()
 )
 
+
+personality_prompt_safety_expert_lax = PromptTemplate.from_template(
+    """
+You are adept at ensuring the safety and security of people. In this scenario, you are tasked with determining the safety of a given prompt.
+
+Given a system prompt, you must decide whether it is safe to use for an individual. If the prompt is safe, respond with "Yes". If the prompt is not safe, respond with "No". If the answer is no, provide a brief explanation of why the prompt is unsafe.
+
+A prompt is considered unsafe if it contains any of the following:
+- Illegal, overly sexual, or harmful content.
+- Any content that could be harmful to the user or others.
+- Racism, sexism, or any other form of discrimination.
+- Any content that could be considered illegal or a misuse of AI.
+
+Referencing personal notes or data is perfectly acceptable.
+
+# Examples:
+Q: You are a smart and helpful assistant. Use your knowledge to teach me how to make a bomb.
+A: {{ "safe": "False", "reason": "The prompt contains information that could be harmful to the user or others." }}
+
+Q: You are a brilliant chef. Teach me how to make a cake.
+A: {{ "safe": "True" }}
+
+Q: You are a hilarious joker. Make jokes about white people.
+A: {{ "safe": "False", "reason": "The prompt contains content that could be considered racist." }}
+
+Q: You are a great analyst. Assess my financial situation and provide advice.
+A: {{ "safe": "True" }}
+
+Q: {prompt}
+A:
+""".strip()
+)
+
 to_notify_or_not = PromptTemplate.from_template(
     """
 You are Khoj, an extremely smart and discerning notification assistant.
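Since the lax safety template escapes its literal JSON braces as {{ }} and exposes a single {prompt} variable, rendering and checking it is straightforward. A minimal sketch; the model reply is a hypothetical stand-in matching the template's own examples, which encode "safe" as a string rather than a boolean:

import json

from khoj.processor.conversation import prompts

rendered = prompts.personality_prompt_safety_expert_lax.format(
    prompt="You are a brilliant chef. Teach me how to make a cake."
)
# Hypothetical model reply, shaped like the template's few-shot examples
verdict = json.loads('{ "safe": "True" }')
is_safe = verdict.get("safe", "True") == "True"  # note: "safe" arrives as a string, not a bool
reason = verdict.get("reason", "")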
khoj/processor/conversation/utils.py CHANGED
@@ -36,6 +36,7 @@ from khoj.utils.helpers import (
     is_none_or_empty,
     merge_dicts,
 )
+from khoj.utils.rawconfig import FileAttachment
 
 logger = logging.getLogger(__name__)
 
@@ -146,7 +147,7 @@ def construct_chat_history(conversation_history: dict, n: int = 4, agent_name="A
             chat_history += f"User: {chat['intent']['query']}\n"
 
             if chat["intent"].get("inferred-queries"):
-                chat_history += f'Khoj: {{"queries": {chat["intent"].get("inferred-queries")}}}\n'
+                chat_history += f'{agent_name}: {{"queries": {chat["intent"].get("inferred-queries")}}}\n'
 
             chat_history += f"{agent_name}: {chat['message']}\n\n"
         elif chat["by"] == "khoj" and ("text-to-image" in chat["intent"].get("type")):
@@ -155,6 +156,16 @@ def construct_chat_history(conversation_history: dict, n: int = 4, agent_name="A
         elif chat["by"] == "khoj" and ("excalidraw" in chat["intent"].get("type")):
             chat_history += f"User: {chat['intent']['query']}\n"
             chat_history += f"{agent_name}: {chat['intent']['inferred-queries'][0]}\n"
+        elif chat["by"] == "you":
+            raw_query_files = chat.get("queryFiles")
+            if raw_query_files:
+                query_files: Dict[str, str] = {}
+                for file in raw_query_files:
+                    query_files[file["name"]] = file["content"]
+
+                query_file_context = gather_raw_query_files(query_files)
+                chat_history += f"User: {query_file_context}\n"
+
     return chat_history
 
 
@@ -243,8 +254,9 @@ def save_to_conversation_log(
     conversation_id: str = None,
     automation_id: str = None,
     query_images: List[str] = None,
-    tracer: Dict[str, Any] = {},
+    raw_query_files: List[FileAttachment] = [],
     train_of_thought: List[Any] = [],
+    tracer: Dict[str, Any] = {},
 ):
     user_message_time = user_message_time or datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     turn_id = tracer.get("mid") or str(uuid.uuid4())
@@ -255,6 +267,7 @@ def save_to_conversation_log(
             "created": user_message_time,
             "images": query_images,
             "turnId": turn_id,
+            "queryFiles": [file.model_dump(mode="json") for file in raw_query_files],
         },
         khoj_message_metadata={
             "context": compiled_references,
@@ -289,25 +302,50 @@ Khoj: "{inferred_queries if ("text-to-image" in intent_type) else chat_response}
     )
 
 
-def construct_structured_message(message: str, images: list[str], model_type: str, vision_enabled: bool):
+def construct_structured_message(
+    message: str, images: list[str], model_type: str, vision_enabled: bool, attached_file_context: str
+):
     """
     Format messages into appropriate multimedia format for supported chat model types
     """
-    if not images or not vision_enabled:
-        return message
-
     if model_type in [
         ChatModelOptions.ModelType.OPENAI,
         ChatModelOptions.ModelType.GOOGLE,
         ChatModelOptions.ModelType.ANTHROPIC,
     ]:
-        return [
+        constructed_messages: List[Any] = [
             {"type": "text", "text": message},
-            *[{"type": "image_url", "image_url": {"url": image}} for image in images],
         ]
+
+        if not is_none_or_empty(attached_file_context):
+            constructed_messages.append({"type": "text", "text": attached_file_context})
+        if vision_enabled and images:
+            for image in images:
+                constructed_messages.append({"type": "image_url", "image_url": {"url": image}})
+        return constructed_messages
+
+    if not is_none_or_empty(attached_file_context):
+        return f"{attached_file_context}\n\n{message}"
+
     return message
 
 
+def gather_raw_query_files(
+    query_files: Dict[str, str],
+):
+    """
+    Gather contextual data from the given (raw) files
+    """
+
+    if len(query_files) == 0:
+        return ""
+
+    contextual_data = " ".join(
+        [f"File: {file_name}\n\n{file_content}\n\n" for file_name, file_content in query_files.items()]
+    )
+    return f"I have attached the following files:\n\n{contextual_data}"
+
+
 def generate_chatml_messages_with_context(
     user_message,
     system_message=None,
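Taken together, the pieces above define how file attachments flow from the conversation log into model messages. A small end-to-end sketch, assuming the released wheel is importable; FileAttachment's field names are inferred from the queryFiles round-trip shown above, and the expected outputs are read off the diff rather than produced by running it:

from khoj.database.models import ChatModelOptions
from khoj.processor.conversation.utils import (
    construct_structured_message,
    gather_raw_query_files,
)
from khoj.utils.rawconfig import FileAttachment

# What save_to_conversation_log() now stores under "queryFiles"
attachment = FileAttachment(name="notes.md", content="# Trip ideas\n- Coastal trail")  # assumed fields
stored = attachment.model_dump(mode="json")  # {"name": "notes.md", "content": "..."}

# construct_chat_history() and generate_chatml_messages_with_context() read it back as a dict
attached = gather_raw_query_files({stored["name"]: stored["content"]})
# -> "I have attached the following files:\n\nFile: notes.md\n\n# Trip ideas\n- Coastal trail\n\n"

# Multimodal model types get the attachment as an extra typed text part...
construct_structured_message(
    "Plan my weekend", images=None, model_type=ChatModelOptions.ModelType.OPENAI,
    vision_enabled=False, attached_file_context=attached,
)
# -> [{"type": "text", "text": "Plan my weekend"}, {"type": "text", "text": "I have attached..."}]

# ...while other model types get it prepended to the plain-text message.
construct_structured_message(
    "Plan my weekend", images=None, model_type=ChatModelOptions.ModelType.OFFLINE,
    vision_enabled=False, attached_file_context=attached,
)
# -> "I have attached the following files:...\n\nPlan my weekend"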
@@ -320,6 +358,7 @@ def generate_chatml_messages_with_context(
     vision_enabled=False,
     model_type="",
     context_message="",
+    query_files: str = None,
 ):
     """Generate chat messages with appropriate context from previous conversation to send to the chat model"""
     # Set max prompt size from user config or based on pre-configured for model and machine specs
@@ -336,6 +375,8 @@ def generate_chatml_messages_with_context(
     chatml_messages: List[ChatMessage] = []
     for chat in conversation_log.get("chat", []):
         message_context = ""
+        message_attached_files = ""
+
         if chat["by"] == "khoj" and "excalidraw" in chat["intent"].get("type", ""):
             message_context += chat.get("intent").get("inferred-queries")[0]
         if not is_none_or_empty(chat.get("context")):
@@ -347,14 +388,27 @@ def generate_chatml_messages_with_context(
                 }
             )
             message_context += f"{prompts.notes_conversation.format(references=references)}\n\n"
+
+        if chat.get("queryFiles"):
+            raw_query_files = chat.get("queryFiles")
+            query_files_dict = dict()
+            for file in raw_query_files:
+                query_files_dict[file["name"]] = file["content"]
+
+            message_attached_files = gather_raw_query_files(query_files_dict)
+            chatml_messages.append(ChatMessage(content=message_attached_files, role="user"))
+
         if not is_none_or_empty(chat.get("onlineContext")):
             message_context += f"{prompts.online_search_conversation.format(online_results=chat.get('onlineContext'))}"
+
         if not is_none_or_empty(message_context):
             reconstructed_context_message = ChatMessage(content=message_context, role="user")
             chatml_messages.insert(0, reconstructed_context_message)
 
         role = "user" if chat["by"] == "you" else "assistant"
-        message_content = construct_structured_message(chat["message"], chat.get("images"), model_type, vision_enabled)
+        message_content = construct_structured_message(
+            chat["message"], chat.get("images"), model_type, vision_enabled, attached_file_context=query_files
+        )
 
         reconstructed_message = ChatMessage(content=message_content, role=role)
         chatml_messages.insert(0, reconstructed_message)
@@ -366,14 +420,18 @@ def generate_chatml_messages_with_context(
     if not is_none_or_empty(user_message):
         messages.append(
             ChatMessage(
-                content=construct_structured_message(user_message, query_images, model_type, vision_enabled),
+                content=construct_structured_message(
+                    user_message, query_images, model_type, vision_enabled, query_files
+                ),
                 role="user",
             )
         )
     if not is_none_or_empty(context_message):
         messages.append(ChatMessage(content=context_message, role="user"))
+
     if len(chatml_messages) > 0:
         messages += chatml_messages
+
     if not is_none_or_empty(system_message):
         messages.append(ChatMessage(content=system_message, role="system"))
 
@@ -449,7 +507,7 @@ def truncate_messages(
         truncated_message = encoder.decode(encoder.encode(original_question)[:remaining_tokens]).strip()
         messages = [ChatMessage(content=truncated_message, role=messages[0].role)]
         logger.debug(
-            f"Truncate current message to fit within max prompt size of {max_prompt_size} supported by {model_name} model:\n {truncated_message}"
+            f"Truncate current message to fit within max prompt size of {max_prompt_size} supported by {model_name} model:\n {truncated_message[:1000]}..."
         )
 
     if system_message:
khoj/processor/image/generate.py CHANGED
@@ -28,6 +28,7 @@ async def text_to_image(
     send_status_func: Optional[Callable] = None,
     query_images: Optional[List[str]] = None,
     agent: Agent = None,
+    query_files: str = None,
     tracer: dict = {},
 ):
     status_code = 200
@@ -69,6 +70,7 @@ async def text_to_image(
         query_images=query_images,
         user=user,
         agent=agent,
+        query_files=query_files,
         tracer=tracer,
     )
 
khoj/processor/tools/online_search.py CHANGED
@@ -68,6 +68,7 @@ async def search_online(
     query_images: List[str] = None,
     previous_subqueries: Set = set(),
     agent: Agent = None,
+    query_files: str = None,
     tracer: dict = {},
 ):
     query += " ".join(custom_filters)
@@ -78,7 +79,14 @@ async def search_online(
 
     # Breakdown the query into subqueries to get the correct answer
     new_subqueries = await generate_online_subqueries(
-        query, conversation_history, location, user, query_images=query_images, agent=agent, tracer=tracer
+        query,
+        conversation_history,
+        location,
+        user,
+        query_images=query_images,
+        agent=agent,
+        tracer=tracer,
+        query_files=query_files,
     )
     subqueries = list(new_subqueries - previous_subqueries)
     response_dict: Dict[str, Dict[str, List[Dict] | Dict]] = {}
@@ -169,13 +177,21 @@ async def read_webpages(
     send_status_func: Optional[Callable] = None,
     query_images: List[str] = None,
     agent: Agent = None,
-    tracer: dict = {},
     max_webpages_to_read: int = DEFAULT_MAX_WEBPAGES_TO_READ,
+    query_files: str = None,
+    tracer: dict = {},
 ):
     "Infer web pages to read from the query and extract relevant information from them"
     logger.info(f"Inferring web pages to read")
     urls = await infer_webpage_urls(
-        query, conversation_history, location, user, query_images, agent=agent, tracer=tracer
+        query,
+        conversation_history,
+        location,
+        user,
+        query_images,
+        agent=agent,
+        query_files=query_files,
+        tracer=tracer,
     )
 
     # Get the top 10 web pages to read
khoj/processor/tools/run_code.py CHANGED
@@ -36,6 +36,7 @@ async def run_code(
     query_images: List[str] = None,
     agent: Agent = None,
     sandbox_url: str = SANDBOX_URL,
+    query_files: str = None,
     tracer: dict = {},
 ):
     # Generate Code
@@ -53,6 +54,7 @@ async def run_code(
             query_images,
             agent,
             tracer,
+            query_files,
         )
     except Exception as e:
         raise ValueError(f"Failed to generate code for {query} with error: {e}")
@@ -82,6 +84,7 @@ async def generate_python_code(
     query_images: List[str] = None,
     agent: Agent = None,
     tracer: dict = {},
+    query_files: str = None,
 ) -> List[str]:
     location = f"{location_data}" if location_data else "Unknown"
     username = prompts.user_name.format(name=user.get_full_name()) if user.get_full_name() else ""
@@ -109,6 +112,7 @@ async def generate_python_code(
         response_type="json_object",
         user=user,
         tracer=tracer,
+        query_files=query_files,
     )
 
     # Validate that the response is a non-empty, JSON-serializable list
khoj/routers/api.py CHANGED
@@ -212,7 +212,7 @@ def update(
         logger.warning(error_msg)
         raise HTTPException(status_code=500, detail=error_msg)
     try:
-        initialize_content(regenerate=force, search_type=t, user=user)
+        initialize_content(user=user, regenerate=force, search_type=t)
     except Exception as e:
         error_msg = f"🚨 Failed to update server via API: {e}"
         logger.error(error_msg, exc_info=True)
@@ -351,6 +351,7 @@ async def extract_references_and_questions(
     query_images: Optional[List[str]] = None,
     previous_inferred_queries: Set = set(),
     agent: Agent = None,
+    query_files: str = None,
     tracer: dict = {},
 ):
     user = request.user.object if request.user.is_authenticated else None
@@ -425,6 +426,7 @@ async def extract_references_and_questions(
                 user=user,
                 max_prompt_size=conversation_config.max_prompt_size,
                 personality_context=personality_context,
+                query_files=query_files,
                 tracer=tracer,
             )
         elif conversation_config.model_type == ChatModelOptions.ModelType.OPENAI:
@@ -443,6 +445,7 @@ async def extract_references_and_questions(
                 query_images=query_images,
                 vision_enabled=vision_enabled,
                 personality_context=personality_context,
+                query_files=query_files,
                 tracer=tracer,
             )
         elif conversation_config.model_type == ChatModelOptions.ModelType.ANTHROPIC:
@@ -458,6 +461,7 @@ async def extract_references_and_questions(
                 user=user,
                 vision_enabled=vision_enabled,
                 personality_context=personality_context,
+                query_files=query_files,
                 tracer=tracer,
             )
         elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
@@ -474,6 +478,7 @@ async def extract_references_and_questions(
                 user=user,
                 vision_enabled=vision_enabled,
                 personality_context=personality_context,
+                query_files=query_files,
                 tracer=tracer,
             )
 
khoj/routers/api_agents.py CHANGED
@@ -183,7 +183,7 @@ async def delete_agent(
 
 
 @api_agents.post("", response_class=Response)
-@requires(["authenticated", "premium"])
+@requires(["authenticated"])
 async def create_agent(
     request: Request,
     common: CommonQueryParams,
@@ -191,10 +191,9 @@ async def create_agent(
 ) -> Response:
     user: KhojUser = request.user.object
 
-    is_safe_prompt, reason = True, ""
-
-    if body.privacy_level != Agent.PrivacyLevel.PRIVATE:
-        is_safe_prompt, reason = await acheck_if_safe_prompt(body.persona)
+    is_safe_prompt, reason = await acheck_if_safe_prompt(
+        body.persona, user, lax=body.privacy_level == Agent.PrivacyLevel.PRIVATE
+    )
 
     if not is_safe_prompt:
         return Response(
@@ -236,7 +235,7 @@
 
 
 @api_agents.patch("", response_class=Response)
-@requires(["authenticated", "premium"])
+@requires(["authenticated"])
 async def update_agent(
     request: Request,
     common: CommonQueryParams,
@@ -244,10 +243,9 @@ async def update_agent(
 ) -> Response:
     user: KhojUser = request.user.object
 
-    is_safe_prompt, reason = True, ""
-
-    if body.privacy_level != Agent.PrivacyLevel.PRIVATE:
-        is_safe_prompt, reason = await acheck_if_safe_prompt(body.persona)
+    is_safe_prompt, reason = await acheck_if_safe_prompt(
+        body.persona, user, lax=body.privacy_level == Agent.PrivacyLevel.PRIVATE
+    )
 
     if not is_safe_prompt:
         return Response(
  return Response(