khoj 1.21.5__py3-none-any.whl → 1.21.7.dev7__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
Files changed (75)
  1. khoj/database/adapters/__init__.py +12 -0
  2. khoj/database/migrations/0056_chatmodeloptions_vision_enabled.py +17 -0
  3. khoj/database/migrations/0057_merge_20240816_1409.py +13 -0
  4. khoj/database/migrations/0060_merge_20240905_1828.py +14 -0
  5. khoj/database/models/__init__.py +1 -0
  6. khoj/interface/compiled/404/index.html +1 -1
  7. khoj/interface/compiled/_next/static/chunks/{3062-a42d847c919a9ea4.js → 3062-9be9a4e34f82ed3a.js} +1 -1
  8. khoj/interface/compiled/_next/static/chunks/3678-0732dd9d2f472171.js +25 -0
  9. khoj/interface/compiled/_next/static/chunks/8423-b6a61d82233d1a82.js +1 -0
  10. khoj/interface/compiled/_next/static/chunks/9001-3b27af6d5f21df44.js +21 -0
  11. khoj/interface/compiled/_next/static/chunks/9162-0be016519a18568b.js +11 -0
  12. khoj/interface/compiled/_next/static/chunks/{9178-d23cb0dbee40a775.js → 9178-3a0baad1c172d515.js} +1 -1
  13. khoj/interface/compiled/_next/static/chunks/{9693-91b03052c5cabded.js → 9984-e410179c6fac7cf1.js} +1 -1
  14. khoj/interface/compiled/_next/static/chunks/app/agents/{page-922694b75f1fb67b.js → page-462502107217be82.js} +1 -1
  15. khoj/interface/compiled/_next/static/chunks/app/automations/page-e30a75db8719f439.js +1 -0
  16. khoj/interface/compiled/_next/static/chunks/app/chat/page-4bb4f2422f5ec5f2.js +1 -0
  17. khoj/interface/compiled/_next/static/chunks/app/factchecker/page-693fe53982bf33e1.js +1 -0
  18. khoj/interface/compiled/_next/static/chunks/app/page-c26f689e39b400ba.js +1 -0
  19. khoj/interface/compiled/_next/static/chunks/app/search/{page-dcd385f03255ef36.js → page-0798bb43c2e368bf.js} +1 -1
  20. khoj/interface/compiled/_next/static/chunks/app/settings/{page-ddcd51147d18c694.js → page-f518555f8e2fd794.js} +1 -1
  21. khoj/interface/compiled/_next/static/chunks/app/share/chat/page-1a639fb3f120fee6.js +1 -0
  22. khoj/interface/compiled/_next/static/chunks/{webpack-95cfd7a1948cfeed.js → webpack-40d9ecfe7efa5386.js} +1 -1
  23. khoj/interface/compiled/_next/static/css/2a860030cf7c384b.css +1 -0
  24. khoj/interface/compiled/_next/static/css/3e49e5ee49c6bda1.css +25 -0
  25. khoj/interface/compiled/_next/static/css/592ca99f5122e75a.css +1 -0
  26. khoj/interface/compiled/_next/static/css/5a400c87d295e68a.css +1 -0
  27. khoj/interface/compiled/_next/static/css/c808691c459e3887.css +1 -0
  28. khoj/interface/compiled/agents/index.html +1 -1
  29. khoj/interface/compiled/agents/index.txt +2 -2
  30. khoj/interface/compiled/automations/index.html +1 -1
  31. khoj/interface/compiled/automations/index.txt +3 -3
  32. khoj/interface/compiled/chat/index.html +1 -1
  33. khoj/interface/compiled/chat/index.txt +2 -2
  34. khoj/interface/compiled/factchecker/index.html +1 -1
  35. khoj/interface/compiled/factchecker/index.txt +2 -2
  36. khoj/interface/compiled/index.html +1 -1
  37. khoj/interface/compiled/index.txt +2 -2
  38. khoj/interface/compiled/search/index.html +1 -1
  39. khoj/interface/compiled/search/index.txt +2 -2
  40. khoj/interface/compiled/settings/index.html +1 -1
  41. khoj/interface/compiled/settings/index.txt +3 -3
  42. khoj/interface/compiled/share/chat/index.html +1 -1
  43. khoj/interface/compiled/share/chat/index.txt +2 -2
  44. khoj/interface/email/welcome.html +1 -1
  45. khoj/processor/conversation/anthropic/anthropic_chat.py +2 -1
  46. khoj/processor/conversation/offline/chat_model.py +7 -2
  47. khoj/processor/conversation/openai/gpt.py +21 -12
  48. khoj/processor/conversation/utils.py +39 -14
  49. khoj/processor/tools/online_search.py +6 -2
  50. khoj/routers/api.py +4 -0
  51. khoj/routers/api_chat.py +65 -18
  52. khoj/routers/helpers.py +79 -21
  53. khoj/routers/storage.py +28 -0
  54. khoj/utils/helpers.py +15 -0
  55. {khoj-1.21.5.dist-info → khoj-1.21.7.dev7.dist-info}/METADATA +4 -4
  56. {khoj-1.21.5.dist-info → khoj-1.21.7.dev7.dist-info}/RECORD +61 -58
  57. khoj/interface/compiled/_next/static/chunks/3678-8c0e55c3b5d83a22.js +0 -25
  58. khoj/interface/compiled/_next/static/chunks/8423-132ea64eac83fd43.js +0 -1
  59. khoj/interface/compiled/_next/static/chunks/9001-acbca3e19b1a5ddf.js +0 -21
  60. khoj/interface/compiled/_next/static/chunks/9162-4a6d0d0dc5e27618.js +0 -11
  61. khoj/interface/compiled/_next/static/chunks/app/automations/page-fa3163653d2a72ac.js +0 -1
  62. khoj/interface/compiled/_next/static/chunks/app/chat/page-8c9b92236d4daf4b.js +0 -1
  63. khoj/interface/compiled/_next/static/chunks/app/factchecker/page-60be5e3295e2c0bc.js +0 -1
  64. khoj/interface/compiled/_next/static/chunks/app/page-ef4e7248d37fae41.js +0 -1
  65. khoj/interface/compiled/_next/static/chunks/app/share/chat/page-699b364dc6fbf139.js +0 -1
  66. khoj/interface/compiled/_next/static/css/9d5b867ec04494a6.css +0 -25
  67. khoj/interface/compiled/_next/static/css/a22d83f18a32957e.css +0 -1
  68. khoj/interface/compiled/_next/static/css/a3530ec58b0b660f.css +0 -1
  69. khoj/interface/compiled/_next/static/css/b81e909d403fb2df.css +0 -1
  70. khoj/interface/compiled/_next/static/css/df6f4c34ec280d53.css +0 -1
  71. /khoj/interface/compiled/_next/static/{c94a08w_ZKOpws32Cwk3G → r8hsIMReT-pry3sQxOVuA}/_buildManifest.js +0 -0
  72. /khoj/interface/compiled/_next/static/{c94a08w_ZKOpws32Cwk3G → r8hsIMReT-pry3sQxOVuA}/_ssgManifest.js +0 -0
  73. {khoj-1.21.5.dist-info → khoj-1.21.7.dev7.dist-info}/WHEEL +0 -0
  74. {khoj-1.21.5.dist-info → khoj-1.21.7.dev7.dist-info}/entry_points.txt +0 -0
  75. {khoj-1.21.5.dist-info → khoj-1.21.7.dev7.dist-info}/licenses/LICENSE +0 -0
khoj/routers/helpers.py CHANGED
@@ -97,6 +97,7 @@ from khoj.utils.helpers import (
     LRU,
     ConversationCommand,
     ImageIntentType,
+    convert_image_to_webp,
     is_none_or_empty,
     is_valid_url,
     log_telemetry,
@@ -252,7 +253,9 @@ async def acreate_title_from_query(query: str) -> str:
     return response.strip()


-async def aget_relevant_information_sources(query: str, conversation_history: dict, is_task: bool, subscribed: bool):
+async def aget_relevant_information_sources(
+    query: str, conversation_history: dict, is_task: bool, subscribed: bool, uploaded_image_url: str = None
+):
     """
     Given a query, determine which of the available tools the agent should use in order to answer appropriately.
     """
@@ -266,6 +269,9 @@ async def aget_relevant_information_sources(query: str, conversation_history: di

     chat_history = construct_chat_history(conversation_history)

+    if uploaded_image_url:
+        query = f"[placeholder for image attached to this message]\n{query}"
+
     relevant_tools_prompt = prompts.pick_relevant_information_collection_tools.format(
         query=query,
         tools=tool_options_str,
@@ -274,7 +280,9 @@ async def aget_relevant_information_sources(query: str, conversation_history: di

     with timer("Chat actor: Infer information sources to refer", logger):
         response = await send_message_to_model_wrapper(
-            relevant_tools_prompt, response_type="json_object", subscribed=subscribed
+            relevant_tools_prompt,
+            response_type="json_object",
+            subscribed=subscribed,
         )

     try:
@@ -302,7 +310,9 @@ async def aget_relevant_information_sources(query: str, conversation_history: di
         return [ConversationCommand.Default]


-async def aget_relevant_output_modes(query: str, conversation_history: dict, is_task: bool = False):
+async def aget_relevant_output_modes(
+    query: str, conversation_history: dict, is_task: bool = False, uploaded_image_url: str = None
+):
     """
     Given a query, determine which of the available tools the agent should use in order to answer appropriately.
     """
@@ -319,6 +329,9 @@ async def aget_relevant_output_modes(query: str, conversation_history: dict, is_

     chat_history = construct_chat_history(conversation_history)

+    if uploaded_image_url:
+        query = f"<user uploaded content redacted> \n{query}"
+
     relevant_mode_prompt = prompts.pick_relevant_output_mode.format(
         query=query,
         modes=mode_options_str,
@@ -347,7 +360,7 @@ async def aget_relevant_output_modes(query: str, conversation_history: dict, is_


 async def infer_webpage_urls(
-    q: str, conversation_history: dict, location_data: LocationData, user: KhojUser
+    q: str, conversation_history: dict, location_data: LocationData, user: KhojUser, uploaded_image_url: str = None
 ) -> List[str]:
     """
     Infer webpage links from the given query
@@ -366,7 +379,9 @@ async def infer_webpage_urls(
     )

     with timer("Chat actor: Infer webpage urls to read", logger):
-        response = await send_message_to_model_wrapper(online_queries_prompt, response_type="json_object")
+        response = await send_message_to_model_wrapper(
+            online_queries_prompt, uploaded_image_url=uploaded_image_url, response_type="json_object"
+        )

     # Validate that the response is a non-empty, JSON-serializable list of URLs
     try:
@@ -381,7 +396,7 @@ async def infer_webpage_urls(


 async def generate_online_subqueries(
-    q: str, conversation_history: dict, location_data: LocationData, user: KhojUser
+    q: str, conversation_history: dict, location_data: LocationData, user: KhojUser, uploaded_image_url: str = None
 ) -> List[str]:
     """
     Generate subqueries from the given query
@@ -400,7 +415,9 @@ async def generate_online_subqueries(
     )

     with timer("Chat actor: Generate online search subqueries", logger):
-        response = await send_message_to_model_wrapper(online_queries_prompt, response_type="json_object")
+        response = await send_message_to_model_wrapper(
+            online_queries_prompt, uploaded_image_url=uploaded_image_url, response_type="json_object"
+        )

     # Validate that the response is a non-empty, JSON-serializable list
     try:
@@ -419,7 +436,7 @@ async def generate_online_subqueries(
         return [q]


-async def schedule_query(q: str, conversation_history: dict) -> Tuple[str, ...]:
+async def schedule_query(q: str, conversation_history: dict, uploaded_image_url: str = None) -> Tuple[str, ...]:
     """
     Schedule the date, time to run the query. Assume the server timezone is UTC.
     """
@@ -430,7 +447,9 @@ async def schedule_query(q: str, conversation_history: dict) -> Tuple[str, ...]:
         chat_history=chat_history,
     )

-    raw_response = await send_message_to_model_wrapper(crontime_prompt, response_type="json_object")
+    raw_response = await send_message_to_model_wrapper(
+        crontime_prompt, uploaded_image_url=uploaded_image_url, response_type="json_object"
+    )

     # Validate that the response is a non-empty, JSON-serializable list
     try:
@@ -468,7 +487,9 @@ async def extract_relevant_info(q: str, corpus: str, subscribed: bool) -> Union[
     return response.strip()


-async def extract_relevant_summary(q: str, corpus: str, subscribed: bool = False) -> Union[str, None]:
+async def extract_relevant_summary(
+    q: str, corpus: str, subscribed: bool = False, uploaded_image_url: str = None
+) -> Union[str, None]:
     """
     Extract relevant information for a given query from the target corpus
     """
@@ -489,6 +510,7 @@ async def extract_relevant_summary(q: str, corpus: str, subscribed: bool = False
            prompts.system_prompt_extract_relevant_summary,
            chat_model_option=chat_model,
            subscribed=subscribed,
+            uploaded_image_url=uploaded_image_url,
        )
    return response.strip()

@@ -501,6 +523,7 @@ async def generate_better_image_prompt(
     online_results: Optional[dict] = None,
     model_type: Optional[str] = None,
     subscribed: bool = False,
+    uploaded_image_url: Optional[str] = None,
 ) -> str:
     """
     Generate a better image prompt from the given query
@@ -549,7 +572,7 @@ async def generate_better_image_prompt(

     with timer("Chat actor: Generate contextual image prompt", logger):
         response = await send_message_to_model_wrapper(
-            image_prompt, chat_model_option=chat_model, subscribed=subscribed
+            image_prompt, chat_model_option=chat_model, subscribed=subscribed, uploaded_image_url=uploaded_image_url
         )
         response = response.strip()
         if response.startswith(('"', "'")) and response.endswith(('"', "'")):
@@ -564,11 +587,19 @@ async def send_message_to_model_wrapper(
     response_type: str = "text",
     chat_model_option: ChatModelOptions = None,
     subscribed: bool = False,
+    uploaded_image_url: str = None,
 ):
     conversation_config: ChatModelOptions = (
         chat_model_option or await ConversationAdapters.aget_default_conversation_config()
     )

+    vision_available = conversation_config.vision_enabled
+    if not vision_available and uploaded_image_url:
+        vision_enabled_config = ConversationAdapters.get_vision_enabled_config()
+        if vision_enabled_config:
+            conversation_config = vision_enabled_config
+            vision_available = True
+
     chat_model = conversation_config.chat_model
     max_tokens = (
         conversation_config.subscribed_max_prompt_size
@@ -576,6 +607,7 @@
         else conversation_config.max_prompt_size
     )
     tokenizer = conversation_config.tokenizer
+    vision_available = conversation_config.vision_enabled

     if conversation_config.model_type == "offline":
         if state.offline_chat_processor_config is None or state.offline_chat_processor_config.loaded_model is None:
@@ -589,6 +621,8 @@
            loaded_model=loaded_model,
            tokenizer_name=tokenizer,
            max_prompt_size=max_tokens,
+            vision_enabled=vision_available,
+            model_type=conversation_config.model_type,
        )

        return send_message_to_model_offline(
@@ -609,6 +643,9 @@
            model_name=chat_model,
            max_prompt_size=max_tokens,
            tokenizer_name=tokenizer,
+            vision_enabled=vision_available,
+            uploaded_image_url=uploaded_image_url,
+            model_type=conversation_config.model_type,
        )

        openai_response = send_message_to_model(
@@ -628,6 +665,8 @@
            model_name=chat_model,
            max_prompt_size=max_tokens,
            tokenizer_name=tokenizer,
+            vision_enabled=vision_available,
+            model_type=conversation_config.model_type,
        )

        return anthropic_send_message_to_model(
@@ -651,6 +690,7 @@ def send_message_to_model_wrapper_sync(

     chat_model = conversation_config.chat_model
     max_tokens = conversation_config.max_prompt_size
+    vision_available = conversation_config.vision_enabled

     if conversation_config.model_type == "offline":
         if state.offline_chat_processor_config is None or state.offline_chat_processor_config.loaded_model is None:
@@ -658,7 +698,12 @@

        loaded_model = state.offline_chat_processor_config.loaded_model
        truncated_messages = generate_chatml_messages_with_context(
-            user_message=message, system_message=system_message, model_name=chat_model, loaded_model=loaded_model
+            user_message=message,
+            system_message=system_message,
+            model_name=chat_model,
+            loaded_model=loaded_model,
+            vision_enabled=vision_available,
+            model_type=conversation_config.model_type,
        )

        return send_message_to_model_offline(
@@ -672,7 +717,11 @@
    elif conversation_config.model_type == "openai":
        api_key = conversation_config.openai_config.api_key
        truncated_messages = generate_chatml_messages_with_context(
-            user_message=message, system_message=system_message, model_name=chat_model
+            user_message=message,
+            system_message=system_message,
+            model_name=chat_model,
+            vision_enabled=vision_available,
+            model_type=conversation_config.model_type,
        )

        openai_response = send_message_to_model(
@@ -688,6 +737,8 @@
            system_message=system_message,
            model_name=chat_model,
            max_prompt_size=max_tokens,
+            vision_enabled=vision_available,
+            model_type=conversation_config.model_type,
        )

        return anthropic_send_message_to_model(
@@ -712,6 +763,7 @@ def generate_chat_response(
     conversation_id: int = None,
     location_data: LocationData = None,
     user_name: Optional[str] = None,
+    uploaded_image_url: Optional[str] = None,
 ) -> Tuple[Union[ThreadedGenerator, Iterator[str]], Dict[str, str]]:
     # Initialize Variables
     chat_response = None
@@ -719,7 +771,6 @@

     metadata = {}
     agent = AgentAdapters.get_conversation_agent_by_id(conversation.agent.id) if conversation.agent else None
-
     try:
         partial_completion = partial(
             save_to_conversation_log,
@@ -731,9 +782,17 @@
            inferred_queries=inferred_queries,
            client_application=client_application,
            conversation_id=conversation_id,
+            uploaded_image_url=uploaded_image_url,
        )

        conversation_config = ConversationAdapters.get_valid_conversation_config(user, conversation)
+        vision_available = conversation_config.vision_enabled
+        if not vision_available and uploaded_image_url:
+            vision_enabled_config = ConversationAdapters.get_vision_enabled_config()
+            if vision_enabled_config:
+                conversation_config = vision_enabled_config
+                vision_available = True
+
        if conversation_config.model_type == "offline":
            loaded_model = state.offline_chat_processor_config.loaded_model
            chat_response = converse_offline(
@@ -759,6 +818,7 @@
            chat_response = converse(
                compiled_references,
                q,
+                image_url=uploaded_image_url,
                online_results=online_results,
                conversation_log=meta_log,
                model=chat_model,
@@ -771,6 +831,7 @@
                location_data=location_data,
                user_name=user_name,
                agent=agent,
+                vision_available=vision_available,
            )

        elif conversation_config.model_type == "anthropic":
@@ -809,6 +870,7 @@ async def text_to_image(
     online_results: Dict[str, Any],
     subscribed: bool = False,
     send_status_func: Optional[Callable] = None,
+    uploaded_image_url: Optional[str] = None,
 ):
     status_code = 200
     image = None
@@ -845,6 +907,7 @@ async def text_to_image(
            online_results=online_results,
            model_type=text_to_image_config.model_type,
            subscribed=subscribed,
+            uploaded_image_url=uploaded_image_url,
        )

    if send_status_func:
@@ -908,13 +971,7 @@

    with timer("Convert image to webp", logger):
        # Convert png to webp for faster loading
-        image_io = io.BytesIO(decoded_image)
-        png_image = Image.open(image_io)
-        webp_image_io = io.BytesIO()
-        png_image.save(webp_image_io, "WEBP")
-        webp_image_bytes = webp_image_io.getvalue()
-        webp_image_io.close()
-        image_io.close()
+        webp_image_bytes = convert_image_to_webp(decoded_image)

    with timer("Upload image to S3", logger):
        image_url = upload_image(webp_image_bytes, user.uuid)
@@ -1095,6 +1152,7 @@ def should_notify(original_query: str, executed_query: str, ai_response: str) ->

    with timer("Chat actor: Decide to notify user of automation response", logger):
        try:
+            # TODO Replace with async call so we don't have to maintain a sync version
            response = send_message_to_model_wrapper_sync(to_notify_or_not)
            should_notify_result = "no" not in response.lower()
            logger.info(f'Decided to {"not " if not should_notify_result else ""}notify user of automation response.')
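Taken together, the helpers.py changes thread an optional uploaded_image_url through the chat actors and, when the active chat model lacks vision support, swap in a vision-enabled model config. A minimal sketch of that fallback pattern follows; the ChatModelConfig dataclass and resolve_config helper are illustrative stand-ins, not Khoj's actual API:

from dataclasses import dataclass
from typing import Optional


@dataclass
class ChatModelConfig:
    # Illustrative stand-in for ChatModelOptions: just the fields the pattern needs
    chat_model: str
    vision_enabled: bool


def resolve_config(
    active: ChatModelConfig,
    uploaded_image_url: Optional[str],
    vision_fallback: Optional[ChatModelConfig],
) -> ChatModelConfig:
    """Keep the active model unless an image is attached and the model lacks vision."""
    if uploaded_image_url and not active.vision_enabled and vision_fallback:
        return vision_fallback
    return active


# Example: an image is attached, so the text-only model is swapped for the vision-enabled one.
active = ChatModelConfig("text-only-model", vision_enabled=False)
fallback = ChatModelConfig("vision-model", vision_enabled=True)
chosen = resolve_config(active, "https://example.com/upload.webp", fallback)
assert chosen is fallback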
khoj/routers/storage.py CHANGED
@@ -33,3 +33,31 @@ def upload_image(image: bytes, user_id: uuid.UUID):
     except Exception as e:
         logger.error(f"Failed to upload image to S3: {e}")
         return None
+
+
+AWS_USER_UPLOADED_IMAGES_BUCKET_NAME = os.getenv("AWS_USER_UPLOADED_IMAGES_BUCKET_NAME")
+
+
+def upload_image_to_bucket(image: bytes, user_id: uuid.UUID):
+    """Upload the image to the S3 bucket"""
+    if not aws_enabled:
+        logger.info("AWS is not enabled. Skipping image upload")
+        return None
+
+    image_key = f"{user_id}/{uuid.uuid4()}.webp"
+    if not AWS_USER_UPLOADED_IMAGES_BUCKET_NAME:
+        logger.error("AWS_USER_UPLOADED_IMAGES_BUCKET_NAME is not set")
+        return None
+
+    try:
+        s3_client.put_object(
+            Bucket=AWS_USER_UPLOADED_IMAGES_BUCKET_NAME,
+            Key=image_key,
+            Body=image,
+            ACL="public-read",
+            ContentType="image/webp",
+        )
+        return f"https://{AWS_USER_UPLOADED_IMAGES_BUCKET_NAME}/{image_key}"
+    except Exception as e:
+        logger.error(f"Failed to upload image to S3: {e}")
+        return None
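The new upload_image_to_bucket helper returns a public URL on success and None when AWS is disabled, the bucket name is unset, or the upload fails. A rough usage sketch, assuming AWS credentials and the AWS_USER_UPLOADED_IMAGES_BUCKET_NAME environment variable are configured and a local photo.webp exists:

import uuid

from khoj.routers.storage import upload_image_to_bucket

user_id = uuid.uuid4()  # stand-in for the authenticated user's UUID
with open("photo.webp", "rb") as f:
    public_url = upload_image_to_bucket(f.read(), user_id)

if public_url is None:
    # Upload was skipped or failed; callers proceed without attaching the image
    print("Image not uploaded")
else:
    print(f"Image available at {public_url}")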
khoj/utils/helpers.py CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations  # to avoid quoting type hints

 import datetime
+import io
 import logging
 import os
 import platform
@@ -22,6 +23,7 @@ import requests
 import torch
 from asgiref.sync import sync_to_async
 from magika import Magika
+from PIL import Image

 from khoj.utils import constants

@@ -416,3 +418,16 @@ def is_internet_connected():
         return response.status_code == 200
     except:
         return False
+
+
+def convert_image_to_webp(image_bytes):
+    """Convert image bytes to webp format for faster loading"""
+    image_io = io.BytesIO(image_bytes)
+    with Image.open(image_io) as original_image:
+        webp_image_io = io.BytesIO()
+        original_image.save(webp_image_io, "WEBP")
+
+        # Encode the WebP image back to base64
+        webp_image_bytes = webp_image_io.getvalue()
+        webp_image_io.close()
+        return webp_image_bytes
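The extracted convert_image_to_webp helper takes raw image bytes and returns WebP-encoded bytes, as used by text_to_image above. A short usage sketch, assuming a local diagram.png exists:

from khoj.utils.helpers import convert_image_to_webp

with open("diagram.png", "rb") as f:
    png_bytes = f.read()

# Re-encode to WebP for smaller size and faster loading
webp_bytes = convert_image_to_webp(png_bytes)

with open("diagram.webp", "wb") as f:
    f.write(webp_bytes)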
{khoj-1.21.5.dist-info → khoj-1.21.7.dev7.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: khoj
-Version: 1.21.5
+Version: 1.21.7.dev7
 Summary: Your Second Brain
 Project-URL: Homepage, https://khoj.dev
 Project-URL: Documentation, https://docs.khoj.dev
@@ -32,10 +32,10 @@ Requires-Dist: dateparser>=1.1.1
 Requires-Dist: defusedxml==0.7.1
 Requires-Dist: django-apscheduler==0.6.2
 Requires-Dist: django-phonenumber-field==7.3.0
-Requires-Dist: django==5.0.7
+Requires-Dist: django==5.0.8
 Requires-Dist: docx2txt==0.8
 Requires-Dist: einops==0.8.0
-Requires-Dist: fastapi>=0.104.1
+Requires-Dist: fastapi>=0.110.0
 Requires-Dist: httpx==0.25.0
 Requires-Dist: huggingface-hub>=0.22.2
 Requires-Dist: itsdangerous==2.1.2
@@ -71,7 +71,7 @@ Requires-Dist: tiktoken>=0.3.2
 Requires-Dist: torch==2.2.2
 Requires-Dist: transformers>=4.28.0
 Requires-Dist: tzdata==2023.3
-Requires-Dist: uvicorn==0.17.6
+Requires-Dist: uvicorn==0.30.6
 Requires-Dist: websockets==12.0
 Provides-Extra: dev
 Requires-Dist: black>=23.1.0; extra == 'dev'