khoj 1.42.9.dev26__py3-none-any.whl → 1.42.10.dev2__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (58)
  1. khoj/database/adapters/__init__.py +0 -20
  2. khoj/database/models/__init__.py +0 -1
  3. khoj/interface/compiled/404/index.html +2 -2
  4. khoj/interface/compiled/_next/static/chunks/app/chat/page-4c6b873a4a5c7d2f.js +1 -0
  5. khoj/interface/compiled/agents/index.html +2 -2
  6. khoj/interface/compiled/agents/index.txt +2 -2
  7. khoj/interface/compiled/automations/index.html +2 -2
  8. khoj/interface/compiled/automations/index.txt +3 -3
  9. khoj/interface/compiled/chat/index.html +2 -2
  10. khoj/interface/compiled/chat/index.txt +2 -2
  11. khoj/interface/compiled/index.html +2 -2
  12. khoj/interface/compiled/index.txt +2 -2
  13. khoj/interface/compiled/search/index.html +2 -2
  14. khoj/interface/compiled/search/index.txt +2 -2
  15. khoj/interface/compiled/settings/index.html +2 -2
  16. khoj/interface/compiled/settings/index.txt +4 -4
  17. khoj/interface/compiled/share/chat/index.html +2 -2
  18. khoj/interface/compiled/share/chat/index.txt +2 -2
  19. khoj/processor/content/markdown/markdown_to_entries.py +9 -38
  20. khoj/processor/content/org_mode/org_to_entries.py +2 -18
  21. khoj/processor/content/org_mode/orgnode.py +16 -18
  22. khoj/processor/content/text_to_entries.py +0 -30
  23. khoj/processor/conversation/anthropic/anthropic_chat.py +2 -11
  24. khoj/processor/conversation/anthropic/utils.py +103 -90
  25. khoj/processor/conversation/google/gemini_chat.py +1 -4
  26. khoj/processor/conversation/google/utils.py +18 -80
  27. khoj/processor/conversation/offline/chat_model.py +3 -3
  28. khoj/processor/conversation/openai/gpt.py +38 -13
  29. khoj/processor/conversation/openai/utils.py +12 -113
  30. khoj/processor/conversation/prompts.py +35 -17
  31. khoj/processor/conversation/utils.py +58 -129
  32. khoj/processor/operator/grounding_agent.py +1 -1
  33. khoj/processor/operator/operator_agent_binary.py +3 -4
  34. khoj/processor/tools/online_search.py +0 -18
  35. khoj/processor/tools/run_code.py +1 -1
  36. khoj/routers/api_chat.py +1 -1
  37. khoj/routers/api_content.py +6 -6
  38. khoj/routers/helpers.py +27 -297
  39. khoj/routers/research.py +155 -169
  40. khoj/search_type/text_search.py +0 -2
  41. khoj/utils/helpers.py +8 -284
  42. khoj/utils/initialization.py +2 -0
  43. khoj/utils/rawconfig.py +0 -11
  44. {khoj-1.42.9.dev26.dist-info → khoj-1.42.10.dev2.dist-info}/METADATA +1 -1
  45. {khoj-1.42.9.dev26.dist-info → khoj-1.42.10.dev2.dist-info}/RECORD +57 -57
  46. khoj/interface/compiled/_next/static/chunks/app/chat/page-76fc915800aa90f4.js +0 -1
  47. /khoj/interface/compiled/_next/static/chunks/{1327-3b1a41af530fa8ee.js → 1327-1a9107b9a2a04a98.js} +0 -0
  48. /khoj/interface/compiled/_next/static/chunks/{1915-fbfe167c84ad60c5.js → 1915-5c6508f6ebb62a30.js} +0 -0
  49. /khoj/interface/compiled/_next/static/chunks/{2117-e78b6902ad6f75ec.js → 2117-080746c8e170c81a.js} +0 -0
  50. /khoj/interface/compiled/_next/static/chunks/{2939-4d4084c5b888b960.js → 2939-4af3fd24b8ffc9ad.js} +0 -0
  51. /khoj/interface/compiled/_next/static/chunks/{4447-d6cf93724d57e34b.js → 4447-cd95608f8e93e711.js} +0 -0
  52. /khoj/interface/compiled/_next/static/chunks/{8667-4b7790573b08c50d.js → 8667-50b03a89e82e0ba7.js} +0 -0
  53. /khoj/interface/compiled/_next/static/chunks/{webpack-70e0762712341826.js → webpack-92ce8aaf95718ec4.js} +0 -0
  54. /khoj/interface/compiled/_next/static/{IYGyer2N7GdUJ7QHFghtY → cuzJcS32_a4L4a6gCZ63y}/_buildManifest.js +0 -0
  55. /khoj/interface/compiled/_next/static/{IYGyer2N7GdUJ7QHFghtY → cuzJcS32_a4L4a6gCZ63y}/_ssgManifest.js +0 -0
  56. {khoj-1.42.9.dev26.dist-info → khoj-1.42.10.dev2.dist-info}/WHEEL +0 -0
  57. {khoj-1.42.9.dev26.dist-info → khoj-1.42.10.dev2.dist-info}/entry_points.txt +0 -0
  58. {khoj-1.42.9.dev26.dist-info → khoj-1.42.10.dev2.dist-info}/licenses/LICENSE +0 -0
khoj/processor/conversation/utils.py CHANGED
@@ -10,7 +10,7 @@ from dataclasses import dataclass
 from datetime import datetime
 from enum import Enum
 from io import BytesIO
-from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
+from typing import Any, Callable, Dict, List, Literal, Optional, Union
 
 import PIL.Image
 import pyjson5
@@ -137,83 +137,60 @@ class OperatorRun:
         }
 
 
-class ToolCall:
-    def __init__(self, name: str, args: dict, id: str):
-        self.name = name
-        self.args = args
-        self.id = id
-
-
 class ResearchIteration:
     def __init__(
         self,
-        query: ToolCall | dict | str,
+        tool: str,
+        query: str,
         context: list = None,
         onlineContext: dict = None,
         codeContext: dict = None,
         operatorContext: dict | OperatorRun = None,
         summarizedResult: str = None,
         warning: str = None,
-        raw_response: list = None,
     ):
-        self.query = ToolCall(**query) if isinstance(query, dict) else query
+        self.tool = tool
+        self.query = query
         self.context = context
         self.onlineContext = onlineContext
         self.codeContext = codeContext
         self.operatorContext = OperatorRun(**operatorContext) if isinstance(operatorContext, dict) else operatorContext
         self.summarizedResult = summarizedResult
         self.warning = warning
-        self.raw_response = raw_response
 
     def to_dict(self) -> dict:
         data = vars(self).copy()
-        data["query"] = self.query.__dict__ if isinstance(self.query, ToolCall) else self.query
         data["operatorContext"] = self.operatorContext.to_dict() if self.operatorContext else None
         return data
 
 
 def construct_iteration_history(
     previous_iterations: List[ResearchIteration],
+    previous_iteration_prompt: str,
     query: str = None,
-    query_images: List[str] = None,
-    query_files: str = None,
 ) -> list[ChatMessageModel]:
     iteration_history: list[ChatMessageModel] = []
-    query_message_content = construct_structured_message(query, query_images, attached_file_context=query_files)
-    if query_message_content:
-        iteration_history.append(ChatMessageModel(by="you", message=query_message_content))
+    previous_iteration_messages: list[dict] = []
+    for idx, iteration in enumerate(previous_iterations):
+        iteration_data = previous_iteration_prompt.format(
+            tool=iteration.tool,
+            query=iteration.query,
+            result=iteration.summarizedResult,
+            index=idx + 1,
+        )
 
-    for iteration in previous_iterations:
-        if not iteration.query or isinstance(iteration.query, str):
-            iteration_history.append(
-                ChatMessageModel(
-                    by="you",
-                    message=iteration.summarizedResult
-                    or iteration.warning
-                    or "Please specify what you want to do next.",
-                )
-            )
-            continue
-        iteration_history += [
+        previous_iteration_messages.append({"type": "text", "text": iteration_data})
+
+    if previous_iteration_messages:
+        if query:
+            iteration_history.append(ChatMessageModel(by="you", message=query))
+        iteration_history.append(
             ChatMessageModel(
                 by="khoj",
-                message=iteration.raw_response or [iteration.query.__dict__],
-                intent=Intent(type="tool_call", query=query),
-            ),
-            ChatMessageModel(
-                by="you",
-                intent=Intent(type="tool_result"),
-                message=[
-                    {
-                        "type": "tool_result",
-                        "id": iteration.query.id,
-                        "name": iteration.query.name,
-                        "content": iteration.summarizedResult,
-                    }
-                ],
-            ),
-        ]
-
+                intent=Intent(type="remember", query=query),
+                message=previous_iteration_messages,
+            )
+        )
     return iteration_history
 

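The rewrite above replaces the per-step tool_call/tool_result message pairs with a single "remember" message whose content is a list of plain text parts, one per prior research step. A minimal standalone sketch of that flattening, using a plain dataclass in place of ResearchIteration and a hypothetical stand-in for the previous_iteration_prompt template (the real template ships in khoj/processor/conversation/prompts.py and may differ):

```python
from dataclasses import dataclass


# Hypothetical stand-in for khoj's ResearchIteration, trimmed to the
# fields the new construct_iteration_history actually reads.
@dataclass
class Iteration:
    tool: str
    query: str
    summarizedResult: str


# Assumed template shape, for illustration only.
PREVIOUS_ITERATION_PROMPT = (
    "# Iteration {index}\nTool: {tool}\nQuery: {query}\nResult: {result}\n"
)

iterations = [
    Iteration("notes", "vacation plans", "Found 2 notes about Bali."),
    Iteration("online", "bali weather june", "Avg 27C, low rainfall."),
]

# Each prior step becomes one text part; all parts ride in a single
# message instead of a tool_call/tool_result message pair per step.
previous_iteration_messages = [
    {
        "type": "text",
        "text": PREVIOUS_ITERATION_PROMPT.format(
            index=idx + 1, tool=it.tool, query=it.query, result=it.summarizedResult
        ),
    }
    for idx, it in enumerate(iterations)
]

print(previous_iteration_messages[0]["text"])
```

Once steps are serialized to plain text like this, no structured tool-call metadata has to survive a round trip, which is why the ToolCall class and the raw_response field can be dropped.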
@@ -325,44 +302,33 @@ def construct_tool_chat_history(
         ConversationCommand.Notes: (
             lambda iteration: [c["query"] for c in iteration.context] if iteration.context else []
         ),
-        ConversationCommand.SearchWeb: (
+        ConversationCommand.Online: (
             lambda iteration: list(iteration.onlineContext.keys()) if iteration.onlineContext else []
         ),
-        ConversationCommand.ReadWebpage: (
+        ConversationCommand.Webpage: (
             lambda iteration: list(iteration.onlineContext.keys()) if iteration.onlineContext else []
         ),
-        ConversationCommand.RunCode: (
+        ConversationCommand.Code: (
             lambda iteration: list(iteration.codeContext.keys()) if iteration.codeContext else []
         ),
     }
     for iteration in previous_iterations:
-        if not iteration.query or isinstance(iteration.query, str):
-            chat_history.append(
-                ChatMessageModel(
-                    by="you",
-                    message=iteration.summarizedResult
-                    or iteration.warning
-                    or "Please specify what you want to do next.",
-                )
-            )
-            continue
-
         # If a tool is provided use the inferred query extractor for that tool if available
         # If no tool is provided, use inferred query extractor for the tool used in the iteration
         # Fallback to base extractor if the tool does not have an inferred query extractor
         inferred_query_extractor = extract_inferred_query_map.get(
-            tool or ConversationCommand(iteration.query.name), base_extractor
+            tool or ConversationCommand(iteration.tool), base_extractor
         )
         chat_history += [
             ChatMessageModel(
                 by="you",
-                message=yaml.dump(iteration.query.args, default_flow_style=False),
+                message=iteration.query,
             ),
             ChatMessageModel(
                 by="khoj",
                 intent=Intent(
                     type="remember",
-                    query=yaml.dump(iteration.query.args, default_flow_style=False),
+                    query=iteration.query,
                     inferred_queries=inferred_query_extractor(iteration),
                     memory_type="notes",
                 ),
@@ -515,32 +481,28 @@ Khoj: "{chat_response}"
 
 def construct_structured_message(
     message: list[dict] | str,
-    images: list[str] = None,
-    model_type: str = None,
-    vision_enabled: bool = True,
+    images: list[str],
+    model_type: str,
+    vision_enabled: bool,
     attached_file_context: str = None,
 ):
     """
-    Format messages into appropriate multimedia format for supported chat model types.
-
-    Assume vision is enabled and chat model provider supports messages in chatml format, unless specified otherwise.
+    Format messages into appropriate multimedia format for supported chat model types
     """
-    if not model_type or model_type in [
+    if model_type in [
         ChatModel.ModelType.OPENAI,
         ChatModel.ModelType.GOOGLE,
         ChatModel.ModelType.ANTHROPIC,
     ]:
-        constructed_messages: List[dict[str, Any]] = []
-        if not is_none_or_empty(message):
-            constructed_messages += [{"type": "text", "text": message}] if isinstance(message, str) else message
-        # Drop image message passed by caller if chat model does not have vision enabled
-        if not vision_enabled:
-            constructed_messages = [m for m in constructed_messages if m.get("type") != "image_url"]
+        constructed_messages: List[dict[str, Any]] = (
+            [{"type": "text", "text": message}] if isinstance(message, str) else message
+        )
+
         if not is_none_or_empty(attached_file_context):
-            constructed_messages += [{"type": "text", "text": attached_file_context}]
+            constructed_messages.append({"type": "text", "text": attached_file_context})
         if vision_enabled and images:
             for image in images:
-                constructed_messages += [{"type": "image_url", "image_url": {"url": image}}]
+                constructed_messages.append({"type": "image_url", "image_url": {"url": image}})
         return constructed_messages
 
     message = message if isinstance(message, str) else "\n\n".join(m["text"] for m in message)
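With the parameter defaults removed, callers must now pass images, model_type, and vision_enabled explicitly. A standalone sketch of the content-part list the simplified body builds for chatml-style providers (plain dicts with illustrative values, no khoj imports):

```python
# Illustrative inputs; the data URI is a truncated sample, not real image data.
message = "What does this chart show?"
attached_file_context = "# File: report.md\nQuarterly revenue notes..."
images = ["data:image/webp;base64,UklGRh4A..."]
vision_enabled = True

# Mirrors the simplified body: the message becomes a text part, file context
# is appended as another text part, then each image as an image_url part.
constructed_messages = [{"type": "text", "text": message}]
if attached_file_context:
    constructed_messages.append({"type": "text", "text": attached_file_context})
if vision_enabled and images:
    for image in images:
        constructed_messages.append({"type": "image_url", "image_url": {"url": image}})

print([part["type"] for part in constructed_messages])  # ['text', 'text', 'image_url']
```

Note the old guard that filtered image_url parts out of the incoming message when vision was disabled is gone; responsibility for not passing images to non-vision models appears to shift to callers.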
@@ -646,7 +608,7 @@ def generate_chatml_messages_with_context(
 
         if not is_none_or_empty(chat.context):
             references = "\n\n".join(
-                {f"# URI: {item.uri}\n## {item.compiled}\n" for item in chat.context or [] if isinstance(item, dict)}
+                {f"# File: {item.file}\n## {item.compiled}\n" for item in chat.context or [] if isinstance(item, dict)}
             )
             message_context += [{"type": "text", "text": f"{prompts.notes_conversation.format(references=references)}"}]
 
@@ -676,11 +638,7 @@ def generate_chatml_messages_with_context(
             chat_message, chat.images if role == "user" else [], model_type, vision_enabled
         )
 
-        reconstructed_message = ChatMessage(
-            content=message_content,
-            role=role,
-            additional_kwargs={"message_type": chat.intent.type if chat.intent else None},
-        )
+        reconstructed_message = ChatMessage(content=message_content, role=role)
         chatml_messages.insert(0, reconstructed_message)
 
         if len(chatml_messages) >= 3 * lookback_turns:
@@ -779,21 +737,10 @@ def count_tokens(
         message_content_parts: list[str] = []
         # Collate message content into single string to ease token counting
         for part in message_content:
-            if isinstance(part, dict) and part.get("type") == "image_url":
-                image_count += 1
-            elif isinstance(part, dict) and part.get("type") == "text":
+            if isinstance(part, dict) and part.get("type") == "text":
                 message_content_parts.append(part["text"])
-            elif isinstance(part, dict) and hasattr(part, "model_dump"):
-                message_content_parts.append(json.dumps(part.model_dump()))
-            elif isinstance(part, dict) and hasattr(part, "__dict__"):
-                message_content_parts.append(json.dumps(part.__dict__))
-            elif isinstance(part, dict):
-                # If part is a dict but not a recognized type, convert to JSON string
-                try:
-                    message_content_parts.append(json.dumps(part))
-                except (TypeError, ValueError) as e:
-                    logger.warning(f"Failed to serialize part {part} to JSON: {e}. Skipping.")
-                    image_count += 1  # Treat as an image/binary if serialization fails
+            elif isinstance(part, dict) and part.get("type") == "image_url":
+                image_count += 1
             elif isinstance(part, str):
                 message_content_parts.append(part)
             else:
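After the simplification, the loop only distinguishes three part shapes: text dicts, image_url dicts, and plain strings. A quick runnable sketch of that dispatch with sample parts:

```python
# Sample message content parts; values are illustrative.
message_content = [
    {"type": "text", "text": "Describe this image."},
    {"type": "image_url", "image_url": {"url": "data:image/webp;base64,..."}},
    "an extra plain-string part",
]

message_content_parts: list[str] = []
image_count = 0
for part in message_content:
    if isinstance(part, dict) and part.get("type") == "text":
        message_content_parts.append(part["text"])
    elif isinstance(part, dict) and part.get("type") == "image_url":
        image_count += 1
    elif isinstance(part, str):
        message_content_parts.append(part)

print(len(message_content_parts), image_count)  # 2 1
```

The deleted hasattr branches appear to have been unreachable anyway: a value that passes isinstance(part, dict) is a plain dict, which has neither a model_dump method nor a __dict__ attribute.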
@@ -806,15 +753,6 @@ def count_tokens(
         return len(encoder.encode(json.dumps(message_content)))
 
 
-def count_total_tokens(messages: list[ChatMessage], encoder, system_message: Optional[ChatMessage]) -> Tuple[int, int]:
-    """Count total tokens in messages including system message"""
-    system_message_tokens = count_tokens(system_message.content, encoder) if system_message else 0
-    message_tokens = sum([count_tokens(message.content, encoder) for message in messages])
-    # Reserves 4 tokens to demarcate each message (e.g <|im_start|>user, <|im_end|>, <|endoftext|> etc.)
-    total_tokens = message_tokens + system_message_tokens + 4 * len(messages)
-    return total_tokens, system_message_tokens
-
-
 def truncate_messages(
     messages: list[ChatMessage],
     max_prompt_size: int,
@@ -833,30 +771,23 @@ def truncate_messages(
             break
 
     # Drop older messages until under max supported prompt size by model
-    total_tokens, system_message_tokens = count_total_tokens(messages, encoder, system_message)
+    # Reserves 4 tokens to demarcate each message (e.g <|im_start|>user, <|im_end|>, <|endoftext|> etc.)
+    system_message_tokens = count_tokens(system_message.content, encoder) if system_message else 0
+    tokens = sum([count_tokens(message.content, encoder) for message in messages])
+    total_tokens = tokens + system_message_tokens + 4 * len(messages)
 
     while total_tokens > max_prompt_size and (len(messages) > 1 or len(messages[0].content) > 1):
-        # If the last message has more than one content part, pop the oldest content part.
-        # For tool calls, the whole message should dropped, assistant's tool call content being truncated annoys AI APIs.
-        if len(messages[-1].content) > 1 and messages[-1].additional_kwargs.get("message_type") != "tool_call":
+        if len(messages[-1].content) > 1:
             # The oldest content part is earlier in content list. So pop from the front.
             messages[-1].content.pop(0)
-        # Otherwise, pop the last message if it has only one content part or is a tool call.
         else:
             # The oldest message is the last one. So pop from the back.
-            dropped_message = messages.pop()
-            # Drop tool result pair of tool call, if tool call message has been removed
-            if (
-                dropped_message.additional_kwargs.get("message_type") == "tool_call"
-                and messages
-                and messages[-1].additional_kwargs.get("message_type") == "tool_result"
-            ):
-                messages.pop()
-
-        total_tokens, _ = count_total_tokens(messages, encoder, system_message)
+            messages.pop()
+        tokens = sum([count_tokens(message.content, encoder) for message in messages])
+        total_tokens = tokens + system_message_tokens + 4 * len(messages)
 
     # Truncate current message if still over max supported prompt size by model
-    total_tokens, _ = count_total_tokens(messages, encoder, system_message)
+    total_tokens = tokens + system_message_tokens + 4 * len(messages)
     if total_tokens > max_prompt_size:
         # At this point, a single message with a single content part of type dict should remain
         assert (
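With the count_total_tokens helper deleted (see the previous hunk), truncate_messages inlines the same accounting. A self-contained sketch of the arithmetic, with a toy whitespace tokenizer standing in for the real encoder:

```python
# Toy encoder for illustration only; khoj passes a real tokenizer here.
def count_tokens(text: str) -> int:
    return len(text.split())


messages = ["Hello, how are you today?", "Fine, thanks for asking!"]
system_message = "You are a helpful assistant."

# Message tokens + system message tokens + 4 tokens per message to
# demarcate roles (e.g. <|im_start|>user, <|im_end|>, <|endoftext|>).
system_message_tokens = count_tokens(system_message)
tokens = sum(count_tokens(m) for m in messages)
total_tokens = tokens + system_message_tokens + 4 * len(messages)

print(total_tokens)  # 5 + 4 + 5 + 4 * 2 = 22
```

The truncation loop then drops the oldest content part, or the oldest whole message, and recomputes this total until it fits under max_prompt_size.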
@@ -1218,15 +1149,13 @@ def messages_to_print(messages: list[ChatMessage], max_length: int = 70) -> str:
     return "\n".join([f"{json.dumps(safe_serialize(message.content))[:max_length]}..." for message in messages])
 
 
-class StructuredOutputSupport(int, Enum):
+class JsonSupport(int, Enum):
     NONE = 0
     OBJECT = 1
     SCHEMA = 2
-    TOOL = 3
 
 
 class ResponseWithThought:
-    def __init__(self, text: str = None, thought: str = None, raw_content: list = None):
-        self.text = text
+    def __init__(self, response: str = None, thought: str = None):
+        self.response = response
         self.thought = thought
-        self.raw_content = raw_content
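Callers change in step with the rename: streamed items are read via .response instead of .text, and raw_content disappears. A tiny sketch of the consuming side, mirroring the api_chat.py hunk further down:

```python
class ResponseWithThought:
    def __init__(self, response: str = None, thought: str = None):
        self.response = response
        self.thought = thought


# Hypothetical streamed item, as a caller like api_chat.py would see it.
item = ResponseWithThought(response="Bali averages 27C in June.", thought="Recall weather data.")

full_response = ""
message = item.response  # previously item.text
full_response += message if message else ""
print(full_response)
```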
khoj/processor/operator/grounding_agent.py CHANGED
@@ -73,7 +73,7 @@ class GroundingAgent:
         grounding_user_prompt = self.get_instruction(instruction, self.environment_type)
         screenshots = [f"data:image/webp;base64,{current_state.screenshot}"]
         grounding_messages_content = construct_structured_message(
-            grounding_user_prompt, screenshots, self.model.model_type, vision_enabled=True
+            grounding_user_prompt, screenshots, self.model.name, vision_enabled=True
         )
         return [{"role": "user", "content": grounding_messages_content}]
 
khoj/processor/operator/operator_agent_binary.py CHANGED
@@ -121,7 +121,7 @@ class BinaryOperatorAgent(OperatorAgent):
         # Construct input for visual reasoner history
         visual_reasoner_history = self._format_message_for_api(self.messages)
         try:
-            raw_response = await send_message_to_model_wrapper(
+            natural_language_action = await send_message_to_model_wrapper(
                 query=query_text,
                 query_images=query_screenshot,
                 system_message=reasoning_system_prompt,
@@ -129,7 +129,6 @@ class BinaryOperatorAgent(OperatorAgent):
                 agent_chat_model=self.reasoning_model,
                 tracer=self.tracer,
             )
-            natural_language_action = raw_response.text
 
             if not isinstance(natural_language_action, str) or not natural_language_action.strip():
                 raise ValueError(f"Natural language action is empty or not a string. Got {natural_language_action}")
@@ -256,10 +255,10 @@ class BinaryOperatorAgent(OperatorAgent):
 
         # Append summary messages to history
         trigger_summary = AgentMessage(role="user", content=summarize_prompt)
-        summary_message = AgentMessage(role="assistant", content=summary.text)
+        summary_message = AgentMessage(role="assistant", content=summary)
         self.messages.extend([trigger_summary, summary_message])
 
-        return summary.text
+        return summary
 
     def _compile_response(self, response_content: str | List) -> str:
         """Compile response content into a string, handling OpenAI message structures."""
khoj/processor/tools/online_search.py CHANGED
@@ -390,25 +390,7 @@ async def read_webpages(
         query_files=query_files,
         tracer=tracer,
     )
-    async for result in read_webpages_content(
-        query,
-        urls,
-        user,
-        send_status_func=send_status_func,
-        agent=agent,
-        tracer=tracer,
-    ):
-        yield result
-
 
-async def read_webpages_content(
-    query: str,
-    urls: List[str],
-    user: KhojUser,
-    send_status_func: Optional[Callable] = None,
-    agent: Agent = None,
-    tracer: dict = {},
-):
     logger.info(f"Reading web pages at: {urls}")
     if send_status_func:
         webpage_links_str = "\n- " + "\n- ".join(list(urls))
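The hunk above removes an indirection: read_webpages previously delegated to a separate read_webpages_content async generator via `async for ... yield`, and now the helper's body continues inline. A minimal sketch of the resulting shape, with hypothetical names and payloads:

```python
import asyncio


async def read_webpages(urls):
    # Previously: `async for result in read_webpages_content(...): yield result`.
    # After the refactor, the fetching logic lives directly in this generator.
    for url in urls:
        await asyncio.sleep(0)  # stand-in for the actual page fetch
        yield {"url": url, "text": f"content of {url}"}


async def main():
    async for result in read_webpages(["https://example.com"]):
        print(result["url"])


asyncio.run(main())
```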
khoj/processor/tools/run_code.py CHANGED
@@ -161,7 +161,7 @@ async def generate_python_code(
     )
 
     # Extract python code wrapped in markdown code blocks from the response
-    code_blocks = re.findall(r"```(?:python)?\n(.*?)```", response.text, re.DOTALL)
+    code_blocks = re.findall(r"```(?:python)?\n(.*?)```", response, re.DOTALL)
 
     if not code_blocks:
         raise ValueError("No Python code blocks found in response")
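Since the model wrapper now returns a plain string rather than a ResponseWithThought, the response feeds re.findall directly. A runnable check of the code-fence regex (the sample response is assembled at runtime so no line of this snippet itself starts with a fence):

```python
import re

# Build a sample model response containing one fenced python block.
fence = "`" * 3
response = f"Here is the program:\n{fence}python\nprint('hello')\n{fence}\n"

# Same pattern as in the diff: optionally language-tagged fences, non-greedy body.
code_blocks = re.findall(r"```(?:python)?\n(.*?)```", response, re.DOTALL)
print(code_blocks)  # ["print('hello')\n"]
```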
khoj/routers/api_chat.py CHANGED
@@ -1390,7 +1390,7 @@ async def chat(
                 continue
             if cancellation_event.is_set():
                 break
-            message = item.text
+            message = item.response
             full_response += message if message else ""
             if item.thought:
                 async for result in send_event(ChatEvent.THOUGHT, item.thought):
khoj/routers/api_content.py CHANGED
@@ -101,9 +101,9 @@ async def put_content(
     host: Optional[str] = Header(None),
     indexed_data_limiter: ApiIndexedDataLimiter = Depends(
         ApiIndexedDataLimiter(
-            incoming_entries_size_limit=10,
-            subscribed_incoming_entries_size_limit=75,
-            total_entries_size_limit=10,
+            incoming_entries_size_limit=50,
+            subscribed_incoming_entries_size_limit=100,
+            total_entries_size_limit=50,
             subscribed_total_entries_size_limit=500,
         )
     ),
@@ -123,9 +123,9 @@ async def patch_content(
     host: Optional[str] = Header(None),
     indexed_data_limiter: ApiIndexedDataLimiter = Depends(
         ApiIndexedDataLimiter(
-            incoming_entries_size_limit=10,
-            subscribed_incoming_entries_size_limit=75,
-            total_entries_size_limit=10,
+            incoming_entries_size_limit=50,
+            subscribed_incoming_entries_size_limit=100,
+            total_entries_size_limit=50,
             subscribed_total_entries_size_limit=500,
         )
     ),