local-deep-research 0.5.9__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. local_deep_research/__version__.py +1 -1
  2. local_deep_research/advanced_search_system/candidate_exploration/progressive_explorer.py +11 -1
  3. local_deep_research/advanced_search_system/questions/browsecomp_question.py +32 -6
  4. local_deep_research/advanced_search_system/strategies/focused_iteration_strategy.py +32 -8
  5. local_deep_research/advanced_search_system/strategies/source_based_strategy.py +2 -0
  6. local_deep_research/api/__init__.py +2 -0
  7. local_deep_research/api/research_functions.py +177 -3
  8. local_deep_research/benchmarks/graders.py +150 -5
  9. local_deep_research/benchmarks/models/__init__.py +19 -0
  10. local_deep_research/benchmarks/models/benchmark_models.py +283 -0
  11. local_deep_research/benchmarks/ui/__init__.py +1 -0
  12. local_deep_research/benchmarks/web_api/__init__.py +6 -0
  13. local_deep_research/benchmarks/web_api/benchmark_routes.py +862 -0
  14. local_deep_research/benchmarks/web_api/benchmark_service.py +920 -0
  15. local_deep_research/config/llm_config.py +106 -21
  16. local_deep_research/defaults/default_settings.json +447 -2
  17. local_deep_research/error_handling/report_generator.py +10 -0
  18. local_deep_research/llm/__init__.py +19 -0
  19. local_deep_research/llm/llm_registry.py +155 -0
  20. local_deep_research/metrics/db_models.py +3 -7
  21. local_deep_research/metrics/search_tracker.py +25 -11
  22. local_deep_research/search_system.py +12 -9
  23. local_deep_research/utilities/log_utils.py +23 -10
  24. local_deep_research/utilities/thread_context.py +99 -0
  25. local_deep_research/web/app_factory.py +32 -8
  26. local_deep_research/web/database/benchmark_schema.py +230 -0
  27. local_deep_research/web/database/convert_research_id_to_string.py +161 -0
  28. local_deep_research/web/database/models.py +55 -1
  29. local_deep_research/web/database/schema_upgrade.py +397 -2
  30. local_deep_research/web/database/uuid_migration.py +265 -0
  31. local_deep_research/web/routes/api_routes.py +62 -31
  32. local_deep_research/web/routes/history_routes.py +13 -6
  33. local_deep_research/web/routes/metrics_routes.py +264 -4
  34. local_deep_research/web/routes/research_routes.py +45 -18
  35. local_deep_research/web/routes/route_registry.py +352 -0
  36. local_deep_research/web/routes/settings_routes.py +382 -22
  37. local_deep_research/web/services/research_service.py +22 -29
  38. local_deep_research/web/services/settings_manager.py +53 -0
  39. local_deep_research/web/services/settings_service.py +2 -0
  40. local_deep_research/web/static/css/styles.css +8 -0
  41. local_deep_research/web/static/js/components/detail.js +7 -14
  42. local_deep_research/web/static/js/components/details.js +8 -10
  43. local_deep_research/web/static/js/components/fallback/ui.js +4 -4
  44. local_deep_research/web/static/js/components/history.js +6 -6
  45. local_deep_research/web/static/js/components/logpanel.js +14 -11
  46. local_deep_research/web/static/js/components/progress.js +51 -46
  47. local_deep_research/web/static/js/components/research.js +250 -89
  48. local_deep_research/web/static/js/components/results.js +5 -7
  49. local_deep_research/web/static/js/components/settings.js +32 -26
  50. local_deep_research/web/static/js/components/settings_sync.js +24 -23
  51. local_deep_research/web/static/js/config/urls.js +285 -0
  52. local_deep_research/web/static/js/main.js +8 -8
  53. local_deep_research/web/static/js/research_form.js +267 -12
  54. local_deep_research/web/static/js/services/api.js +18 -18
  55. local_deep_research/web/static/js/services/keyboard.js +8 -8
  56. local_deep_research/web/static/js/services/socket.js +53 -35
  57. local_deep_research/web/static/js/services/ui.js +1 -1
  58. local_deep_research/web/templates/base.html +4 -1
  59. local_deep_research/web/templates/components/custom_dropdown.html +5 -3
  60. local_deep_research/web/templates/components/mobile_nav.html +3 -3
  61. local_deep_research/web/templates/components/sidebar.html +9 -3
  62. local_deep_research/web/templates/pages/benchmark.html +2697 -0
  63. local_deep_research/web/templates/pages/benchmark_results.html +1136 -0
  64. local_deep_research/web/templates/pages/benchmark_simple.html +453 -0
  65. local_deep_research/web/templates/pages/cost_analytics.html +1 -1
  66. local_deep_research/web/templates/pages/metrics.html +212 -39
  67. local_deep_research/web/templates/pages/research.html +8 -6
  68. local_deep_research/web/templates/pages/star_reviews.html +1 -1
  69. local_deep_research/web_search_engines/engines/search_engine_arxiv.py +14 -1
  70. local_deep_research/web_search_engines/engines/search_engine_brave.py +15 -1
  71. local_deep_research/web_search_engines/engines/search_engine_ddg.py +20 -1
  72. local_deep_research/web_search_engines/engines/search_engine_google_pse.py +26 -2
  73. local_deep_research/web_search_engines/engines/search_engine_pubmed.py +15 -1
  74. local_deep_research/web_search_engines/engines/search_engine_retriever.py +192 -0
  75. local_deep_research/web_search_engines/engines/search_engine_tavily.py +307 -0
  76. local_deep_research/web_search_engines/rate_limiting/__init__.py +14 -0
  77. local_deep_research/web_search_engines/rate_limiting/__main__.py +9 -0
  78. local_deep_research/web_search_engines/rate_limiting/cli.py +209 -0
  79. local_deep_research/web_search_engines/rate_limiting/exceptions.py +21 -0
  80. local_deep_research/web_search_engines/rate_limiting/tracker.py +506 -0
  81. local_deep_research/web_search_engines/retriever_registry.py +108 -0
  82. local_deep_research/web_search_engines/search_engine_base.py +161 -43
  83. local_deep_research/web_search_engines/search_engine_factory.py +14 -0
  84. local_deep_research/web_search_engines/search_engines_config.py +20 -0
  85. local_deep_research-0.6.0.dist-info/METADATA +374 -0
  86. {local_deep_research-0.5.9.dist-info → local_deep_research-0.6.0.dist-info}/RECORD +89 -64
  87. local_deep_research-0.5.9.dist-info/METADATA +0 -420
  88. {local_deep_research-0.5.9.dist-info → local_deep_research-0.6.0.dist-info}/WHEEL +0 -0
  89. {local_deep_research-0.5.9.dist-info → local_deep_research-0.6.0.dist-info}/entry_points.txt +0 -0
  90. {local_deep_research-0.5.9.dist-info → local_deep_research-0.6.0.dist-info}/licenses/LICENSE +0 -0
@@ -3,7 +3,7 @@ from functools import cache
3
3
 
4
4
  from langchain_anthropic import ChatAnthropic
5
5
  from langchain_community.llms import VLLM
6
- from langchain_core.language_models import FakeListChatModel
6
+ from langchain_core.language_models import FakeListChatModel, BaseChatModel
7
7
  from langchain_ollama import ChatOllama
8
8
  from langchain_openai import ChatOpenAI
9
9
  from loguru import logger
@@ -11,6 +11,7 @@ from loguru import logger
11
11
  from ..utilities.db_utils import get_db_setting
12
12
  from ..utilities.search_utilities import remove_think_tags
13
13
  from ..utilities.url_utils import normalize_url
14
+ from ..llm import get_llm_from_registry, is_llm_registered
14
15
 
15
16
  # Valid provider options
16
17
  VALID_PROVIDERS = [
@@ -206,16 +207,6 @@ def get_llm(
206
207
  if provider is None:
207
208
  provider = get_db_setting("llm.provider", "ollama")
208
209
 
209
- # Check if we're in testing mode and should use fallback
210
- if os.environ.get("LDR_USE_FALLBACK_LLM", ""):
211
- logger.info("LDR_USE_FALLBACK_LLM is set, using fallback model")
212
- return wrap_llm_without_think_tags(
213
- get_fallback_model(temperature),
214
- research_id=research_id,
215
- provider="fallback",
216
- research_context=research_context,
217
- )
218
-
219
210
  # Clean model name: remove quotes and extra whitespace
220
211
  if model_name:
221
212
  model_name = model_name.strip().strip("\"'").strip()
@@ -227,6 +218,74 @@ def get_llm(
227
218
  # Normalize provider: convert to lowercase
228
219
  provider = provider.lower() if provider else None
229
220
 
221
+ # Check if this is a registered custom LLM first
222
+ if provider and is_llm_registered(provider):
223
+ logger.info(f"Using registered custom LLM: {provider}")
224
+ custom_llm = get_llm_from_registry(provider)
225
+
226
+ # Check if it's already a BaseChatModel instance
227
+ if isinstance(custom_llm, BaseChatModel):
228
+ # It's already an LLM instance, use it directly
229
+ llm_instance = custom_llm
230
+ elif callable(custom_llm):
231
+ # It's a factory function, call it with parameters
232
+ try:
233
+ llm_instance = custom_llm(
234
+ model_name=model_name,
235
+ temperature=temperature,
236
+ )
237
+ except Exception as e:
238
+ logger.exception(f"Error creating custom LLM instance: {e}")
239
+ raise
240
+ else:
241
+ raise ValueError(
242
+ f"Registered LLM {provider} is neither a BaseChatModel nor a callable factory"
243
+ )
244
+
245
+ return wrap_llm_without_think_tags(
246
+ llm_instance,
247
+ research_id=research_id,
248
+ provider=provider,
249
+ research_context=research_context,
250
+ )
251
+
252
+ # Check if we're in testing mode and should use fallback (but only when no API keys are configured)
253
+ if os.environ.get("LDR_USE_FALLBACK_LLM", ""):
254
+ # Only use fallback if the provider has no valid configuration
255
+ provider_has_config = False
256
+
257
+ if provider == "openai" and get_db_setting("llm.openai.api_key"):
258
+ provider_has_config = True
259
+ elif provider == "anthropic" and get_db_setting(
260
+ "llm.anthropic.api_key"
261
+ ):
262
+ provider_has_config = True
263
+ elif provider == "openai_endpoint" and get_db_setting(
264
+ "llm.openai_endpoint.api_key"
265
+ ):
266
+ provider_has_config = True
267
+ elif provider == "ollama" and is_ollama_available():
268
+ provider_has_config = True
269
+ elif provider in ["vllm", "lmstudio", "llamacpp"]:
270
+ # These are local providers, check their availability
271
+ if provider == "vllm" and is_vllm_available():
272
+ provider_has_config = True
273
+ elif provider == "lmstudio" and is_lmstudio_available():
274
+ provider_has_config = True
275
+ elif provider == "llamacpp" and is_llamacpp_available():
276
+ provider_has_config = True
277
+
278
+ if not provider_has_config:
279
+ logger.info(
280
+ "LDR_USE_FALLBACK_LLM is set and no valid provider config found, using fallback model"
281
+ )
282
+ return wrap_llm_without_think_tags(
283
+ get_fallback_model(temperature),
284
+ research_id=research_id,
285
+ provider="fallback",
286
+ research_context=research_context,
287
+ )
288
+
230
289
  # Validate provider
231
290
  if provider not in VALID_PROVIDERS:
232
291
  logger.error(f"Invalid provider in settings: {provider}")
@@ -242,16 +301,37 @@ def get_llm(
242
301
  "temperature": temperature,
243
302
  }
244
303
 
245
- # Get context window size from settings
246
- context_window_size = get_db_setting("llm.context_window_size", 32000)
304
+ # Get context window size from settings (use different defaults for local vs cloud providers)
305
+ def get_context_window_size(provider_type):
306
+ if provider_type in ["ollama", "llamacpp", "lmstudio"]:
307
+ # Local providers: use smaller default to prevent memory issues
308
+ return get_db_setting("llm.local_context_window_size", 4096)
309
+ else:
310
+ # Cloud providers: check if unrestricted mode is enabled
311
+ use_unrestricted = get_db_setting(
312
+ "llm.context_window_unrestricted", True
313
+ )
314
+ if use_unrestricted:
315
+ # Let cloud providers auto-handle context (return None or very large value)
316
+ return None # Will be handled per provider
317
+ else:
318
+ # Use user-specified limit
319
+ return get_db_setting("llm.context_window_size", 128000)
320
+
321
+ context_window_size = get_context_window_size(provider)
247
322
 
248
323
  if get_db_setting("llm.supports_max_tokens", True):
249
324
  # Use 80% of context window to leave room for prompts
250
- max_tokens = min(
251
- int(get_db_setting("llm.max_tokens", 30000)),
252
- int(context_window_size * 0.8),
253
- )
254
- common_params["max_tokens"] = max_tokens
325
+ if context_window_size is not None:
326
+ max_tokens = min(
327
+ int(get_db_setting("llm.max_tokens", 100000)),
328
+ int(context_window_size * 0.8),
329
+ )
330
+ common_params["max_tokens"] = max_tokens
331
+ else:
332
+ # Unrestricted context: use provider's default max_tokens
333
+ max_tokens = int(get_db_setting("llm.max_tokens", 100000))
334
+ common_params["max_tokens"] = max_tokens
255
335
 
256
336
  # Handle different providers
257
337
  if provider == "anthropic":
@@ -301,6 +381,7 @@ def get_llm(
301
381
  openai_endpoint_url = get_db_setting(
302
382
  "llm.openai_endpoint.url", "https://openrouter.ai/api/v1"
303
383
  )
384
+ openai_endpoint_url = normalize_url(openai_endpoint_url)
304
385
 
305
386
  llm = ChatOpenAI(
306
387
  model=model_name,
@@ -394,8 +475,12 @@ def get_llm(
394
475
  f"Creating ChatOllama with model={model_name}, base_url={base_url}"
395
476
  )
396
477
  try:
478
+ # Add num_ctx parameter for Ollama context window size
479
+ ollama_params = {**common_params}
480
+ if context_window_size is not None:
481
+ ollama_params["num_ctx"] = context_window_size
397
482
  llm = ChatOllama(
398
- model=model_name, base_url=base_url, **common_params
483
+ model=model_name, base_url=base_url, **ollama_params
399
484
  )
400
485
 
401
486
  # Log the actual client configuration after creation
@@ -475,7 +560,7 @@ def get_llm(
475
560
  llm = LlamaCppClient(
476
561
  server_url=server_url,
477
562
  temperature=temperature,
478
- max_tokens=get_db_setting("llm.max_tokens", 30000),
563
+ max_tokens=get_db_setting("llm.max_tokens", 8192),
479
564
  )
480
565
  else:
481
566
  # Use direct model loading (existing code)
@@ -498,7 +583,7 @@ def get_llm(
498
583
  n_gpu_layers=n_gpu_layers,
499
584
  n_batch=n_batch,
500
585
  f16_kv=f16_kv,
501
- n_ctx=context_window_size, # Set context window size directly
586
+ n_ctx=context_window_size, # Set context window size directly (None = use default)
502
587
  verbose=True,
503
588
  )
504
589
 
@@ -194,13 +194,27 @@
194
194
  "value": 30000,
195
195
  "visible": true
196
196
  },
197
+ "llm.context_window_unrestricted": {
198
+ "category": "llm_parameters",
199
+ "description": "Let cloud providers automatically handle context sizing (recommended). Uncheck to set a specific limit.",
200
+ "editable": true,
201
+ "max_value": null,
202
+ "min_value": null,
203
+ "name": "Use Unrestricted Cloud Context Window",
204
+ "options": null,
205
+ "step": null,
206
+ "type": "LLM",
207
+ "ui_element": "checkbox",
208
+ "value": true,
209
+ "visible": true
210
+ },
197
211
  "llm.context_window_size": {
198
212
  "category": "llm_parameters",
199
- "description": "Maximum context window size in tokens for the LLM",
213
+ "description": "Maximum context window size in tokens for cloud LLMs. Only used when unrestricted context is disabled.",
200
214
  "editable": true,
201
215
  "max_value": 20000000.0,
202
216
  "min_value": 512.0,
203
- "name": "Context Window Size",
217
+ "name": "Cloud Provider Context Window Limit",
204
218
  "options": null,
205
219
  "step": null,
206
220
  "type": "LLM",
@@ -208,6 +222,20 @@
208
222
  "value": 128000,
209
223
  "visible": true
210
224
  },
225
+ "llm.local_context_window_size": {
226
+ "category": "llm_parameters",
227
+ "description": "Context window size in tokens for local LLMs (Ollama, LlamaCpp). Smaller values prevent memory issues.",
228
+ "editable": true,
229
+ "max_value": 131072.0,
230
+ "min_value": 512.0,
231
+ "name": "Local Provider Context Window Size",
232
+ "options": null,
233
+ "step": 512.0,
234
+ "type": "LLM",
235
+ "ui_element": "number",
236
+ "value": 4096,
237
+ "visible": true
238
+ },
211
239
  "llm.supports_max_tokens": {
212
240
  "category": "llm_parameters",
213
241
  "description": "Whether the LLM API supports the 'max_tokens' option.",
@@ -4037,6 +4065,197 @@
4037
4065
  "value": false,
4038
4066
  "visible": true
4039
4067
  },
4068
+ "search.engine.web.tavily.display_name": {
4069
+ "category": "tavily",
4070
+ "description": "Display name to use in the U.I. for this search engine.",
4071
+ "editable": false,
4072
+ "max_value": null,
4073
+ "min_value": null,
4074
+ "name": "Display Name",
4075
+ "options": null,
4076
+ "step": null,
4077
+ "type": "SEARCH",
4078
+ "ui_element": "text",
4079
+ "value": "Tavily",
4080
+ "visible": false
4081
+ },
4082
+ "search.engine.web.tavily.description": {
4083
+ "category": "tavily",
4084
+ "description": "Human-readable description of the search engine.",
4085
+ "editable": false,
4086
+ "max_value": null,
4087
+ "min_value": null,
4088
+ "name": "Description",
4089
+ "options": null,
4090
+ "step": null,
4091
+ "type": "SEARCH",
4092
+ "ui_element": "text",
4093
+ "value": "AI-powered search engine optimized for research with built-in answer extraction.",
4094
+ "visible": false
4095
+ },
4096
+ "search.engine.web.tavily.api_key": {
4097
+ "category": "tavily",
4098
+ "description": "The Tavily API key to use.",
4099
+ "editable": true,
4100
+ "max_value": null,
4101
+ "min_value": null,
4102
+ "name": "Api Key",
4103
+ "options": null,
4104
+ "step": null,
4105
+ "type": "SEARCH",
4106
+ "ui_element": "password",
4107
+ "value": "TAVILY_API_KEY",
4108
+ "visible": true
4109
+ },
4110
+ "search.engine.web.tavily.class_name": {
4111
+ "category": "tavily",
4112
+ "description": "Setting for tavily.class_name",
4113
+ "editable": true,
4114
+ "max_value": null,
4115
+ "min_value": null,
4116
+ "name": "Class Name",
4117
+ "options": null,
4118
+ "step": null,
4119
+ "type": "SEARCH",
4120
+ "ui_element": "text",
4121
+ "value": "TavilySearchEngine",
4122
+ "visible": true
4123
+ },
4124
+ "search.engine.web.tavily.default_params.search_depth": {
4125
+ "category": "tavily",
4126
+ "description": "Search depth - basic for speed, advanced for quality",
4127
+ "editable": true,
4128
+ "max_value": null,
4129
+ "min_value": null,
4130
+ "name": "Search Depth",
4131
+ "options": ["basic", "advanced"],
4132
+ "step": null,
4133
+ "type": "SEARCH",
4134
+ "ui_element": "select",
4135
+ "value": "basic",
4136
+ "visible": true
4137
+ },
4138
+ "search.engine.web.tavily.default_params.include_full_content": {
4139
+ "category": "tavily",
4140
+ "description": "Include full webpage content in results",
4141
+ "editable": true,
4142
+ "max_value": null,
4143
+ "min_value": null,
4144
+ "name": "Include Full Content",
4145
+ "options": null,
4146
+ "step": null,
4147
+ "type": "SEARCH",
4148
+ "ui_element": "checkbox",
4149
+ "value": true,
4150
+ "visible": true
4151
+ },
4152
+ "search.engine.web.tavily.module_path": {
4153
+ "category": "tavily",
4154
+ "description": "Setting for tavily.module_path",
4155
+ "editable": true,
4156
+ "max_value": null,
4157
+ "min_value": null,
4158
+ "name": "Module Path",
4159
+ "options": null,
4160
+ "step": null,
4161
+ "type": "SEARCH",
4162
+ "ui_element": "text",
4163
+ "value": ".engines.search_engine_tavily",
4164
+ "visible": true
4165
+ },
4166
+ "search.engine.web.tavily.requires_api_key": {
4167
+ "category": "tavily",
4168
+ "description": "Setting for tavily.requires_api_key",
4169
+ "editable": true,
4170
+ "max_value": null,
4171
+ "min_value": null,
4172
+ "name": "Requires Api Key",
4173
+ "options": null,
4174
+ "step": null,
4175
+ "type": "SEARCH",
4176
+ "ui_element": "checkbox",
4177
+ "value": true,
4178
+ "visible": true
4179
+ },
4180
+ "search.engine.web.tavily.reliability": {
4181
+ "category": "tavily",
4182
+ "description": "Setting for tavily.reliability",
4183
+ "editable": true,
4184
+ "max_value": 1.0,
4185
+ "min_value": 0.0,
4186
+ "name": "Reliability",
4187
+ "options": null,
4188
+ "step": 0.05,
4189
+ "type": "SEARCH",
4190
+ "ui_element": "range",
4191
+ "value": 0.8,
4192
+ "visible": true
4193
+ },
4194
+ "search.engine.web.tavily.strengths": {
4195
+ "category": "tavily",
4196
+ "description": "Setting for tavily.strengths",
4197
+ "editable": true,
4198
+ "max_value": null,
4199
+ "min_value": null,
4200
+ "name": "Strengths",
4201
+ "options": null,
4202
+ "step": null,
4203
+ "type": "SEARCH",
4204
+ "ui_element": "json",
4205
+ "value": [
4206
+ "AI-powered search optimization",
4207
+ "built-in answer extraction",
4208
+ "research-focused results",
4209
+ "high-quality content filtering",
4210
+ "fast response times"
4211
+ ],
4212
+ "visible": true
4213
+ },
4214
+ "search.engine.web.tavily.supports_full_search": {
4215
+ "category": "tavily",
4216
+ "description": "Setting for tavily.supports_full_search",
4217
+ "editable": true,
4218
+ "max_value": null,
4219
+ "min_value": null,
4220
+ "name": "Supports Full Search",
4221
+ "options": null,
4222
+ "step": null,
4223
+ "type": "SEARCH",
4224
+ "ui_element": "checkbox",
4225
+ "value": true,
4226
+ "visible": true
4227
+ },
4228
+ "search.engine.web.tavily.weaknesses": {
4229
+ "category": "tavily",
4230
+ "description": "Setting for tavily.weaknesses",
4231
+ "editable": true,
4232
+ "max_value": null,
4233
+ "min_value": null,
4234
+ "name": "Weaknesses",
4235
+ "options": null,
4236
+ "step": null,
4237
+ "type": "SEARCH",
4238
+ "ui_element": "json",
4239
+ "value": [
4240
+ "requires API key with usage limits",
4241
+ "newer service with smaller historical data"
4242
+ ],
4243
+ "visible": true
4244
+ },
4245
+ "search.engine.web.tavily.use_in_auto_search": {
4246
+ "category": "tavily",
4247
+ "description": "Include Tavily in auto search mode",
4248
+ "editable": true,
4249
+ "max_value": null,
4250
+ "min_value": null,
4251
+ "name": "Include in Auto Search",
4252
+ "options": null,
4253
+ "step": null,
4254
+ "type": "SEARCH",
4255
+ "ui_element": "checkbox",
4256
+ "value": false,
4257
+ "visible": true
4258
+ },
4040
4259
  "search.engine.local.local_all.use_in_auto_search": {
4041
4260
  "category": "local_all",
4042
4261
  "description": "Include local documents in auto search mode",
@@ -4129,5 +4348,231 @@
4129
4348
  "ui_element": "text",
4130
4349
  "value": "http://localhost:8000",
4131
4350
  "visible": true
4351
+ },
4352
+ "app.warnings.dismiss_high_context": {
4353
+ "category": "warnings",
4354
+ "description": "Dismiss warnings about high context window sizes that may cause memory issues",
4355
+ "editable": true,
4356
+ "max_value": null,
4357
+ "min_value": null,
4358
+ "name": "Dismiss High Context Warnings",
4359
+ "options": null,
4360
+ "step": null,
4361
+ "type": "APP",
4362
+ "ui_element": "checkbox",
4363
+ "value": false,
4364
+ "visible": true
4365
+ },
4366
+ "app.warnings.dismiss_low_context_focused": {
4367
+ "category": "warnings",
4368
+ "description": "Dismiss warnings about using focused iteration with low context window sizes",
4369
+ "editable": true,
4370
+ "max_value": null,
4371
+ "min_value": null,
4372
+ "name": "Dismiss Low Context + Focused Strategy Warnings",
4373
+ "options": null,
4374
+ "step": null,
4375
+ "type": "APP",
4376
+ "ui_element": "checkbox",
4377
+ "value": false,
4378
+ "visible": true
4379
+ },
4380
+ "app.warnings.dismiss_model_mismatch": {
4381
+ "category": "warnings",
4382
+ "description": "Dismiss warnings about context size vs model size mismatches",
4383
+ "editable": true,
4384
+ "max_value": null,
4385
+ "min_value": null,
4386
+ "name": "Dismiss Model Mismatch Warnings",
4387
+ "options": null,
4388
+ "step": null,
4389
+ "type": "APP",
4390
+ "ui_element": "checkbox",
4391
+ "value": false,
4392
+ "visible": true
4393
+ },
4394
+ "app.warnings.dismiss_searxng_recommendation": {
4395
+ "category": "warnings",
4396
+ "description": "Dismiss recommendations about using more questions instead of iterations with SearXNG",
4397
+ "editable": true,
4398
+ "max_value": null,
4399
+ "min_value": null,
4400
+ "name": "Dismiss SearXNG Optimization Tips",
4401
+ "options": null,
4402
+ "step": null,
4403
+ "type": "APP",
4404
+ "ui_element": "checkbox",
4405
+ "value": false,
4406
+ "visible": true
4407
+ },
4408
+ "rate_limiting.enabled": {
4409
+ "category": "rate_limiting",
4410
+ "description": "Enable adaptive rate limiting system that learns optimal wait times for each search engine",
4411
+ "editable": true,
4412
+ "max_value": null,
4413
+ "min_value": null,
4414
+ "name": "Enable Rate Limiting",
4415
+ "options": null,
4416
+ "step": null,
4417
+ "type": "APP",
4418
+ "ui_element": "checkbox",
4419
+ "value": true,
4420
+ "visible": true
4421
+ },
4422
+ "rate_limiting.exploration_rate": {
4423
+ "category": "rate_limiting",
4424
+ "description": "Percentage of attempts that will explore faster rates to discover improvements (0.0-1.0)",
4425
+ "editable": true,
4426
+ "max_value": 1.0,
4427
+ "min_value": 0.0,
4428
+ "name": "Exploration Rate",
4429
+ "options": null,
4430
+ "step": 0.05,
4431
+ "type": "APP",
4432
+ "ui_element": "range",
4433
+ "value": 0.1,
4434
+ "visible": true
4435
+ },
4436
+ "rate_limiting.learning_rate": {
4437
+ "category": "rate_limiting",
4438
+ "description": "How quickly to adapt to new rate limit information (higher = faster adaptation)",
4439
+ "editable": true,
4440
+ "max_value": 1.0,
4441
+ "min_value": 0.05,
4442
+ "name": "Learning Rate",
4443
+ "options": null,
4444
+ "step": 0.05,
4445
+ "type": "APP",
4446
+ "ui_element": "range",
4447
+ "value": 0.3,
4448
+ "visible": true
4449
+ },
4450
+ "rate_limiting.memory_window": {
4451
+ "category": "rate_limiting",
4452
+ "description": "Number of recent attempts to keep in memory for learning",
4453
+ "editable": true,
4454
+ "max_value": 1000,
4455
+ "min_value": 10,
4456
+ "name": "Memory Window",
4457
+ "options": null,
4458
+ "step": 10,
4459
+ "type": "APP",
4460
+ "ui_element": "number",
4461
+ "value": 100,
4462
+ "visible": true
4463
+ },
4464
+ "rate_limiting.profile": {
4465
+ "category": "rate_limiting",
4466
+ "description": "Rate limiting aggressiveness profile",
4467
+ "editable": true,
4468
+ "max_value": null,
4469
+ "min_value": null,
4470
+ "name": "Rate Limiting Profile",
4471
+ "options": [
4472
+ {
4473
+ "label": "Conservative (slower, more reliable)",
4474
+ "value": "conservative"
4475
+ },
4476
+ {
4477
+ "label": "Balanced (default)",
4478
+ "value": "balanced"
4479
+ },
4480
+ {
4481
+ "label": "Aggressive (faster, higher risk)",
4482
+ "value": "aggressive"
4483
+ }
4484
+ ],
4485
+ "step": null,
4486
+ "type": "APP",
4487
+ "ui_element": "select",
4488
+ "value": "balanced",
4489
+ "visible": true
4490
+ },
4491
+ "rate_limiting.decay_per_day": {
4492
+ "category": "rate_limiting",
4493
+ "description": "Confidence decay factor per day for old rate limit estimates (0.5-0.99, lower = faster decay)",
4494
+ "editable": true,
4495
+ "max_value": 0.99,
4496
+ "min_value": 0.5,
4497
+ "name": "Decay Factor Per Day",
4498
+ "options": null,
4499
+ "step": 0.01,
4500
+ "type": "APP",
4501
+ "ui_element": "range",
4502
+ "value": 0.95,
4503
+ "visible": true
4504
+ },
4505
+ "benchmark.evaluation.provider": {
4506
+ "category": "benchmark",
4507
+ "description": "Provider for benchmark evaluation model",
4508
+ "editable": true,
4509
+ "max_value": null,
4510
+ "min_value": null,
4511
+ "name": "Evaluation Provider",
4512
+ "options": [
4513
+ {
4514
+ "label": "OpenRouter (Recommended)",
4515
+ "value": "openai_endpoint"
4516
+ },
4517
+ {
4518
+ "label": "OpenAI API",
4519
+ "value": "openai"
4520
+ },
4521
+ {
4522
+ "label": "Anthropic API",
4523
+ "value": "anthropic"
4524
+ },
4525
+ {
4526
+ "label": "Ollama (Local)",
4527
+ "value": "ollama"
4528
+ }
4529
+ ],
4530
+ "step": null,
4531
+ "type": "SEARCH",
4532
+ "ui_element": "select",
4533
+ "value": "openai_endpoint",
4534
+ "visible": true
4535
+ },
4536
+ "benchmark.evaluation.model": {
4537
+ "category": "benchmark",
4538
+ "description": "Model for evaluating benchmark results",
4539
+ "editable": true,
4540
+ "max_value": null,
4541
+ "min_value": null,
4542
+ "name": "Evaluation Model",
4543
+ "options": null,
4544
+ "step": null,
4545
+ "type": "SEARCH",
4546
+ "ui_element": "text",
4547
+ "value": "anthropic/claude-3.7-sonnet",
4548
+ "visible": true
4549
+ },
4550
+ "benchmark.evaluation.endpoint_url": {
4551
+ "category": "benchmark",
4552
+ "description": "Endpoint URL for evaluation model (when using OpenAI-compatible APIs)",
4553
+ "editable": true,
4554
+ "max_value": null,
4555
+ "min_value": null,
4556
+ "name": "Evaluation Endpoint URL",
4557
+ "options": null,
4558
+ "step": null,
4559
+ "type": "SEARCH",
4560
+ "ui_element": "text",
4561
+ "value": "https://openrouter.ai/api/v1",
4562
+ "visible": true
4563
+ },
4564
+ "benchmark.evaluation.temperature": {
4565
+ "category": "benchmark",
4566
+ "description": "Temperature for evaluation (0 recommended for consistency)",
4567
+ "editable": true,
4568
+ "max_value": 1.0,
4569
+ "min_value": 0.0,
4570
+ "name": "Evaluation Temperature",
4571
+ "options": null,
4572
+ "step": 0.1,
4573
+ "type": "SEARCH",
4574
+ "ui_element": "range",
4575
+ "value": 0,
4576
+ "visible": true
4132
4577
  }
4133
4578
  }
@@ -346,6 +346,16 @@ We're here to help you get this working:
346
346
  "- Check search engine settings in Advanced Options\n"
347
347
  "- Ensure required API keys are set for external search engines"
348
348
  ),
349
+ "No search results found|All search engines.*blocked.*rate.*limited": (
350
+ "No search results were found for your query. This could mean all search engines are unavailable.\n\n"
351
+ "**Try this:**\n"
352
+ "- **If using SearXNG:** Check if your SearXNG Docker container is running: `docker ps`\n"
353
+ "- **Start SearXNG:** `docker run -d -p 8080:8080 searxng/searxng` then set URL to `http://localhost:8080`\n"
354
+ "- **Try different search terms:** Use broader, more general keywords\n"
355
+ "- **Check network connection:** Ensure you can access the internet\n"
356
+ "- **Switch search engines:** Try DuckDuckGo, Brave, or Google (if API key configured)\n"
357
+ "- **Check for typos** in your research query"
358
+ ),
349
359
  "TypeError.*Context.*Size|'<' not supported between": (
350
360
  "Model configuration issue. The context size setting might not be compatible with your model.\n\n"
351
361
  "**Try this:**\n"