local-deep-research 0.5.9__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. local_deep_research/__version__.py +1 -1
  2. local_deep_research/advanced_search_system/candidate_exploration/progressive_explorer.py +11 -1
  3. local_deep_research/advanced_search_system/questions/browsecomp_question.py +32 -6
  4. local_deep_research/advanced_search_system/strategies/focused_iteration_strategy.py +32 -8
  5. local_deep_research/advanced_search_system/strategies/source_based_strategy.py +2 -0
  6. local_deep_research/api/__init__.py +2 -0
  7. local_deep_research/api/research_functions.py +177 -3
  8. local_deep_research/benchmarks/graders.py +150 -5
  9. local_deep_research/benchmarks/models/__init__.py +19 -0
  10. local_deep_research/benchmarks/models/benchmark_models.py +283 -0
  11. local_deep_research/benchmarks/ui/__init__.py +1 -0
  12. local_deep_research/benchmarks/web_api/__init__.py +6 -0
  13. local_deep_research/benchmarks/web_api/benchmark_routes.py +862 -0
  14. local_deep_research/benchmarks/web_api/benchmark_service.py +920 -0
  15. local_deep_research/config/llm_config.py +106 -21
  16. local_deep_research/defaults/default_settings.json +447 -2
  17. local_deep_research/error_handling/report_generator.py +10 -0
  18. local_deep_research/llm/__init__.py +19 -0
  19. local_deep_research/llm/llm_registry.py +155 -0
  20. local_deep_research/metrics/db_models.py +3 -7
  21. local_deep_research/metrics/search_tracker.py +25 -11
  22. local_deep_research/search_system.py +12 -9
  23. local_deep_research/utilities/log_utils.py +23 -10
  24. local_deep_research/utilities/thread_context.py +99 -0
  25. local_deep_research/web/app_factory.py +32 -8
  26. local_deep_research/web/database/benchmark_schema.py +230 -0
  27. local_deep_research/web/database/convert_research_id_to_string.py +161 -0
  28. local_deep_research/web/database/models.py +55 -1
  29. local_deep_research/web/database/schema_upgrade.py +397 -2
  30. local_deep_research/web/database/uuid_migration.py +265 -0
  31. local_deep_research/web/routes/api_routes.py +62 -31
  32. local_deep_research/web/routes/history_routes.py +13 -6
  33. local_deep_research/web/routes/metrics_routes.py +264 -4
  34. local_deep_research/web/routes/research_routes.py +45 -18
  35. local_deep_research/web/routes/route_registry.py +352 -0
  36. local_deep_research/web/routes/settings_routes.py +382 -22
  37. local_deep_research/web/services/research_service.py +22 -29
  38. local_deep_research/web/services/settings_manager.py +53 -0
  39. local_deep_research/web/services/settings_service.py +2 -0
  40. local_deep_research/web/static/css/styles.css +8 -0
  41. local_deep_research/web/static/js/components/detail.js +7 -14
  42. local_deep_research/web/static/js/components/details.js +8 -10
  43. local_deep_research/web/static/js/components/fallback/ui.js +4 -4
  44. local_deep_research/web/static/js/components/history.js +6 -6
  45. local_deep_research/web/static/js/components/logpanel.js +14 -11
  46. local_deep_research/web/static/js/components/progress.js +51 -46
  47. local_deep_research/web/static/js/components/research.js +250 -89
  48. local_deep_research/web/static/js/components/results.js +5 -7
  49. local_deep_research/web/static/js/components/settings.js +32 -26
  50. local_deep_research/web/static/js/components/settings_sync.js +24 -23
  51. local_deep_research/web/static/js/config/urls.js +285 -0
  52. local_deep_research/web/static/js/main.js +8 -8
  53. local_deep_research/web/static/js/research_form.js +267 -12
  54. local_deep_research/web/static/js/services/api.js +18 -18
  55. local_deep_research/web/static/js/services/keyboard.js +8 -8
  56. local_deep_research/web/static/js/services/socket.js +53 -35
  57. local_deep_research/web/static/js/services/ui.js +1 -1
  58. local_deep_research/web/templates/base.html +4 -1
  59. local_deep_research/web/templates/components/custom_dropdown.html +5 -3
  60. local_deep_research/web/templates/components/mobile_nav.html +3 -3
  61. local_deep_research/web/templates/components/sidebar.html +9 -3
  62. local_deep_research/web/templates/pages/benchmark.html +2697 -0
  63. local_deep_research/web/templates/pages/benchmark_results.html +1274 -0
  64. local_deep_research/web/templates/pages/benchmark_simple.html +453 -0
  65. local_deep_research/web/templates/pages/cost_analytics.html +1 -1
  66. local_deep_research/web/templates/pages/metrics.html +212 -39
  67. local_deep_research/web/templates/pages/research.html +8 -6
  68. local_deep_research/web/templates/pages/star_reviews.html +1 -1
  69. local_deep_research/web_search_engines/engines/search_engine_arxiv.py +14 -1
  70. local_deep_research/web_search_engines/engines/search_engine_brave.py +15 -1
  71. local_deep_research/web_search_engines/engines/search_engine_ddg.py +20 -1
  72. local_deep_research/web_search_engines/engines/search_engine_google_pse.py +26 -2
  73. local_deep_research/web_search_engines/engines/search_engine_pubmed.py +15 -1
  74. local_deep_research/web_search_engines/engines/search_engine_retriever.py +192 -0
  75. local_deep_research/web_search_engines/engines/search_engine_tavily.py +307 -0
  76. local_deep_research/web_search_engines/rate_limiting/__init__.py +14 -0
  77. local_deep_research/web_search_engines/rate_limiting/__main__.py +9 -0
  78. local_deep_research/web_search_engines/rate_limiting/cli.py +209 -0
  79. local_deep_research/web_search_engines/rate_limiting/exceptions.py +21 -0
  80. local_deep_research/web_search_engines/rate_limiting/tracker.py +506 -0
  81. local_deep_research/web_search_engines/retriever_registry.py +108 -0
  82. local_deep_research/web_search_engines/search_engine_base.py +161 -43
  83. local_deep_research/web_search_engines/search_engine_factory.py +14 -0
  84. local_deep_research/web_search_engines/search_engines_config.py +20 -0
  85. local_deep_research-0.6.1.dist-info/METADATA +374 -0
  86. {local_deep_research-0.5.9.dist-info → local_deep_research-0.6.1.dist-info}/RECORD +89 -64
  87. local_deep_research-0.5.9.dist-info/METADATA +0 -420
  88. {local_deep_research-0.5.9.dist-info → local_deep_research-0.6.1.dist-info}/WHEEL +0 -0
  89. {local_deep_research-0.5.9.dist-info → local_deep_research-0.6.1.dist-info}/entry_points.txt +0 -0
  90. {local_deep_research-0.5.9.dist-info → local_deep_research-0.6.1.dist-info}/licenses/LICENSE +0 -0
local_deep_research/web/routes/settings_routes.py
@@ -7,7 +7,6 @@ from typing import Any, Optional, Tuple
 import requests
 from flask import (
     Blueprint,
-    current_app,
     flash,
     jsonify,
     redirect,
@@ -16,9 +15,8 @@ from flask import (
 )
 from flask_wtf.csrf import generate_csrf
 from loguru import logger
-from sqlalchemy.orm import Session

-from ...utilities.db_utils import get_db_setting
+from ...utilities.db_utils import get_db_setting, get_db_session
 from ...utilities.url_utils import normalize_url
 from ..database.models import Setting, SettingType
 from ..services.settings_service import (
@@ -27,18 +25,89 @@ from ..services.settings_service import (
     get_settings_manager,
     set_setting,
 )
+from ..services.settings_manager import SettingsManager
 from ..utils.templates import render_template_with_defaults

 # Create a Blueprint for settings
-settings_bp = Blueprint("settings", __name__, url_prefix="/research/settings")
+settings_bp = Blueprint("settings", __name__, url_prefix="/settings")


-def get_db_session() -> Session:
-    """Get the database session from the app context"""
-    if hasattr(current_app, "db_session"):
-        return current_app.db_session
-    else:
-        return current_app.extensions["sqlalchemy"].session()
+def calculate_warnings():
+    """Calculate current warning conditions based on settings"""
+    warnings = []
+
+    try:
+        # Get a fresh database session for safety
+        db_session = get_db_session()
+
+        # Get current settings
+        provider = get_setting("llm.provider", "ollama", db_session).lower()
+        local_context = get_setting(
+            "llm.local_context_window_size", 4096, db_session
+        )
+
+        logger.debug(f"Starting warning calculation - provider={provider}")
+
+        # Get dismissal settings
+        dismiss_high_context = get_setting(
+            "app.warnings.dismiss_high_context", False, db_session
+        )
+
+        # Check warning conditions
+        is_local_provider = provider in [
+            "ollama",
+            "llamacpp",
+            "lmstudio",
+            "vllm",
+        ]
+
+        # High context warning for local providers
+        if (
+            is_local_provider
+            and local_context > 8192
+            and not dismiss_high_context
+        ):
+            warnings.append(
+                {
+                    "type": "high_context",
+                    "icon": "⚠️",
+                    "title": "High Context Warning",
+                    "message": f"Context size ({local_context:,} tokens) may cause memory issues with {provider}. Increase VRAM or reduce context size if you experience slowdowns.",
+                    "dismissKey": "app.warnings.dismiss_high_context",
+                }
+            )
+
+        # Get additional warning settings
+        dismiss_model_mismatch = get_setting(
+            "app.warnings.dismiss_model_mismatch", False, db_session
+        )
+
+        # Get current strategy and model (these need to be passed from the frontend or retrieved differently)
+        # For now, we'll implement basic warnings that don't require form state
+
+        # Model mismatch warning (simplified - checking setting instead of form value)
+        current_model = get_setting("llm.model", "", db_session)
+        if (
+            current_model
+            and "70b" in current_model.lower()
+            and is_local_provider
+            and local_context > 8192
+            and not dismiss_model_mismatch
+        ):
+            warnings.append(
+                {
+                    "type": "model_mismatch",
+                    "icon": "🧠",
+                    "title": "Model & Context Warning",
+                    "message": f"Large model ({current_model}) with high context ({local_context:,}) may exceed VRAM. Consider reducing context size or upgrading GPU memory.",
+                    "dismissKey": "app.warnings.dismiss_model_mismatch",
+                }
+            )
+
+    except Exception as e:
+        logger.warning(f"Error calculating warnings: {e}")
+
+    return warnings


 def validate_setting(
@@ -340,15 +409,37 @@ def save_all_settings():
            # Multiple settings or generic message
            success_message = f"Settings saved successfully ({len(updated_settings)} updated, {len(created_settings)} created)"

-        return jsonify(
-            {
-                "status": "success",
-                "message": success_message,
-                "updated": updated_settings,
-                "created": created_settings,
-                "settings": all_settings,
-            }
-        )
+        # Check if any warning-affecting settings were changed and include warnings
+        response_data = {
+            "status": "success",
+            "message": success_message,
+            "updated": updated_settings,
+            "created": created_settings,
+            "settings": all_settings,
+        }
+
+        warning_affecting_keys = [
+            "llm.provider",
+            "search.tool",
+            "search.iterations",
+            "search.questions_per_iteration",
+            "llm.local_context_window_size",
+            "llm.context_window_unrestricted",
+            "llm.context_window_size",
+        ]
+
+        # Check if any warning-affecting settings were changed
+        if any(
+            key in warning_affecting_keys
+            for key in updated_settings + created_settings
+        ):
+            warnings = calculate_warnings()
+            response_data["warnings"] = warnings
+            logger.info(
+                f"Bulk settings update affected warning keys, calculated {len(warnings)} warnings"
+            )
+
+        return jsonify(response_data)


    except Exception:
        logger.exception("Error saving settings")
@@ -511,9 +602,29 @@ def api_update_setting(key):
         # Update setting
         success = set_setting(key, value)
         if success:
-            return jsonify(
-                {"message": f"Setting {key} updated successfully"}
-            )
+            response_data = {
+                "message": f"Setting {key} updated successfully"
+            }
+
+            # If this is a key that affects warnings, include warning calculations
+            warning_affecting_keys = [
+                "llm.provider",
+                "search.tool",
+                "search.iterations",
+                "search.questions_per_iteration",
+                "llm.local_context_window_size",
+                "llm.context_window_unrestricted",
+                "llm.context_window_size",
+            ]
+
+            if key in warning_affecting_keys:
+                warnings = calculate_warnings()
+                response_data["warnings"] = warnings
+                logger.debug(
+                    f"Setting {key} changed to {value}, calculated {len(warnings)} warnings"
+                )
+
+            return jsonify(response_data)
         else:
             return jsonify(
                 {"error": f"Failed to update setting {key}"}
@@ -669,6 +780,15 @@ def api_get_ui_elements():
 def api_get_available_models():
     """Get available LLM models from various providers"""
     try:
+        from datetime import datetime, timedelta
+        from ..database.models import ProviderModel
+        from flask import request, current_app
+
+        # Check if force_refresh is requested
+        force_refresh = (
+            request.args.get("force_refresh", "false").lower() == "true"
+        )
+
         # Define provider options with generic provider names
         provider_options = [
             {"value": "OLLAMA", "label": "Ollama (Local)"},
@@ -683,6 +803,56 @@ def api_get_available_models():
         # Available models by provider
         providers = {}

+        # Check database cache first (unless force_refresh is True)
+        if not force_refresh:
+            try:
+                db_session = current_app.db_session
+
+                # Define cache expiration (24 hours)
+                cache_expiry = datetime.utcnow() - timedelta(hours=24)
+
+                # Get cached models from database
+                cached_models = (
+                    db_session.query(ProviderModel)
+                    .filter(ProviderModel.last_updated > cache_expiry)
+                    .all()
+                )
+
+                if cached_models:
+                    logger.info(
+                        f"Found {len(cached_models)} cached models in database"
+                    )
+
+                    # Group models by provider
+                    for model in cached_models:
+                        provider_key = f"{model.provider.lower()}_models"
+                        if provider_key not in providers:
+                            providers[provider_key] = []
+
+                        providers[provider_key].append(
+                            {
+                                "value": model.model_key,
+                                "label": model.model_label,
+                                "provider": model.provider.upper(),
+                            }
+                        )
+
+                    # If we have cached data for all providers, return it
+                    if providers:
+                        logger.info("Returning cached models from database")
+                        return jsonify(
+                            {
+                                "provider_options": provider_options,
+                                "providers": providers,
+                            }
+                        )
+
+            except Exception as e:
+                logger.warning(
+                    f"Error reading cached models from database: {e}"
+                )
+                # Continue to fetch fresh data
+
         # Try to get Ollama models
         ollama_models = []
         try:
@@ -1096,6 +1266,45 @@ def api_get_available_models():
         providers["anthropic_models"] = anthropic_models
         logger.info(f"Final Anthropic models count: {len(anthropic_models)}")

+        # Save fetched models to database cache
+        if force_refresh or providers:
+            # We fetched fresh data, save it to database
+            try:
+                from datetime import datetime
+
+                db_session = current_app.db_session
+
+                # Clear old cache entries for providers we're updating
+                for provider_key in providers.keys():
+                    provider_name = provider_key.replace("_models", "").upper()
+                    db_session.query(ProviderModel).filter(
+                        ProviderModel.provider == provider_name
+                    ).delete()
+
+                # Insert new models
+                for provider_key, models in providers.items():
+                    provider_name = provider_key.replace("_models", "").upper()
+                    for model in models:
+                        if (
+                            isinstance(model, dict)
+                            and "value" in model
+                            and "label" in model
+                        ):
+                            new_model = ProviderModel(
+                                provider=provider_name,
+                                model_key=model["value"],
+                                model_label=model["label"],
+                                last_updated=datetime.utcnow(),
+                            )
+                            db_session.add(new_model)
+
+                db_session.commit()
+                logger.info("Successfully cached models to database")
+
+            except Exception:
+                logger.exception("Error saving models to database cache")
+                db_session.rollback()
+
         # Return all options
         return jsonify(
             {"provider_options": provider_options, "providers": providers}
@@ -1565,6 +1774,17 @@ def fix_corrupted_settings():
     )


+@settings_bp.route("/api/warnings", methods=["GET"])
+def api_get_warnings():
+    """Get current warnings based on settings"""
+    try:
+        warnings = calculate_warnings()
+        return jsonify({"warnings": warnings})
+    except Exception as e:
+        logger.exception("Error getting warnings")
+        return jsonify({"error": str(e)}), 500
+
+
 @settings_bp.route("/api/ollama-status", methods=["GET"])
 def check_ollama_status():
     """Check if Ollama is running and available"""
@@ -1598,3 +1818,143 @@ def check_ollama_status():
     except requests.exceptions.RequestException as e:
         logger.exception("Ollama check failed")
         return jsonify({"running": False, "error": str(e)})
+
+
+@settings_bp.route("/api/rate-limiting/status", methods=["GET"])
+def api_get_rate_limiting_status():
+    """Get current rate limiting status and statistics"""
+    try:
+        from ...web_search_engines.rate_limiting import get_tracker
+
+        tracker = get_tracker()
+
+        # Get basic status
+        status = {
+            "enabled": tracker.enabled,
+            "exploration_rate": tracker.exploration_rate,
+            "learning_rate": tracker.learning_rate,
+            "memory_window": tracker.memory_window,
+        }
+
+        # Get engine statistics
+        engine_stats = tracker.get_stats()
+        engines = []
+
+        for stat in engine_stats:
+            (
+                engine_type,
+                base_wait,
+                min_wait,
+                max_wait,
+                last_updated,
+                total_attempts,
+                success_rate,
+            ) = stat
+            engines.append(
+                {
+                    "engine_type": engine_type,
+                    "base_wait_seconds": round(base_wait, 2),
+                    "min_wait_seconds": round(min_wait, 2),
+                    "max_wait_seconds": round(max_wait, 2),
+                    "last_updated": last_updated,
+                    "total_attempts": total_attempts,
+                    "success_rate": round(success_rate * 100, 1)
+                    if success_rate
+                    else 0.0,
+                }
+            )
+
+        return jsonify({"status": status, "engines": engines})
+
+    except Exception:
+        logger.exception("Error getting rate limiting status")
+        return jsonify({"error": "An internal error occurred"}), 500
+
+
+@settings_bp.route(
+    "/api/rate-limiting/engines/<engine_type>/reset", methods=["POST"]
+)
+def api_reset_engine_rate_limiting(engine_type):
+    """Reset rate limiting data for a specific engine"""
+    try:
+        from ...web_search_engines.rate_limiting import get_tracker
+
+        tracker = get_tracker()
+        tracker.reset_engine(engine_type)
+
+        return jsonify(
+            {"message": f"Rate limiting data reset for {engine_type}"}
+        )
+
+    except Exception:
+        logger.exception(f"Error resetting rate limiting for {engine_type}")
+        return jsonify({"error": "An internal error occurred"}), 500
+
+
+@settings_bp.route("/api/rate-limiting/cleanup", methods=["POST"])
+def api_cleanup_rate_limiting():
+    """Clean up old rate limiting data"""
+    try:
+        from ...web_search_engines.rate_limiting import get_tracker
+
+        days = request.json.get("days", 30) if request.is_json else 30
+
+        tracker = get_tracker()
+        tracker.cleanup_old_data(days)
+
+        return jsonify(
+            {"message": f"Cleaned up rate limiting data older than {days} days"}
+        )
+
+    except Exception:
+        logger.exception("Error cleaning up rate limiting data")
+        return jsonify({"error": "An internal error occurred"}), 500
+
+
+@settings_bp.route("/api/bulk", methods=["GET"])
+def get_bulk_settings():
+    """Get multiple settings at once for performance."""
+    try:
+        # Get requested settings from query parameters
+        requested = request.args.getlist("keys[]")
+        if not requested:
+            # Default to common settings if none specified
+            requested = [
+                "llm.provider",
+                "llm.model",
+                "search.tool",
+                "search.iterations",
+                "search.questions_per_iteration",
+                "search.search_strategy",
+                "benchmark.evaluation.provider",
+                "benchmark.evaluation.model",
+                "benchmark.evaluation.temperature",
+                "benchmark.evaluation.endpoint_url",
+            ]
+
+        # Fetch all settings at once
+        session = get_db_session()
+        settings_manager = SettingsManager(db_session=session)
+
+        result = {}
+        for key in requested:
+            try:
+                value = settings_manager.get_setting(key)
+                result[key] = {"value": value, "exists": value is not None}
+            except Exception as e:
+                logger.warning(f"Error getting setting {key}: {e}")
+                result[key] = {
+                    "value": None,
+                    "exists": False,
+                    "error": "Failed to retrieve setting",
+                }

+        session.close()
+
+        return jsonify({"success": True, "settings": result})
+
+    except Exception:
+        logger.exception("Error getting bulk settings")
+        return jsonify(
+            {"success": False, "error": "An internal error occurred"}
+        ), 500
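The bulk endpoint likewise resolves to GET /settings/api/bulk and accepts repeated keys[] query parameters; omitting them falls back to the default key list above. A minimal sketch, again assuming a local server on port 5000:

import requests

params = [
    ("keys[]", "llm.provider"),
    ("keys[]", "llm.model"),
    ("keys[]", "search.tool"),
]
resp = requests.get(
    "http://localhost:5000/settings/api/bulk", params=params, timeout=10
)
data = resp.json()
if data.get("success"):
    for key, entry in data["settings"].items():
        print(key, "->", entry["value"], "(exists)" if entry["exists"] else "(missing)")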
local_deep_research/web/services/research_service.py
@@ -200,7 +200,7 @@ def run_research_process(
         return

     logger.info(
-        "Starting research process for ID %s, query: %s", research_id, query
+        f"Starting research process for ID {research_id}, query: {query}"
     )

     # Extract key parameters
@@ -225,18 +225,11 @@ def run_research_process(

     # Log all parameters for debugging
     logger.info(
-        "Research parameters: provider=%s, model=%s, search_engine=%s, "
-        "max_results=%s, time_period=%s, iterations=%s, "
-        "questions_per_iteration=%s, custom_endpoint=%s, strategy=%s",
-        model_provider,
-        model,
-        search_engine,
-        max_results,
-        time_period,
-        iterations,
-        questions_per_iteration,
-        custom_endpoint,
-        strategy,
+        f"Research parameters: provider={model_provider}, model={model}, "
+        f"search_engine={search_engine}, max_results={max_results}, "
+        f"time_period={time_period}, iterations={iterations}, "
+        f"questions_per_iteration={questions_per_iteration}, "
+        f"custom_endpoint={custom_endpoint}, strategy={strategy}"
     )

     # Set up the AI Context Manager
@@ -269,7 +262,9 @@ def run_research_process(
            )
            raise Exception("Research was terminated by user")

-        logger.log("milestone", message)
+        # Bind research_id to logger for this specific log
+        bound_logger = logger.bind(research_id=research_id)
+        bound_logger.log("MILESTONE", message)

        if "SEARCH_PLAN:" in message:
            engines = message.split("SEARCH_PLAN:")[1].strip()
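The level name is the substantive fix here: loguru resolves levels by exact, case-sensitive name, so logging with "milestone" raises ValueError if only the upper-case MILESTONE level is registered. A self-contained sketch of the pattern; the level number, color, and where registration actually happens in the package are assumptions:

from loguru import logger

# Register a custom level once at startup (number and color are illustrative).
logger.level("MILESTONE", no=26, color="<magenta><bold>")


def report_milestone(research_id: int, message: str) -> None:
    # bind() attaches research_id to the record so sinks and filters can
    # route per-research log lines, mirroring the change above.
    logger.bind(research_id=research_id).log("MILESTONE", message)


report_milestone(42, "Completed first search iteration")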
@@ -405,15 +400,11 @@ def run_research_process(
            )

            logger.info(
-                "Successfully set LLM to: provider=%s, model=%s",
-                model_provider,
-                model,
+                f"Successfully set LLM to: provider={model_provider}, model={model}"
            )
        except Exception:
            logger.exception(
-                "Error setting LLM provider=%s, model=%s",
-                model_provider,
-                model,
+                f"Error setting LLM provider={model_provider}, model={model}"
            )

        # Set the progress callback in the system
@@ -423,12 +414,14 @@ def run_research_process(
        # Override search engine if specified
        if search_engine:
            try:
-                if iterations:
-                    system.max_iterations = int(iterations)
-                if questions_per_iteration:
-                    system.questions_per_iteration = int(
-                        questions_per_iteration
-                    )
+                # For focused-iteration strategy, don't override iterations/questions - use database settings
+                if strategy != "focused-iteration":
+                    if iterations:
+                        system.max_iterations = int(iterations)
+                    if questions_per_iteration:
+                        system.questions_per_iteration = int(
+                            questions_per_iteration
+                        )

                # Create a new search object with these settings
                system.search = get_search(
@@ -436,11 +429,11 @@ def run_research_process(
                )

                logger.info(
-                    "Successfully set search engine to: %s", search_engine
+                    f"Successfully set search engine to: {search_engine}"
                )
            except Exception:
                logger.exception(
-                    "Error setting search engine to %s", search_engine
+                    f"Error setting search engine to {search_engine}"
                )

        # Run the search
@@ -1032,7 +1025,7 @@ def run_research_process(
            research.status = status
            research.completed_at = completed_at
            research.duration_seconds = duration_seconds
-            research.metadata = metadata
+            research.research_meta = metadata

            # Add error report path if available
            if "report_path_to_save" in locals() and report_path_to_save:
local_deep_research/web/services/settings_manager.py
@@ -229,6 +229,8 @@ class SettingsManager:

            if commit:
                self.db_session.commit()
+                # Emit WebSocket event for settings change
+                self._emit_settings_changed([key])

            return True
        except SQLAlchemyError as e:
@@ -365,6 +367,8 @@ class SettingsManager:

            if commit:
                self.db_session.commit()
+                # Emit WebSocket event for settings change
+                self._emit_settings_changed([setting_obj.key])

            return db_setting

@@ -522,6 +526,8 @@ class SettingsManager:

        if commit:
            self.db_session.commit()
+            # Emit WebSocket event for all imported settings
+            self._emit_settings_changed(list(settings_data.keys()))

    def _create_setting(self, key, value, setting_type):
        """Create a setting with appropriate metadata"""
@@ -578,3 +584,50 @@ class SettingsManager:

            # Create the setting in the database
            self.create_or_update_setting(setting_dict, commit=False)
+
+    def _emit_settings_changed(self, changed_keys: list = None):
+        """
+        Emit WebSocket event when settings change
+
+        Args:
+            changed_keys: List of setting keys that changed
+        """
+        try:
+            # Import here to avoid circular imports
+            from .socket_service import SocketIOService
+
+            try:
+                socket_service = SocketIOService()
+            except ValueError:
+                logger.debug(
+                    "Not emitting socket event because server is not initialized."
+                )
+                return
+
+            # Get the changed settings
+            settings_data = {}
+            if changed_keys:
+                for key in changed_keys:
+                    setting_value = self.get_setting(key)
+                    if setting_value is not None:
+                        settings_data[key] = {"value": setting_value}
+
+            # Emit the settings change event
+            from datetime import datetime
+
+            socket_service.emit_socket_event(
+                "settings_changed",
+                {
+                    "changed_keys": changed_keys or [],
+                    "settings": settings_data,
+                    "timestamp": datetime.utcnow().isoformat(),
+                },
+            )
+
+            logger.debug(
+                f"Emitted settings_changed event for keys: {changed_keys}"
+            )
+
+        except Exception as e:
+            logger.exception(f"Failed to emit settings change event: {e}")
+            # Don't let WebSocket emission failures break settings saving
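Any connected Socket.IO client now receives a settings_changed event whenever a setting is written. The web UI consumes it from JavaScript, but a minimal listener sketch with the python-socketio client (the server URL is an assumption) looks like this:

import socketio

sio = socketio.Client()


@sio.on("settings_changed")
def on_settings_changed(data):
    # Payload mirrors what _emit_settings_changed() builds above.
    print("changed keys:", data.get("changed_keys", []))
    for key, entry in data.get("settings", {}).items():
        print(f"  {key} -> {entry['value']}")


sio.connect("http://localhost:5000")
sio.wait()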
local_deep_research/web/services/settings_service.py
@@ -112,6 +112,8 @@ def bulk_update_settings(
    if commit and success and manager.db_session:
        try:
            manager.db_session.commit()
+            # Emit WebSocket event for all changed settings
+            manager._emit_settings_changed(list(settings_dict.keys()))
        except Exception:
            logger.exception("Error committing bulk settings update")
            manager.db_session.rollback()
local_deep_research/web/static/css/styles.css
@@ -1678,6 +1678,14 @@ textarea:focus, input[type="text"]:focus {
     transition: all 0.3s ease;
 }

+/* Allow form controls to be interactive even when panel is collapsed */
+.advanced-options-panel select,
+.advanced-options-panel input,
+.advanced-options-panel button,
+.advanced-options-panel textarea {
+    pointer-events: auto;
+}
+
 .advanced-options-toggle.open + .advanced-options-panel,
 .advanced-options-panel.expanded {
     max-height: 1000px;