local-deep-research 0.3.12__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- local_deep_research/__version__.py +1 -1
- local_deep_research/advanced_search_system/filters/base_filter.py +2 -3
- local_deep_research/advanced_search_system/filters/cross_engine_filter.py +4 -5
- local_deep_research/advanced_search_system/filters/journal_reputation_filter.py +298 -0
- local_deep_research/advanced_search_system/findings/repository.py +0 -3
- local_deep_research/advanced_search_system/strategies/base_strategy.py +1 -2
- local_deep_research/advanced_search_system/strategies/iterdrag_strategy.py +14 -18
- local_deep_research/advanced_search_system/strategies/parallel_search_strategy.py +4 -8
- local_deep_research/advanced_search_system/strategies/rapid_search_strategy.py +5 -6
- local_deep_research/advanced_search_system/strategies/source_based_strategy.py +2 -2
- local_deep_research/advanced_search_system/strategies/standard_strategy.py +9 -7
- local_deep_research/api/benchmark_functions.py +288 -0
- local_deep_research/api/research_functions.py +8 -4
- local_deep_research/benchmarks/README.md +162 -0
- local_deep_research/benchmarks/__init__.py +51 -0
- local_deep_research/benchmarks/benchmark_functions.py +353 -0
- local_deep_research/benchmarks/cli/__init__.py +16 -0
- local_deep_research/benchmarks/cli/benchmark_commands.py +338 -0
- local_deep_research/benchmarks/cli.py +347 -0
- local_deep_research/benchmarks/comparison/__init__.py +12 -0
- local_deep_research/benchmarks/comparison/evaluator.py +768 -0
- local_deep_research/benchmarks/datasets/__init__.py +53 -0
- local_deep_research/benchmarks/datasets/base.py +295 -0
- local_deep_research/benchmarks/datasets/browsecomp.py +116 -0
- local_deep_research/benchmarks/datasets/custom_dataset_template.py +98 -0
- local_deep_research/benchmarks/datasets/simpleqa.py +74 -0
- local_deep_research/benchmarks/datasets/utils.py +116 -0
- local_deep_research/benchmarks/datasets.py +31 -0
- local_deep_research/benchmarks/efficiency/__init__.py +14 -0
- local_deep_research/benchmarks/efficiency/resource_monitor.py +367 -0
- local_deep_research/benchmarks/efficiency/speed_profiler.py +214 -0
- local_deep_research/benchmarks/evaluators/__init__.py +18 -0
- local_deep_research/benchmarks/evaluators/base.py +74 -0
- local_deep_research/benchmarks/evaluators/browsecomp.py +83 -0
- local_deep_research/benchmarks/evaluators/composite.py +121 -0
- local_deep_research/benchmarks/evaluators/simpleqa.py +271 -0
- local_deep_research/benchmarks/graders.py +410 -0
- local_deep_research/benchmarks/metrics/README.md +80 -0
- local_deep_research/benchmarks/metrics/__init__.py +24 -0
- local_deep_research/benchmarks/metrics/calculation.py +385 -0
- local_deep_research/benchmarks/metrics/reporting.py +155 -0
- local_deep_research/benchmarks/metrics/visualization.py +205 -0
- local_deep_research/benchmarks/metrics.py +11 -0
- local_deep_research/benchmarks/optimization/__init__.py +32 -0
- local_deep_research/benchmarks/optimization/api.py +274 -0
- local_deep_research/benchmarks/optimization/metrics.py +20 -0
- local_deep_research/benchmarks/optimization/optuna_optimizer.py +1163 -0
- local_deep_research/benchmarks/runners.py +434 -0
- local_deep_research/benchmarks/templates.py +65 -0
- local_deep_research/config/llm_config.py +26 -23
- local_deep_research/config/search_config.py +1 -5
- local_deep_research/defaults/default_settings.json +108 -7
- local_deep_research/search_system.py +16 -8
- local_deep_research/utilities/db_utils.py +3 -6
- local_deep_research/utilities/es_utils.py +441 -0
- local_deep_research/utilities/log_utils.py +36 -0
- local_deep_research/utilities/search_utilities.py +8 -9
- local_deep_research/web/app.py +7 -9
- local_deep_research/web/app_factory.py +9 -12
- local_deep_research/web/database/migrations.py +8 -5
- local_deep_research/web/database/models.py +20 -0
- local_deep_research/web/database/schema_upgrade.py +5 -8
- local_deep_research/web/models/database.py +15 -18
- local_deep_research/web/routes/benchmark_routes.py +427 -0
- local_deep_research/web/routes/research_routes.py +13 -17
- local_deep_research/web/routes/settings_routes.py +264 -67
- local_deep_research/web/services/research_service.py +47 -57
- local_deep_research/web/services/settings_manager.py +1 -4
- local_deep_research/web/services/settings_service.py +4 -6
- local_deep_research/web/static/css/styles.css +12 -0
- local_deep_research/web/static/js/components/logpanel.js +164 -155
- local_deep_research/web/static/js/components/research.js +44 -3
- local_deep_research/web/static/js/components/settings.js +27 -0
- local_deep_research/web/static/js/services/socket.js +47 -0
- local_deep_research/web_search_engines/default_search_engines.py +38 -0
- local_deep_research/web_search_engines/engines/meta_search_engine.py +100 -33
- local_deep_research/web_search_engines/engines/search_engine_arxiv.py +31 -17
- local_deep_research/web_search_engines/engines/search_engine_brave.py +8 -3
- local_deep_research/web_search_engines/engines/search_engine_elasticsearch.py +343 -0
- local_deep_research/web_search_engines/engines/search_engine_google_pse.py +14 -6
- local_deep_research/web_search_engines/engines/search_engine_local.py +19 -23
- local_deep_research/web_search_engines/engines/search_engine_local_all.py +9 -12
- local_deep_research/web_search_engines/engines/search_engine_searxng.py +12 -17
- local_deep_research/web_search_engines/engines/search_engine_serpapi.py +8 -4
- local_deep_research/web_search_engines/search_engine_base.py +22 -5
- local_deep_research/web_search_engines/search_engine_factory.py +32 -11
- local_deep_research/web_search_engines/search_engines_config.py +14 -1
- {local_deep_research-0.3.12.dist-info → local_deep_research-0.4.0.dist-info}/METADATA +10 -2
- {local_deep_research-0.3.12.dist-info → local_deep_research-0.4.0.dist-info}/RECORD +92 -49
- {local_deep_research-0.3.12.dist-info → local_deep_research-0.4.0.dist-info}/WHEEL +0 -0
- {local_deep_research-0.3.12.dist-info → local_deep_research-0.4.0.dist-info}/entry_points.txt +0 -0
- {local_deep_research-0.3.12.dist-info → local_deep_research-0.4.0.dist-info}/licenses/LICENSE +0 -0
local_deep_research/web/routes/settings_routes.py
@@ -1,5 +1,4 @@
 import json
-import logging
 import os
 import platform
 import subprocess
@@ -16,6 +15,7 @@ from flask import (
     url_for,
 )
 from flask_wtf.csrf import generate_csrf
+from loguru import logger
 from sqlalchemy.orm import Session
 
 from ...utilities.db_utils import get_db_setting
@@ -29,9 +29,6 @@ from ..services.settings_service import (
 )
 from ..utils.templates import render_template_with_defaults
 
-# Initialize logger
-logger = logging.getLogger(__name__)
-
 # Create a Blueprint for settings
 settings_bp = Blueprint("settings", __name__, url_prefix="/research/settings")
 
@@ -343,7 +340,7 @@ def save_all_settings():
         )
 
     except Exception as e:
-        logger.
+        logger.exception("Error saving settings")
         return (
             jsonify({"status": "error", "message": f"Error saving settings: {str(e)}"}),
             500,
@@ -364,8 +361,8 @@ def reset_to_defaults():
 
         logger.info("Successfully imported settings from default files")
 
-    except Exception
-        logger.
+    except Exception:
+        logger.exception("Error importing default settings")
 
         # Fallback to predefined settings if file import fails
         logger.info("Falling back to predefined settings")
@@ -414,7 +411,7 @@ def api_get_all_settings():
 
         return jsonify({"status": "success", "settings": settings})
     except Exception as e:
-        logger.
+        logger.exception("Error getting settings")
         return jsonify({"error": str(e)}), 500
 
 
@@ -457,7 +454,7 @@ def api_get_setting(key):
 
         return jsonify({"settings": setting_data})
     except Exception as e:
-        logger.
+        logger.exception(f"Error getting setting {key}")
         return jsonify({"error": str(e)}), 500
 
 
@@ -540,7 +537,7 @@ def api_update_setting(key):
         else:
             return jsonify({"error": f"Failed to create setting {key}"}), 500
     except Exception as e:
-        logger.
+        logger.exception(f"Error updating setting {key}")
         return jsonify({"error": str(e)}), 500
 
 
@@ -563,7 +560,7 @@ def api_delete_setting(key):
         else:
             return jsonify({"error": f"Failed to delete setting {key}"}), 500
     except Exception as e:
-        logger.
+        logger.exception(f"Error deleting setting {key}")
         return jsonify({"error": str(e)}), 500
 
 
@@ -581,7 +578,7 @@ def api_import_settings():
         else:
             return jsonify({"error": "Failed to import settings"}), 500
     except Exception as e:
-        logger.
+        logger.exception("Error importing settings")
         return jsonify({"error": str(e)}), 500
 
 
@@ -597,7 +594,7 @@ def api_get_categories():
 
         return jsonify({"categories": category_list})
     except Exception as e:
-        logger.
+        logger.exception("Error getting categories")
         return jsonify({"error": str(e)}), 500
 
 
@@ -609,7 +606,7 @@ def api_get_types():
         types = [t.value for t in SettingType]
         return jsonify({"types": types})
     except Exception as e:
-        logger.
+        logger.exception("Error getting types")
         return jsonify({"error": str(e)}), 500
 
 
@@ -633,7 +630,7 @@ def api_get_ui_elements():
 
         return jsonify({"ui_elements": ui_elements})
     except Exception as e:
-        logger.
+        logger.exception("Error getting UI elements")
         return jsonify({"error": str(e)}), 500
 
 
@@ -662,11 +659,10 @@ def api_get_available_models():
         import re
 
         import requests
-        from flask import current_app
 
         # Try to query the Ollama API directly
         try:
-
+            logger.info("Attempting to connect to Ollama API")
 
             raw_base_url = get_db_setting(
                 "llm.ollama.url", "http://localhost:11434"
@@ -679,26 +675,24 @@
 
             ollama_response = requests.get(f"{base_url}/api/tags", timeout=5)
 
-
+            logger.debug(
                 f"Ollama API response: Status {ollama_response.status_code}"
             )
 
             # Try to parse the response even if status code is not 200 to help with debugging
             response_text = ollama_response.text
-
-                f"Ollama API raw response: {response_text[:500]}..."
-            )
+            logger.debug(f"Ollama API raw response: {response_text[:500]}...")
 
             if ollama_response.status_code == 200:
                 try:
                     ollama_data = ollama_response.json()
-
+                    logger.debug(
                         f"Ollama API JSON data: {json.dumps(ollama_data)[:500]}..."
                     )
 
                     if "models" in ollama_data:
                         # Format for newer Ollama API
-
+                        logger.info(
                             f"Found {len(ollama_data.get('models', []))} models in newer Ollama API format"
                         )
                         for model in ollama_data.get("models", []):
@@ -719,12 +713,12 @@
                                         "provider": "OLLAMA",  # Add provider field for consistency
                                     }
                                 )
-
+                                logger.debug(
                                     f"Added Ollama model: {name} -> {display_name}"
                                 )
                     else:
                         # Format for older Ollama API
-
+                        logger.info(
                             f"Found {len(ollama_data)} models in older Ollama API format"
                         )
                         for model in ollama_data:
@@ -743,7 +737,7 @@
                                         "provider": "OLLAMA",  # Add provider field for consistency
                                     }
                                 )
-
+                                logger.debug(
                                     f"Added Ollama model: {name} -> {display_name}"
                                 )
 
@@ -751,12 +745,12 @@
                     ollama_models.sort(key=lambda x: x["label"])
 
                 except json.JSONDecodeError as json_err:
-
+                    logger.error(
                         f"Failed to parse Ollama API response as JSON: {json_err}"
                     )
                     raise Exception(f"Ollama API returned invalid JSON: {json_err}")
             else:
-
+                logger.warning(
                     f"Ollama API returned non-200 status code: {ollama_response.status_code}"
                 )
                 raise Exception(
@@ -764,11 +758,9 @@
                 )
 
         except requests.exceptions.RequestException as e:
-
+            logger.warning(f"Could not connect to Ollama API: {str(e)}")
             # Fallback to default models if Ollama is not running
-
-                "Using fallback Ollama models due to connection error"
-            )
+            logger.info("Using fallback Ollama models due to connection error")
             ollama_models = [
                 {
                     "value": "llama3",
@@ -789,19 +781,17 @@
 
             # Always set the ollama_models in providers, whether we got real or fallback models
             providers["ollama_models"] = ollama_models
-
+            logger.info(f"Final Ollama models count: {len(ollama_models)}")
 
             # Log some model names for debugging
             if ollama_models:
                 model_names = [m["value"] for m in ollama_models[:5]]
-
-                    f"Sample Ollama models: {', '.join(model_names)}"
-                )
+                logger.info(f"Sample Ollama models: {', '.join(model_names)}")
 
-        except Exception
-
+        except Exception:
+            logger.exception("Error getting Ollama models")
             # Use fallback models
-
+            logger.info("Using fallback Ollama models due to error")
             providers["ollama_models"] = [
                 {"value": "llama3", "label": "Llama 3 (Ollama)", "provider": "OLLAMA"},
                 {"value": "mistral", "label": "Mistral (Ollama)", "provider": "OLLAMA"},
@@ -812,37 +802,243 @@
             },
         ]
 
-        #
-
-
-
-
-
+        # Try to get Custom OpenAI Endpoint models using the OpenAI package
+        openai_endpoint_models = []
+        try:
+            logger.info(
+                "Attempting to connect to Custom OpenAI Endpoint using OpenAI package"
+            )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+            # Get the endpoint URL and API key from settings
+            endpoint_url = get_db_setting("llm.openai_endpoint.url", "")
+            api_key = get_db_setting("llm.openai_endpoint.api_key", "")
+
+            if endpoint_url and api_key:
+                # Import OpenAI package here to avoid dependency issues if not installed
+                import openai
+                from openai import OpenAI
+
+                # Create OpenAI client with custom endpoint
+                client = OpenAI(api_key=api_key, base_url=endpoint_url)
+
+                try:
+                    # Fetch models using the client
+                    logger.debug("Fetching models from OpenAI API")
+                    models_response = client.models.list()
+
+                    # Process models from the response
+                    for model in models_response.data:
+                        model_id = model.id
+                        if model_id:
+                            # Create a clean display name
+                            display_name = model_id.replace("-", " ").strip()
+                            display_name = " ".join(
+                                word.capitalize() for word in display_name.split()
+                            )
+
+                            openai_endpoint_models.append(
+                                {
+                                    "value": model_id,
+                                    "label": f"{display_name} (Custom)",
+                                    "provider": "OPENAI_ENDPOINT",
+                                }
+                            )
+                            logger.debug(
+                                f"Added Custom OpenAI Endpoint model: {model_id} -> {display_name}"
+                            )
+
+                    # Sort models alphabetically
+                    openai_endpoint_models.sort(key=lambda x: x["label"])
+
+                except openai.APIError as api_err:
+                    logger.error(f"OpenAI API error: {str(api_err)}")
+                    raise Exception(f"OpenAI API error: {str(api_err)}")
+
+            else:
+                logger.info("OpenAI Endpoint URL or API key not configured")
+                # Don't raise an exception, just continue with empty models list
+
+        except ImportError:
+            logger.warning(
+                "OpenAI package not installed. Using manual API request fallback."
+            )
+            # Fallback to manual API request if OpenAI package is not installed
+            try:
+                if endpoint_url and api_key:
+                    # Ensure the URL ends with a slash
+                    if not endpoint_url.endswith("/"):
+                        endpoint_url += "/"
+
+                    # Make the request to the endpoint's models API
+                    headers = {"Authorization": f"Bearer {api_key}"}
+                    endpoint_response = requests.get(
+                        f"{endpoint_url}models", headers=headers, timeout=5
+                    )
+
+                    if endpoint_response.status_code == 200:
+                        endpoint_data = endpoint_response.json()
+                        # Process models from the response
+                        if "data" in endpoint_data:
+                            for model in endpoint_data.get("data", []):
+                                model_id = model.get("id", "")
+                                if model_id:
+                                    # Create a clean display name
+                                    display_name = model_id.replace("-", " ").strip()
+                                    display_name = " ".join(
+                                        word.capitalize()
+                                        for word in display_name.split()
+                                    )
+
+                                    openai_endpoint_models.append(
+                                        {
+                                            "value": model_id,
+                                            "label": f"{display_name} (Custom)",
+                                            "provider": "OPENAI_ENDPOINT",
+                                        }
+                                    )
+            except Exception as e:
+                logger.error(f"Fallback API request failed: {str(e)}")
+
+        except Exception as e:
+            logger.error(f"Error getting OpenAI Endpoint models: {str(e)}")
+            # Use fallback models (empty in this case)
+            logger.info("Using fallback (empty) OpenAI Endpoint models due to error")
+
+        # Always set the openai_endpoint_models in providers
+        providers["openai_endpoint_models"] = openai_endpoint_models
+        logger.info(
+            f"Final OpenAI Endpoint models count: {len(openai_endpoint_models)}"
+        )
+
+        # Get OpenAI models using the OpenAI package
+        openai_models = []
+        try:
+            logger.info("Attempting to connect to OpenAI API using OpenAI package")
+
+            # Get the API key from settings
+            api_key = get_db_setting("llm.openai.api_key", "")
+
+            if api_key:
+                # Import OpenAI package here to avoid dependency issues if not installed
+                import openai
+                from openai import OpenAI
+
+                # Create OpenAI client
+                client = OpenAI(api_key=api_key)
+
+                try:
+                    # Fetch models using the client
+                    logger.debug("Fetching models from OpenAI API")
+                    models_response = client.models.list()
+
+                    # Process models from the response
+                    for model in models_response.data:
+                        model_id = model.id
+                        if model_id:
+                            # Create a clean display name
+                            display_name = model_id.replace("-", " ").strip()
+                            display_name = " ".join(
+                                word.capitalize() for word in display_name.split()
+                            )
+
+                            openai_models.append(
+                                {
+                                    "value": model_id,
+                                    "label": f"{display_name} (OpenAI)",
+                                    "provider": "OPENAI",
+                                }
+                            )
+                            logger.debug(
+                                f"Added OpenAI model: {model_id} -> {display_name}"
+                            )
+
+                    # Sort models alphabetically
+                    openai_models.sort(key=lambda x: x["label"])
+
+                except openai.APIError as api_err:
+                    logger.error(f"OpenAI API error: {str(api_err)}")
+                    logger.info("No OpenAI models found due to API error")
+
+            else:
+                logger.info("OpenAI API key not configured, no models available")
+
+        except ImportError:
+            logger.warning("OpenAI package not installed. No models available.")
+        except Exception as e:
+            logger.error(f"Error getting OpenAI models: {str(e)}")
+            logger.info("No OpenAI models available due to error")
+
+        # Always set the openai_models in providers (will be empty array if no models found)
+        providers["openai_models"] = openai_models
+        logger.info(f"Final OpenAI models count: {len(openai_models)}")
+
+        # Try to get Anthropic models using the Anthropic package
+        anthropic_models = []
+        try:
+            logger.info(
+                "Attempting to connect to Anthropic API using Anthropic package"
+            )
+
+            # Get the API key from settings
+            api_key = get_db_setting("llm.anthropic.api_key", "")
+
+            if api_key:
+                # Import Anthropic package here to avoid dependency issues if not installed
+                from anthropic import Anthropic
+
+                # Create Anthropic client
+                client = Anthropic(api_key=api_key)
+
+                try:
+                    # Fetch models using the client
+                    logger.debug("Fetching models from Anthropic API")
+                    models_response = client.models.list()
+
+                    # Process models from the response
+                    for model in models_response.data:
+                        model_id = model.id
+                        if model_id:
+                            # Create a clean display name
+                            display_name = model_id.replace("-", " ").strip()
+                            display_name = " ".join(
+                                word.capitalize() for word in display_name.split()
+                            )
+
+                            anthropic_models.append(
+                                {
+                                    "value": model_id,
+                                    "label": f"{display_name} (Anthropic)",
+                                    "provider": "ANTHROPIC",
+                                }
+                            )
+                            logger.debug(
+                                f"Added Anthropic model: {model_id} -> {display_name}"
+                            )
+
+                    # Sort models alphabetically
+                    anthropic_models.sort(key=lambda x: x["label"])
+
+                except Exception as api_err:
+                    logger.error(f"Anthropic API error: {str(api_err)}")
+            else:
+                logger.info("Anthropic API key not configured")
+
+        except ImportError:
+            logger.warning(
+                "Anthropic package not installed. No models will be available."
+            )
+        except Exception as e:
+            logger.error(f"Error getting Anthropic models: {str(e)}")
+
+        # Set anthropic_models in providers (could be empty if API call failed)
+        providers["anthropic_models"] = anthropic_models
+        logger.info(f"Final Anthropic models count: {len(anthropic_models)}")
 
         # Return all options
         return jsonify({"provider_options": provider_options, "providers": providers})
 
     except Exception as e:
-
-
-        error_trace = traceback.format_exc()
-        current_app.logger.error(
-            f"Error getting available models: {str(e)}\n{error_trace}"
-        )
+        logger.exception("Error getting available models")
         return jsonify({"status": "error", "message": str(e)}), 500
 
 
@@ -907,7 +1103,7 @@ def api_get_available_search_engines():
         return jsonify({"engines": engines_dict, "engine_options": engine_options})
 
     except Exception as e:
-        logger.
+        logger.exception("Error getting available search engines")
         return jsonify({"error": str(e)}), 500
 
 
@@ -959,6 +1155,7 @@ def open_file_location():
 
         flash(f"Opening folder: {dir_path}", "success")
     except Exception as e:
+        logger.exception("Error opening folder")
         flash(f"Error opening folder: {str(e)}", "error")
 
     # Redirect back to the settings page
@@ -1261,7 +1458,7 @@ def fix_corrupted_settings():
         )
 
     except Exception as e:
-        logger.
+        logger.exception("Error fixing corrupted settings")
         db_session.rollback()
         return (
             jsonify(
@@ -1298,5 +1495,5 @@ def check_ollama_status():
             }
         )
     except requests.exceptions.RequestException as e:
-        logger.
+        logger.exception("Ollama check failed")
        return jsonify({"running": False, "error": str(e)})
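
The recurring change across these hunks is the switch from the standard-library logging module to loguru, with the route error handlers converging on logger.exception(...). A minimal sketch of that pattern, assuming a Flask route; api_example and risky_operation are hypothetical names used only for illustration and are not part of the package:

# Sketch of the loguru error-handling pattern adopted in 0.4.0; not copied from the package.
from flask import Flask, jsonify
from loguru import logger  # module-level logger, replaces logging.getLogger(__name__)

app = Flask(__name__)


def risky_operation():
    # Hypothetical stand-in for the real work a settings route performs.
    raise ValueError("example failure")


@app.route("/example")
def api_example():
    try:
        risky_operation()
        return jsonify({"status": "success"})
    except Exception as e:
        # logger.exception() records the message together with the active
        # traceback, so no manual traceback.format_exc() plumbing is needed.
        logger.exception("Error in api_example")
        return jsonify({"error": str(e)}), 500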