local-deep-research 0.1.24__tar.gz → 0.1.25__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {local_deep_research-0.1.24/src/local_deep_research.egg-info → local_deep_research-0.1.25}/PKG-INFO +38 -25
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/README.md +36 -24
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/pyproject.toml +2 -1
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/config.py +12 -1
- local_deep_research-0.1.25/src/local_deep_research/defaults/.env.template +12 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/defaults/llm_config.py +90 -49
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/search_engine_factory.py +4 -1
- {local_deep_research-0.1.24 → local_deep_research-0.1.25/src/local_deep_research.egg-info}/PKG-INFO +38 -25
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research.egg-info/requires.txt +1 -0
- local_deep_research-0.1.24/src/local_deep_research/defaults/.env.template +0 -11
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/LICENSE +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/MANIFEST.in +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/setup.cfg +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/__init__.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/api/__init__.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/api/research_functions.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/citation_handler.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/defaults/__init__.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/defaults/local_collections.toml +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/defaults/main.toml +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/defaults/search_engines.toml +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/main.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/report_generator.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/search_system.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/utilties/__init__.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/utilties/enums.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/utilties/llm_utils.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/utilties/search_utilities.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/utilties/setup_utils.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/__init__.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/app.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/static/css/styles.css +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/static/js/app.js +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/static/sounds/README.md +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/static/sounds/error.mp3 +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/static/sounds/success.mp3 +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/templates/api_keys_config.html +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/templates/collections_config.html +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/templates/index.html +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/templates/llm_config.html +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/templates/main_config.html +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/templates/search_engines_config.html +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/templates/settings.html +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web/templates/settings_dashboard.html +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/__init__.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/__init__.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/full_search.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/meta_search_engine.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/search_engine_arxiv.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/search_engine_brave.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/search_engine_ddg.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/search_engine_github.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/search_engine_google_pse.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/search_engine_guardian.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/search_engine_local.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/search_engine_local_all.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/search_engine_pubmed.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/search_engine_searxng.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/search_engine_semantic_scholar.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/search_engine_serpapi.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/search_engine_wayback.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/engines/search_engine_wikipedia.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/search_engine_base.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/search_engines_config.py +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research.egg-info/SOURCES.txt +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research.egg-info/dependency_links.txt +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research.egg-info/entry_points.txt +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research.egg-info/top_level.txt +0 -0
- {local_deep_research-0.1.24 → local_deep_research-0.1.25}/tests/test_google_pse.py +0 -0
{local_deep_research-0.1.24/src/local_deep_research.egg-info → local_deep_research-0.1.25}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: local-deep-research
-Version: 0.1.24
+Version: 0.1.25
 Summary: AI-powered research assistant with deep, iterative analysis using LLMs and web searches
 Author-email: LearningCircuit <185559241+LearningCircuit@users.noreply.github.com>, HashedViking <6432677+HashedViking@users.noreply.github.com>
 License: MIT License
@@ -65,6 +65,7 @@ Requires-Dist: xmltodict>=0.13.0
 Requires-Dist: lxml>=4.9.2
 Requires-Dist: pdfplumber>=0.9.0
 Requires-Dist: unstructured>=0.10.0
+Requires-Dist: google-search-results
 Dynamic: license-file
 
 # Local Deep Research
@@ -79,7 +80,15 @@ A powerful AI-powered research assistant that performs deep, iterative analysis
 </a>
 </div>
 
-##
+## Windows Installation
+
+Download the [Windows Installer](https://github.com/LearningCircuit/local-deep-research/releases/download/v0.1.0/LocalDeepResearch_Setup.exe) for easy one-click installation.
+
+**Requires Ollama or other model preinstalled.**
+Download from https://ollama.ai and then pull a model
+ollama pull gemma3:12b
+
+## Quick Start (not required if installed with windows installer)
 
 ```bash
 # Install the package
@@ -107,12 +116,12 @@ Access the web interface at `http://127.0.0.1:5000` in your browser.
 
 ## Docker Support
 
-
+Build the image first if you haven't already
 ```bash
 docker build -t local-deep-research .
 ```
 
-
+Quick Docker Run
 
 ```bash
 # Run with default settings (connects to Ollama running on the host)
@@ -126,6 +135,31 @@ For comprehensive Docker setup information, see:
 - [Docker Usage Guide](https://github.com/LearningCircuit/local-deep-research/blob/main/docs/docker-usage-readme.md)
 - [Docker Compose Guide](https://github.com/LearningCircuit/local-deep-research/blob/main/docs/docker-compose-guide.md)
 
+## Programmatic Access
+
+Local Deep Research now provides a simple API for programmatic access to its research capabilities:
+
+```python
+import os
+# Set environment variables to control the LLM
+os.environ["LDR_LLM__MODEL"] = "mistral"  # Specify model name
+
+from local_deep_research import quick_summary, generate_report, analyze_documents
+
+# Generate a quick research summary with custom parameters
+results = quick_summary(
+    query="advances in fusion energy",
+    search_tool="auto",            # Auto-select the best search engine
+    iterations=1,                  # Single research cycle for speed
+    questions_per_iteration=2,     # Generate 2 follow-up questions
+    max_results=30,                # Consider up to 30 search results
+    temperature=0.7                # Control creativity of generation
+)
+print(results["summary"])
+```
+
+These functions provide flexible options for customizing the search parameters, iterations, and output formats. For more examples, see the [programmatic access tutorial](https://github.com/LearningCircuit/local-deep-research/blob/main/examples/programmatic_access.ipynb).
+
 
 ## Features
 
@@ -308,27 +342,6 @@ You can use local document search in several ways:
 3. **All collections**: Set `tool = "local_all"` to search across all collections
 4. **Query syntax**: Type `collection:project_docs your query` to target a specific collection
 
-## Programmatic Access
-
-Local Deep Research now provides a simple API for programmatic access to its research capabilities:
-
-```python
-from local_deep_research import quick_summary, generate_report
-
-# Generate a quick research summary
-results = quick_summary("advances in fusion energy")
-print(results["summary"])
-
-# Create a comprehensive structured report
-report = generate_report("impact of quantum computing on cryptography")
-print(report["content"])
-
-# Analyze documents in a local collection
-from local_deep_research import analyze_documents
-docs = analyze_documents("renewable energy", "research_papers")
-```
-
-These functions provide flexible options for customizing the search parameters, iterations, and output formats. For more examples, see the [programmatic access tutorial](https://github.com/LearningCircuit/local-deep-research/blob/programmatic-access/examples/programmatic_access.ipynb).
 
 ## Advanced Configuration
{local_deep_research-0.1.24 → local_deep_research-0.1.25}/README.md
CHANGED
@@ -10,7 +10,15 @@ A powerful AI-powered research assistant that performs deep, iterative analysis
 </a>
 </div>
 
-##
+## Windows Installation
+
+Download the [Windows Installer](https://github.com/LearningCircuit/local-deep-research/releases/download/v0.1.0/LocalDeepResearch_Setup.exe) for easy one-click installation.
+
+**Requires Ollama or other model preinstalled.**
+Download from https://ollama.ai and then pull a model
+ollama pull gemma3:12b
+
+## Quick Start (not required if installed with windows installer)
 
 ```bash
 # Install the package
@@ -38,12 +46,12 @@ Access the web interface at `http://127.0.0.1:5000` in your browser.
 
 ## Docker Support
 
-
+Build the image first if you haven't already
 ```bash
 docker build -t local-deep-research .
 ```
 
-
+Quick Docker Run
 
 ```bash
 # Run with default settings (connects to Ollama running on the host)
@@ -57,6 +65,31 @@ For comprehensive Docker setup information, see:
 - [Docker Usage Guide](https://github.com/LearningCircuit/local-deep-research/blob/main/docs/docker-usage-readme.md)
 - [Docker Compose Guide](https://github.com/LearningCircuit/local-deep-research/blob/main/docs/docker-compose-guide.md)
 
+## Programmatic Access
+
+Local Deep Research now provides a simple API for programmatic access to its research capabilities:
+
+```python
+import os
+# Set environment variables to control the LLM
+os.environ["LDR_LLM__MODEL"] = "mistral"  # Specify model name
+
+from local_deep_research import quick_summary, generate_report, analyze_documents
+
+# Generate a quick research summary with custom parameters
+results = quick_summary(
+    query="advances in fusion energy",
+    search_tool="auto",            # Auto-select the best search engine
+    iterations=1,                  # Single research cycle for speed
+    questions_per_iteration=2,     # Generate 2 follow-up questions
+    max_results=30,                # Consider up to 30 search results
+    temperature=0.7                # Control creativity of generation
+)
+print(results["summary"])
+```
+
+These functions provide flexible options for customizing the search parameters, iterations, and output formats. For more examples, see the [programmatic access tutorial](https://github.com/LearningCircuit/local-deep-research/blob/main/examples/programmatic_access.ipynb).
+
 
 ## Features
 
@@ -239,27 +272,6 @@ You can use local document search in several ways:
 3. **All collections**: Set `tool = "local_all"` to search across all collections
 4. **Query syntax**: Type `collection:project_docs your query` to target a specific collection
 
-## Programmatic Access
-
-Local Deep Research now provides a simple API for programmatic access to its research capabilities:
-
-```python
-from local_deep_research import quick_summary, generate_report
-
-# Generate a quick research summary
-results = quick_summary("advances in fusion energy")
-print(results["summary"])
-
-# Create a comprehensive structured report
-report = generate_report("impact of quantum computing on cryptography")
-print(report["content"])
-
-# Analyze documents in a local collection
-from local_deep_research import analyze_documents
-docs = analyze_documents("renewable energy", "research_papers")
-```
-
-These functions provide flexible options for customizing the search parameters, iterations, and output formats. For more examples, see the [programmatic access tutorial](https://github.com/LearningCircuit/local-deep-research/blob/programmatic-access/examples/programmatic_access.ipynb).
 
 ## Advanced Configuration
{local_deep_research-0.1.24 → local_deep_research-0.1.25}/pyproject.toml
CHANGED
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "local-deep-research"
-version = "0.1.24"
+version = "0.1.25"
 description = "AI-powered research assistant with deep, iterative analysis using LLMs and web searches"
 readme = "README.md"
 requires-python = ">=3.8"
@@ -51,6 +51,7 @@ dependencies = [
     "lxml>=4.9.2",
     "pdfplumber>=0.9.0",
    "unstructured>=0.10.0",
+    "google-search-results",
 
 ]
 
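For context: `google-search-results` is the SerpAPI client package on PyPI, which backs the SerpAPI search engine (`search_engine_serpapi.py`). A minimal sketch of how that client is typically used; the query string and key below are illustrative placeholders, not values from this package:

```python
# Minimal sketch of the google-search-results (SerpAPI) client.
# The query and API key are illustrative; local-deep-research normally reads
# the key from SERP_API_KEY / LDR_SERP_API_KEY instead of hard-coding it.
from serpapi import GoogleSearch

search = GoogleSearch({
    "q": "local deep research",     # example query
    "api_key": "your-serpapi-key",  # placeholder
})
results = search.get_dict()         # full JSON response as a dict
for item in results.get("organic_results", []):
    print(item.get("title"), item.get("link"))
```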
{local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/config.py
CHANGED
@@ -6,10 +6,12 @@ from platformdirs import user_documents_dir
 import os
 # Setup logging
 logger = logging.getLogger(__name__)
+from dotenv import load_dotenv
+import platform
 
 # Get config directory
 def get_config_dir():
-
+
 
     if platform.system() == "Windows":
         # Windows: Use Documents directory
@@ -32,7 +34,16 @@ SEARCH_ENGINES_FILE = CONFIG_DIR / "search_engines.toml"
 
 LOCAL_COLLECTIONS_FILE = CONFIG_DIR / "local_collections.toml"
 
+# Load the .env file explicitly
+# Load the .env file explicitly
+config_dir = get_config_dir()
+env_file = config_dir / ".env"
 
+if env_file.exists():
+    logger.info(f"Loading environment variables from: {env_file}")
+    load_dotenv(dotenv_path=env_file)
+else:
+    logger.warning(f"Warning: .env file not found at {env_file}")
 # Set environment variable for Dynaconf to use
 docs_base = Path(user_documents_dir()) / "local_deep_research"
 os.environ["DOCS_DIR"] = str(docs_base)
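The effect of this change is that API keys and other settings can now live in a `.env` file inside the per-user config directory, rather than only in the process environment. A standalone sketch of the same pattern; the helper name `load_user_env` is ours for illustration, the package runs this logic at import time in `config.py`:

```python
# Standalone sketch of the .env-loading pattern added in config.py.
# load_user_env is an illustrative name, not a function in the package.
import logging
from pathlib import Path

from dotenv import load_dotenv

logger = logging.getLogger(__name__)

def load_user_env(config_dir: Path) -> bool:
    """Load KEY=value pairs from <config_dir>/.env into os.environ."""
    env_file = config_dir / ".env"
    if env_file.exists():
        logger.info(f"Loading environment variables from: {env_file}")
        load_dotenv(dotenv_path=env_file)
        return True
    logger.warning(f"Warning: .env file not found at {env_file}")
    return False
```

Note that python-dotenv's default `override=False` means values already present in the real environment still win over the file.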
local_deep_research-0.1.25/src/local_deep_research/defaults/.env.template
ADDED
@@ -0,0 +1,12 @@
+# API Keys
+# LDR_OPENAI_ENDPOINT_API_KEY=your-api-key-here
+# LDR_ANTHROPIC_API_KEY=your-api-key-here
+# LDR_OPENAI_API_KEY=your-openai-key-here
+# LDR_SERP_API_KEY=your-api-key-here
+# LDR_GUARDIAN_API_KEY=your-api-key-here
+# LDR_GOOGLE_PSE_API_KEY=your-google-api-key-here
+# LDR_GOOGLE_PSE_ENGINE_ID=your-programmable-search-engine-id-here
+
+# SearXNG Configuration, add at least SEARXNG_INSTANCE to .env file to use this search engine
+# SEARXNG_INSTANCE = "http://localhost:8080"
+# SEARXNG_DELAY = 2.0
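Once `config.py` loads this file (see the config.py diff above), each uncommented entry becomes an ordinary environment variable. A small sketch of how a consumer such as the SearXNG engine might read the instance setting; the variable handling here is illustrative, as the engine's actual lookup code is not part of this diff:

```python
# Illustrative only: how values from the .env template surface to code once
# load_dotenv has run. The exact lookup in search_engine_searxng.py is not
# shown in this diff.
import os

searxng_instance = os.getenv("SEARXNG_INSTANCE")          # e.g. "http://localhost:8080"
searxng_delay = float(os.getenv("SEARXNG_DELAY", "2.0"))  # seconds between requests

if searxng_instance:
    print(f"SearXNG enabled at {searxng_instance} (delay {searxng_delay}s)")
else:
    print("SEARXNG_INSTANCE not set; SearXNG engine disabled")
```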
{local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/defaults/llm_config.py
CHANGED
@@ -9,6 +9,7 @@ from langchain_anthropic import ChatAnthropic
 from langchain_openai import ChatOpenAI
 from langchain_ollama import ChatOllama
 from langchain_community.llms import VLLM
+from local_deep_research.utilties.search_utilities import remove_think_tags
 from local_deep_research.config import settings
 import os
 import logging
@@ -23,6 +24,8 @@ VALID_PROVIDERS = ["ollama", "openai", "anthropic", "vllm", "openai_endpoint", "
 # LLM FUNCTIONS
 # ================================
 
+
+
 def get_llm(model_name=None, temperature=None, provider=None):
     """
     Get LLM instance based on model name and provider.
@@ -33,7 +36,7 @@ def get_llm(model_name=None, temperature=None, provider=None):
         provider: Provider to use (if None, uses settings.llm.provider)
 
     Returns:
-        A LangChain LLM instance
+        A LangChain LLM instance with automatic think-tag removal
     """
     # Use settings values for parameters if not provided
     if model_name is None:
@@ -56,31 +59,42 @@ def get_llm(model_name=None, temperature=None, provider=None):
 
     # Handle different providers
     if provider == "anthropic":
-        api_key = settings.get('ANTHROPIC_API_KEY', '')
+        api_key_name = 'ANTHROPIC_API_KEY'
+        api_key = settings.get(api_key_name, '')
         if not api_key:
-            api_key = os.getenv('ANTHROPIC_API_KEY')
+            api_key = os.getenv(api_key_name)
+            if not api_key:
+                api_key = os.getenv("LDR_" + api_key_name)
         if not api_key:
             logger.warning("ANTHROPIC_API_KEY not found. Falling back to default model.")
             return get_fallback_model(temperature)
 
-        return ChatAnthropic(
+        llm = ChatAnthropic(
            model=model_name, anthropic_api_key=api_key, **common_params
         )
+        return wrap_llm_without_think_tags(llm)
 
     elif provider == "openai":
-        api_key = settings.get('OPENAI_API_KEY', '')
+        api_key_name = 'OPENAI_API_KEY'
+        api_key = settings.get(api_key_name, '')
         if not api_key:
-            api_key = os.getenv('OPENAI_API_KEY')
+            api_key = os.getenv(api_key_name)
+            if not api_key:
+                api_key = os.getenv("LDR_" + api_key_name)
         if not api_key:
             logger.warning("OPENAI_API_KEY not found. Falling back to default model.")
             return get_fallback_model(temperature)
 
-        return ChatOpenAI(model=model_name, api_key=api_key, **common_params)
+        llm = ChatOpenAI(model=model_name, api_key=api_key, **common_params)
+        return wrap_llm_without_think_tags(llm)
 
     elif provider == "openai_endpoint":
-        api_key = settings.get('OPENAI_ENDPOINT_API_KEY', '')
+        api_key_name = 'OPENAI_ENDPOINT_API_KEY'
+        api_key = settings.get(api_key_name, '')
         if not api_key:
-            api_key = os.getenv('OPENAI_ENDPOINT_API_KEY')
+            api_key = os.getenv(api_key_name)
+            if not api_key:
+                api_key = os.getenv("LDR_" + api_key_name)
         if not api_key:
             logger.warning("OPENAI_ENDPOINT_API_KEY not found. Falling back to default model.")
             return get_fallback_model(temperature)
@@ -88,16 +102,17 @@ def get_llm(model_name=None, temperature=None, provider=None):
         # Get endpoint URL from settings
         openai_endpoint_url = settings.llm.openai_endpoint_url
 
-        return ChatOpenAI(
+        llm = ChatOpenAI(
             model=model_name,
             api_key=api_key,
             openai_api_base=openai_endpoint_url,
             **common_params
         )
+        return wrap_llm_without_think_tags(llm)
 
     elif provider == "vllm":
         try:
-            return VLLM(
+            llm = VLLM(
                 model=model_name,
                 trust_remote_code=True,
                 max_new_tokens=128,
@@ -105,6 +120,7 @@ def get_llm(model_name=None, temperature=None, provider=None):
                 top_p=0.95,
                 temperature=temperature,
             )
+            return wrap_llm_without_think_tags(llm)
         except Exception as e:
             logger.error(f"Error loading VLLM model: {e}")
             logger.warning("Falling back.")
@@ -114,54 +130,54 @@ def get_llm(model_name=None, temperature=None, provider=None):
         try:
             # Use the configurable Ollama base URL
             base_url = settings.get('OLLAMA_BASE_URL', settings.llm.get('ollama_base_url', 'http://localhost:11434'))
-            return ChatOllama(model=model_name, base_url=base_url, **common_params)
+            llm = ChatOllama(model=model_name, base_url=base_url, **common_params)
+            return wrap_llm_without_think_tags(llm)
         except Exception as e:
             logger.error(f"Error loading Ollama model: {e}")
             return get_fallback_model(temperature)
 
     elif provider == "lmstudio":
-        [12 lines of the previous LM Studio implementation; content not preserved in this diff view]
+        # LM Studio supports OpenAI API format, so we can use ChatOpenAI directly
+        lmstudio_url = settings.llm.get('lmstudio_url', "http://localhost:1234")
+
+        llm = ChatOpenAI(
+            model=model_name,
+            api_key="lm-studio",  # LM Studio doesn't require a real API key
+            base_url=f"{lmstudio_url}/v1",  # Use the configured URL with /v1 endpoint
+            temperature=temperature,
+            max_tokens=settings.llm.max_tokens
+        )
+        return wrap_llm_without_think_tags(llm)
 
     elif provider == "llamacpp":
-        [8 lines of the previous llama.cpp setup; content not preserved in this diff view]
-        raise ValueError("llamacpp_model_path not set in settings.toml")
-
-        # Get additional LlamaCpp parameters
-        n_gpu_layers = settings.llm.get('llamacpp_n_gpu_layers', 1)
-        n_batch = settings.llm.get('llamacpp_n_batch', 512)
-        f16_kv = settings.llm.get('llamacpp_f16_kv', True)
+        # Import LlamaCpp
+        from langchain_community.llms import LlamaCpp
+
+        # Get LlamaCpp model path from settings
+        model_path = settings.llm.get('llamacpp_model_path', "")
+        if not model_path:
+            logger.error("llamacpp_model_path not set in settings")
+            raise ValueError("llamacpp_model_path not set in settings.toml")
 
-        [10 lines of the previous LlamaCpp construction; content not preserved in this diff view]
+        # Get additional LlamaCpp parameters
+        n_gpu_layers = settings.llm.get('llamacpp_n_gpu_layers', 1)
+        n_batch = settings.llm.get('llamacpp_n_batch', 512)
+        f16_kv = settings.llm.get('llamacpp_f16_kv', True)
+
+        # Create LlamaCpp instance
+        llm = LlamaCpp(
+            model_path=model_path,
+            temperature=temperature,
+            max_tokens=settings.llm.max_tokens,
+            n_gpu_layers=n_gpu_layers,
+            n_batch=n_batch,
+            f16_kv=f16_kv,
+            verbose=True
+        )
+        return wrap_llm_without_think_tags(llm)
 
     else:
-        return get_fallback_model(temperature)
+        return wrap_llm_without_think_tags(get_fallback_model(temperature))
 
 def get_fallback_model(temperature=None):
     """Create a dummy model for when no providers are available"""
@@ -174,6 +190,31 @@ def get_fallback_model(temperature=None):
 # COMPATIBILITY FUNCTIONS
 # ================================
 
+def wrap_llm_without_think_tags(llm):
+    """Create a wrapper class that processes LLM outputs with remove_think_tags"""
+
+
+    class ProcessingLLMWrapper:
+        def __init__(self, base_llm):
+            self.base_llm = base_llm
+
+        def invoke(self, *args, **kwargs):
+            response = self.base_llm.invoke(*args, **kwargs)
+
+            # Process the response content if it has a content attribute
+            if hasattr(response, 'content'):
+                response.content = remove_think_tags(response.content)
+            elif isinstance(response, str):
+                response = remove_think_tags(response)
+
+            return response
+
+        # Pass through any other attributes to the base LLM
+        def __getattr__(self, name):
+            return getattr(self.base_llm, name)
+
+    return ProcessingLLMWrapper(llm)
+
 def get_available_provider_types():
     """Return available model providers"""
     providers = {}
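The new `wrap_llm_without_think_tags` is a plain delegation pattern: `invoke` is intercepted to post-process the model's output, and `__getattr__` forwards everything else to the wrapped model. `remove_think_tags` itself is not shown in this diff; assuming it strips the `<think>...</think>` spans that reasoning models emit, the behavior can be sketched with stubs in place of a real LangChain model:

```python
# Sketch of the wrapper's behavior. StubResponse/StubLLM and this regex-based
# remove_think_tags are our stand-ins; the real remove_think_tags lives in
# utilties/search_utilities.py and is not shown in the diff.
import re

def remove_think_tags(text: str) -> str:
    # Assumed behavior: drop <think>...</think> reasoning spans.
    return re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL).strip()

class StubResponse:
    def __init__(self, content):
        self.content = content

class StubLLM:
    model = "stub-model"  # an arbitrary extra attribute

    def invoke(self, prompt):
        return StubResponse("<think>internal reasoning</think>Final answer.")

class ProcessingLLMWrapper:
    def __init__(self, base_llm):
        self.base_llm = base_llm

    def invoke(self, *args, **kwargs):
        response = self.base_llm.invoke(*args, **kwargs)
        if hasattr(response, "content"):
            response.content = remove_think_tags(response.content)
        elif isinstance(response, str):
            response = remove_think_tags(response)
        return response

    def __getattr__(self, name):
        # Only consulted for attributes the wrapper itself lacks,
        # so the wrapper's own invoke still takes precedence.
        return getattr(self.base_llm, name)

llm = ProcessingLLMWrapper(StubLLM())
print(llm.invoke("hi").content)  # -> "Final answer."
print(llm.model)                 # -> "stub-model" (forwarded attribute)
```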
{local_deep_research-0.1.24 → local_deep_research-0.1.25}/src/local_deep_research/web_search_engines/search_engine_factory.py
CHANGED
@@ -46,7 +46,10 @@ def create_search_engine(engine_name: str, llm=None, **kwargs) -> Optional[BaseSearchEngine]:
 
         # First check environment variable
         api_key = os.getenv(api_key_env)
-
+        if not api_key:
+            api_key = os.getenv("LDR_" + api_key_env)
+
+
         # If not found in environment, check Dynaconf settings
         if not api_key and api_key_env:
             # Convert env var name to settings path (e.g., BRAVE_API_KEY -> brave_api_key)
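Together with the renamed keys in the new `.env` template, this gives every search-engine key two accepted spellings: the bare name (e.g. `SERP_API_KEY`) and the `LDR_`-prefixed one (`LDR_SERP_API_KEY`), with Dynaconf settings still consulted afterwards. A minimal sketch of the environment lookup order; the helper name `resolve_api_key` is ours:

```python
# Sketch of the two-step key lookup now used by the factory;
# resolve_api_key is an illustrative name, not a function in the package.
import os
from typing import Optional

def resolve_api_key(api_key_env: str) -> Optional[str]:
    """Return the key from NAME, falling back to LDR_-prefixed NAME."""
    # `or` mirrors the factory's `if not api_key` check (empty string counts as unset)
    return os.getenv(api_key_env) or os.getenv("LDR_" + api_key_env)

# e.g. with LDR_SERP_API_KEY set but SERP_API_KEY unset:
os.environ.pop("SERP_API_KEY", None)
os.environ["LDR_SERP_API_KEY"] = "example-key"
print(resolve_api_key("SERP_API_KEY"))  # -> example-key
```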
{local_deep_research-0.1.24 → local_deep_research-0.1.25/src/local_deep_research.egg-info}/PKG-INFO
RENAMED
Identical to the PKG-INFO diff above: version bumped to 0.1.25, the google-search-results requirement added, and the same README changes (Windows Installation section, Docker notes, and the relocated Programmatic Access section).
local_deep_research-0.1.24/src/local_deep_research/defaults/.env.template
DELETED
@@ -1,11 +0,0 @@
-# API Keys
-# ANTHROPIC_API_KEY=your-api-key-here
-# OPENAI_API_KEY=your-openai-key-here
-# SERP_API_KEY=your-api-key-here
-# GUARDIAN_API_KEY=your-api-key-here
-# GOOGLE_PSE_API_KEY=your-google-api-key-here
-# GOOGLE_PSE_ENGINE_ID=your-programmable-search-engine-id-here
-
-# SearXNG Configuration, add at least SEARXNG_INSTANCE to .env file to use this search engine
-# SEARXNG_INSTANCE = "http://localhost:8080"
-# SEARXNG_DELAY = 2.0