local-deep-research 0.1.24__py3-none-any.whl → 0.1.26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- local_deep_research/config.py +19 -7
- local_deep_research/defaults/.env.template +5 -1
- local_deep_research/defaults/llm_config.py +90 -49
- local_deep_research/web_search_engines/search_engine_factory.py +4 -1
- {local_deep_research-0.1.24.dist-info → local_deep_research-0.1.26.dist-info}/METADATA +32 -25
- {local_deep_research-0.1.24.dist-info → local_deep_research-0.1.26.dist-info}/RECORD +10 -10
- {local_deep_research-0.1.24.dist-info → local_deep_research-0.1.26.dist-info}/WHEEL +0 -0
- {local_deep_research-0.1.24.dist-info → local_deep_research-0.1.26.dist-info}/entry_points.txt +0 -0
- {local_deep_research-0.1.24.dist-info → local_deep_research-0.1.26.dist-info}/licenses/LICENSE +0 -0
- {local_deep_research-0.1.24.dist-info → local_deep_research-0.1.26.dist-info}/top_level.txt +0 -0
local_deep_research/config.py
CHANGED
@@ -6,10 +6,12 @@ from platformdirs import user_documents_dir
 import os
 # Setup logging
 logger = logging.getLogger(__name__)
+from dotenv import load_dotenv
+import platform

 # Get config directory
 def get_config_dir():
-
+

     if platform.system() == "Windows":
         # Windows: Use Documents directory
@@ -33,15 +35,25 @@ SEARCH_ENGINES_FILE = CONFIG_DIR / "search_engines.toml"
 LOCAL_COLLECTIONS_FILE = CONFIG_DIR / "local_collections.toml"


-
-docs_base = Path(user_documents_dir()) / "local_deep_research"
-os.environ["DOCS_DIR"] = str(docs_base)
-
-
-
+env_file = CONFIG_DIR / ".env"

+if env_file.exists():
+    logger.info(f"Loading environment variables from: {env_file}")
+    load_dotenv(dotenv_path=env_file)
+else:
+    logger.warning(f"Warning: .env file not found at {env_file}. Trying secondary location.")
+    env_file_secondary = get_config_dir() / ".env"
+    if env_file_secondary.exists():
+        get_config_dir() / "config"
+        logger.info(f"Loading environment variables from: {env_file_secondary}")
+        load_dotenv(dotenv_path=env_file_secondary)
+    else:
+        logger.warning(f"Warning: .env file also not found at {env_file_secondary}.")


+# Set environment variable for Dynaconf to use
+docs_base = Path(user_documents_dir()) / "local_deep_research"
+os.environ["DOCS_DIR"] = str(docs_base)


 # Expose get_llm function
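The practical effect of this change: before `DOCS_DIR` is exported for Dynaconf, the package now loads a `.env` file from `CONFIG_DIR`, falling back to the directory returned by `get_config_dir()`. A minimal sketch of that lookup order, assuming python-dotenv; the helper name and paths below are illustrative, not the package's exact values:

```python
import logging
from pathlib import Path

from dotenv import load_dotenv  # python-dotenv

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def load_env_with_fallback(primary_dir: Path, secondary_dir: Path) -> bool:
    """Mirror the diff's order: try primary_dir/.env, then secondary_dir/.env."""
    for candidate in (primary_dir / ".env", secondary_dir / ".env"):
        if candidate.exists():
            logger.info("Loading environment variables from: %s", candidate)
            load_dotenv(dotenv_path=candidate)
            return True
        logger.warning(".env file not found at %s", candidate)
    return False

# Illustrative paths; the real code derives these from CONFIG_DIR and get_config_dir().
load_env_with_fallback(
    Path.home() / ".config" / "local_deep_research" / "config",
    Path.home() / "Documents" / "local_deep_research",
)
```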
local_deep_research/defaults/.env.template
CHANGED
@@ -1,11 +1,15 @@
 # API Keys
+# OPENAI_ENDPOINT_API_KEY=your-api-key-here
 # ANTHROPIC_API_KEY=your-api-key-here
 # OPENAI_API_KEY=your-openai-key-here
+
+# BRAVE_API_KEY=
 # SERP_API_KEY=your-api-key-here
 # GUARDIAN_API_KEY=your-api-key-here
 # GOOGLE_PSE_API_KEY=your-google-api-key-here
 # GOOGLE_PSE_ENGINE_ID=your-programmable-search-engine-id-here

+
 # SearXNG Configuration, add at least SEARXNG_INSTANCE to .env file to use this search engine
 # SEARXNG_INSTANCE = "http://localhost:8080"
-# SEARXNG_DELAY = 2.0
+# SEARXNG_DELAY = 2.0
local_deep_research/defaults/llm_config.py
CHANGED
@@ -9,6 +9,7 @@ from langchain_anthropic import ChatAnthropic
 from langchain_openai import ChatOpenAI
 from langchain_ollama import ChatOllama
 from langchain_community.llms import VLLM
+from local_deep_research.utilties.search_utilities import remove_think_tags
 from local_deep_research.config import settings
 import os
 import logging
@@ -23,6 +24,8 @@ VALID_PROVIDERS = ["ollama", "openai", "anthropic", "vllm", "openai_endpoint", "
 # LLM FUNCTIONS
 # ================================

+
+
 def get_llm(model_name=None, temperature=None, provider=None):
     """
     Get LLM instance based on model name and provider.
@@ -33,7 +36,7 @@ def get_llm(model_name=None, temperature=None, provider=None):
         provider: Provider to use (if None, uses settings.llm.provider)

     Returns:
-        A LangChain LLM instance
+        A LangChain LLM instance with automatic think-tag removal
     """
     # Use settings values for parameters if not provided
     if model_name is None:
@@ -56,31 +59,42 @@ def get_llm(model_name=None, temperature=None, provider=None):

     # Handle different providers
     if provider == "anthropic":
-
+        api_key_name = 'ANTHROPIC_API_KEY'
+        api_key = settings.get(api_key_name, '')
         if not api_key:
-            api_key = os.getenv(
+            api_key = os.getenv(api_key_name)
+        if not api_key:
+            api_key = os.getenv("LDR_" + api_key_name)
         if not api_key:
             logger.warning("ANTHROPIC_API_KEY not found. Falling back to default model.")
             return get_fallback_model(temperature)

-
+        llm = ChatAnthropic(
             model=model_name, anthropic_api_key=api_key, **common_params
         )
+        return wrap_llm_without_think_tags(llm)

     elif provider == "openai":
-
+        api_key_name = 'OPENAI_API_KEY'
+        api_key = settings.get(api_key_name, '')
         if not api_key:
-            api_key = os.getenv(
+            api_key = os.getenv(api_key_name)
+        if not api_key:
+            api_key = os.getenv("LDR_" + api_key_name)
         if not api_key:
             logger.warning("OPENAI_API_KEY not found. Falling back to default model.")
             return get_fallback_model(temperature)

-
+        llm = ChatOpenAI(model=model_name, api_key=api_key, **common_params)
+        return wrap_llm_without_think_tags(llm)

     elif provider == "openai_endpoint":
-
+        api_key_name = 'OPENAI_ENDPOINT_API_KEY'
+        api_key = settings.get(api_key_name, '')
         if not api_key:
-            api_key = os.getenv(
+            api_key = os.getenv(api_key_name)
+        if not api_key:
+            api_key = os.getenv("LDR_" + api_key_name)
         if not api_key:
             logger.warning("OPENAI_ENDPOINT_API_KEY not found. Falling back to default model.")
             return get_fallback_model(temperature)
@@ -88,16 +102,17 @@ def get_llm(model_name=None, temperature=None, provider=None):
         # Get endpoint URL from settings
         openai_endpoint_url = settings.llm.openai_endpoint_url

-
+        llm = ChatOpenAI(
             model=model_name,
             api_key=api_key,
             openai_api_base=openai_endpoint_url,
             **common_params
         )
+        return wrap_llm_without_think_tags(llm)

     elif provider == "vllm":
         try:
-
+            llm = VLLM(
                 model=model_name,
                 trust_remote_code=True,
                 max_new_tokens=128,
@@ -105,6 +120,7 @@ def get_llm(model_name=None, temperature=None, provider=None):
                 top_p=0.95,
                 temperature=temperature,
             )
+            return wrap_llm_without_think_tags(llm)
         except Exception as e:
             logger.error(f"Error loading VLLM model: {e}")
             logger.warning("Falling back.")
@@ -114,54 +130,54 @@
         try:
             # Use the configurable Ollama base URL
             base_url = settings.get('OLLAMA_BASE_URL', settings.llm.get('ollama_base_url', 'http://localhost:11434'))
-
+            llm = ChatOllama(model=model_name, base_url=base_url, **common_params)
+            return wrap_llm_without_think_tags(llm)
         except Exception as e:
             logger.error(f"Error loading Ollama model: {e}")
             return get_fallback_model(temperature)

     elif provider == "lmstudio":
-
-
-
-
-
-
-
-
-
-
-
-
+        # LM Studio supports OpenAI API format, so we can use ChatOpenAI directly
+        lmstudio_url = settings.llm.get('lmstudio_url', "http://localhost:1234")
+
+        llm = ChatOpenAI(
+            model=model_name,
+            api_key="lm-studio",  # LM Studio doesn't require a real API key
+            base_url=f"{lmstudio_url}/v1",  # Use the configured URL with /v1 endpoint
+            temperature=temperature,
+            max_tokens=settings.llm.max_tokens
+        )
+        return wrap_llm_without_think_tags(llm)

     elif provider == "llamacpp":
-
-
-
-
-
-
-
-
-            raise ValueError("llamacpp_model_path not set in settings.toml")
-
-        # Get additional LlamaCpp parameters
-        n_gpu_layers = settings.llm.get('llamacpp_n_gpu_layers', 1)
-        n_batch = settings.llm.get('llamacpp_n_batch', 512)
-        f16_kv = settings.llm.get('llamacpp_f16_kv', True)
+        # Import LlamaCpp
+        from langchain_community.llms import LlamaCpp
+
+        # Get LlamaCpp model path from settings
+        model_path = settings.llm.get('llamacpp_model_path', "")
+        if not model_path:
+            logger.error("llamacpp_model_path not set in settings")
+            raise ValueError("llamacpp_model_path not set in settings.toml")

-
-
-
-
-
-
-
-
-
-
+        # Get additional LlamaCpp parameters
+        n_gpu_layers = settings.llm.get('llamacpp_n_gpu_layers', 1)
+        n_batch = settings.llm.get('llamacpp_n_batch', 512)
+        f16_kv = settings.llm.get('llamacpp_f16_kv', True)
+
+        # Create LlamaCpp instance
+        llm = LlamaCpp(
+            model_path=model_path,
+            temperature=temperature,
+            max_tokens=settings.llm.max_tokens,
+            n_gpu_layers=n_gpu_layers,
+            n_batch=n_batch,
+            f16_kv=f16_kv,
+            verbose=True
+        )
+        return wrap_llm_without_think_tags(llm)

     else:
-        return get_fallback_model(temperature)
+        return wrap_llm_without_think_tags(get_fallback_model(temperature))

 def get_fallback_model(temperature=None):
     """Create a dummy model for when no providers are available"""
@@ -174,6 +190,31 @@ def get_fallback_model(temperature=None):
 # COMPATIBILITY FUNCTIONS
 # ================================

+def wrap_llm_without_think_tags(llm):
+    """Create a wrapper class that processes LLM outputs with remove_think_tags"""
+
+
+    class ProcessingLLMWrapper:
+        def __init__(self, base_llm):
+            self.base_llm = base_llm
+
+        def invoke(self, *args, **kwargs):
+            response = self.base_llm.invoke(*args, **kwargs)
+
+            # Process the response content if it has a content attribute
+            if hasattr(response, 'content'):
+                response.content = remove_think_tags(response.content)
+            elif isinstance(response, str):
+                response = remove_think_tags(response)
+
+            return response
+
+        # Pass through any other attributes to the base LLM
+        def __getattr__(self, name):
+            return getattr(self.base_llm, name)
+
+    return ProcessingLLMWrapper(llm)
+
 def get_available_provider_types():
     """Return available model providers"""
     providers = {}
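The recurring pattern in these hunks: each provider branch now builds its `llm` and returns it through the new `wrap_llm_without_think_tags`, and API keys are resolved from settings, then the plain environment variable, then an `LDR_`-prefixed variant. A self-contained sketch of the wrapper's behavior; `remove_think_tags` below is a stand-in regex, not the package's actual helper from `utilties/search_utilities.py`:

```python
import re

# Stand-in for the package's remove_think_tags helper; the real
# implementation may differ in detail.
def remove_think_tags(text: str) -> str:
    return re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL).strip()

class ProcessingLLMWrapper:
    """Same shape as the wrapper added in this diff: post-process invoke(),
    delegate everything else to the wrapped model."""
    def __init__(self, base_llm):
        self.base_llm = base_llm

    def invoke(self, *args, **kwargs):
        response = self.base_llm.invoke(*args, **kwargs)
        if hasattr(response, "content"):
            response.content = remove_think_tags(response.content)
        elif isinstance(response, str):
            response = remove_think_tags(response)
        return response

    def __getattr__(self, name):
        return getattr(self.base_llm, name)

class FakeLLM:
    """Hypothetical stand-in for a chat model that emits a think block."""
    model_name = "fake-reasoning-model"
    def invoke(self, prompt):
        return "<think>scratch work...</think>The answer is 42."

wrapped = ProcessingLLMWrapper(FakeLLM())
print(wrapped.invoke("question"))  # -> "The answer is 42."
print(wrapped.model_name)          # attribute passthrough via __getattr__
```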
local_deep_research/web_search_engines/search_engine_factory.py
CHANGED
@@ -46,7 +46,10 @@ def create_search_engine(engine_name: str, llm=None, **kwargs) -> Optional[BaseS

     # First check environment variable
     api_key = os.getenv(api_key_env)
-
+    if not api_key:
+        api_key = os.getenv("LDR_" + api_key_env)
+
+
     # If not found in environment, check Dynaconf settings
     if not api_key and api_key_env:
         # Convert env var name to settings path (e.g., BRAVE_API_KEY -> brave_api_key)
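This mirrors the key handling in `llm_config.py`: a search engine's key can now also come from an `LDR_`-prefixed environment variable, e.g. `LDR_BRAVE_API_KEY` works where `BRAVE_API_KEY` does. A sketch of the resulting resolution order, with a plain dict standing in for the Dynaconf settings lookup:

```python
import os

def resolve_api_key(api_key_env: str, settings: dict) -> str | None:
    """Lookup order after this change: plain env var, then the LDR_-prefixed
    env var, then the settings store (modeled here as a dict; the real code
    reads nested Dynaconf paths)."""
    api_key = os.getenv(api_key_env)
    if not api_key:
        api_key = os.getenv("LDR_" + api_key_env)
    if not api_key:
        # e.g. BRAVE_API_KEY -> brave_api_key
        api_key = settings.get(api_key_env.lower())
    return api_key

# Usage: either variable makes the engine's key available.
os.environ["LDR_BRAVE_API_KEY"] = "example-key"
print(resolve_api_key("BRAVE_API_KEY", settings={}))  # -> "example-key"
```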
{local_deep_research-0.1.24.dist-info → local_deep_research-0.1.26.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: local-deep-research
-Version: 0.1.24
+Version: 0.1.26
 Summary: AI-powered research assistant with deep, iterative analysis using LLMs and web searches
 Author-email: LearningCircuit <185559241+LearningCircuit@users.noreply.github.com>, HashedViking <6432677+HashedViking@users.noreply.github.com>
 License: MIT License
@@ -65,6 +65,7 @@ Requires-Dist: xmltodict>=0.13.0
 Requires-Dist: lxml>=4.9.2
 Requires-Dist: pdfplumber>=0.9.0
 Requires-Dist: unstructured>=0.10.0
+Requires-Dist: google-search-results
 Dynamic: license-file

 # Local Deep Research
@@ -79,7 +80,9 @@ A powerful AI-powered research assistant that performs deep, iterative analysis
 </a>
 </div>

-
+
+
+## Quick Start (not required if installed with windows installer)

 ```bash
 # Install the package
@@ -107,12 +110,12 @@ Access the web interface at `http://127.0.0.1:5000` in your browser.

 ## Docker Support

-
+Build the image first if you haven't already
 ```bash
 docker build -t local-deep-research .
 ```

-
+Quick Docker Run

 ```bash
 # Run with default settings (connects to Ollama running on the host)
@@ -126,6 +129,31 @@ For comprehensive Docker setup information, see:
 - [Docker Usage Guide](https://github.com/LearningCircuit/local-deep-research/blob/main/docs/docker-usage-readme.md)
 - [Docker Compose Guide](https://github.com/LearningCircuit/local-deep-research/blob/main/docs/docker-compose-guide.md)

+## Programmatic Access
+
+Local Deep Research now provides a simple API for programmatic access to its research capabilities:
+
+```python
+import os
+# Set environment variables to control the LLM
+os.environ["LDR_LLM__MODEL"] = "mistral"  # Specify model name
+
+from local_deep_research import quick_summary, generate_report, analyze_documents
+
+# Generate a quick research summary with custom parameters
+results = quick_summary(
+    query="advances in fusion energy",
+    search_tool="auto",            # Auto-select the best search engine
+    iterations=1,                  # Single research cycle for speed
+    questions_per_iteration=2,     # Generate 2 follow-up questions
+    max_results=30,                # Consider up to 30 search results
+    temperature=0.7                # Control creativity of generation
+)
+print(results["summary"])
+```
+
+These functions provide flexible options for customizing the search parameters, iterations, and output formats. For more examples, see the [programmatic access tutorial](https://github.com/LearningCircuit/local-deep-research/blob/main/examples/programmatic_access.ipynb).
+

 ## Features

@@ -308,27 +336,6 @@ You can use local document search in several ways:
 3. **All collections**: Set `tool = "local_all"` to search across all collections
 4. **Query syntax**: Type `collection:project_docs your query` to target a specific collection

-## Programmatic Access
-
-Local Deep Research now provides a simple API for programmatic access to its research capabilities:
-
-```python
-from local_deep_research import quick_summary, generate_report
-
-# Generate a quick research summary
-results = quick_summary("advances in fusion energy")
-print(results["summary"])
-
-# Create a comprehensive structured report
-report = generate_report("impact of quantum computing on cryptography")
-print(report["content"])
-
-# Analyze documents in a local collection
-from local_deep_research import analyze_documents
-docs = analyze_documents("renewable energy", "research_papers")
-```
-
-These functions provide flexible options for customizing the search parameters, iterations, and output formats. For more examples, see the [programmatic access tutorial](https://github.com/LearningCircuit/local-deep-research/blob/programmatic-access/examples/programmatic_access.ipynb).

 ## Advanced Configuration

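The README's new example drives model selection through `LDR_LLM__MODEL`. That naming follows Dynaconf's convention of an environment prefix plus double underscores for nesting, so the variable lands at `settings.llm.model`; a sketch under that assumption (the package's exact Dynaconf setup may differ):

```python
import os
from dynaconf import Dynaconf

# Double underscores nest: LDR_LLM__MODEL -> settings.llm.model
os.environ["LDR_LLM__MODEL"] = "mistral"

settings = Dynaconf(envvar_prefix="LDR")
print(settings.llm.model)  # -> "mistral"
```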
{local_deep_research-0.1.24.dist-info → local_deep_research-0.1.26.dist-info}/RECORD
CHANGED
@@ -1,14 +1,14 @@
 local_deep_research/__init__.py,sha256=Grde0sFEYyCXrPCfxd-9b9v1M6OurrzQbVYRmKQ9E7w,886
 local_deep_research/citation_handler.py,sha256=v_fwTy-2XvUuoH3OQRzmBrvaiN7mBk8jbNfySslmt5g,4357
-local_deep_research/config.py,sha256=
+local_deep_research/config.py,sha256=FQxtlhw49O-f5iImst84-JwhabfYr-xV-YMEh6hkAwU,10516
 local_deep_research/main.py,sha256=uQXtGQ6LtZNd5Qw63D5ke4Q_LjYimouWVSUknVsk3JQ,3645
 local_deep_research/report_generator.py,sha256=EvaArnWirMgg42fMzmZeJczoEYujEbJ2ryHHYuuoXx8,8058
 local_deep_research/search_system.py,sha256=yY3BEzX68vdtUcYF9h6lC3yVao0YA_NSBj6W3-RwlKk,15459
 local_deep_research/api/__init__.py,sha256=H0WGFSohUR0T2QswtWngPZWoMYPs9VWQTQYaivAlrJU,440
 local_deep_research/api/research_functions.py,sha256=Z23wZYsB1x2ivdFYJ9uqIqCAwjR2RdOff7Bq30DxQYU,12099
-local_deep_research/defaults/.env.template,sha256=
+local_deep_research/defaults/.env.template,sha256=_eVCy4d_XwpGXy8n50CG3wH9xx2oqJCFKS7IbqgInDk,491
 local_deep_research/defaults/__init__.py,sha256=2Vvlkl-gmP_qPYWegE4JBgummypogl3VXrQ1XzptFDU,1381
-local_deep_research/defaults/llm_config.py,sha256=
+local_deep_research/defaults/llm_config.py,sha256=1KiW9k8kmsUD5u9VgEdgWZBNMmK1BA0ZxoGbuC2spAk,11652
 local_deep_research/defaults/local_collections.toml,sha256=zNa03PVnFrZ757JdZOuW6QDxkOc6ep5tG8baGBrMmXM,1778
 local_deep_research/defaults/main.toml,sha256=6Lzbc5sVLxMwu83bLBp_tpYOZgmtThCfPL1L42eTGro,1939
 local_deep_research/defaults/search_engines.toml,sha256=g0-qrw10oMgW74z_lYpPDkGwMje25mvalfY1EJ0nL3g,8134
@@ -34,7 +34,7 @@ local_deep_research/web/templates/settings.html,sha256=S9A-tdpzMhP2Zw7kp2jxKlwaW
 local_deep_research/web/templates/settings_dashboard.html,sha256=De-v1KNdVvkXme5i3YZ6sIfU9aAKDc_N-AW9n4PZoso,9109
 local_deep_research/web_search_engines/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 local_deep_research/web_search_engines/search_engine_base.py,sha256=QmhfjuHK2deomh8tARghKuYnF-5t3wwBB661odS2VtU,8065
-local_deep_research/web_search_engines/search_engine_factory.py,sha256=
+local_deep_research/web_search_engines/search_engine_factory.py,sha256=8REYoRdDWvB6XLhBym8rqzuULX28VQ-UKWNcRA5tLTQ,11189
 local_deep_research/web_search_engines/search_engines_config.py,sha256=5C0tCmy_Jpv1YHLZLlyS7h5B2XToYcWPAaBDEOsxMo0,2739
 local_deep_research/web_search_engines/engines/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 local_deep_research/web_search_engines/engines/full_search.py,sha256=BuOz8dX-XocazCG7gGBKFnIY99FZtNFI0-Wq3fhsfp4,4689
@@ -53,9 +53,9 @@ local_deep_research/web_search_engines/engines/search_engine_semantic_scholar.py
 local_deep_research/web_search_engines/engines/search_engine_serpapi.py,sha256=XikEYnM-pAaR70VeAJ28lbqpRzCj4bCA9xY29taTV8g,9215
 local_deep_research/web_search_engines/engines/search_engine_wayback.py,sha256=astAvSLajDZ6rwgthJ3iBcHSWuDSYPO7uilIxaJhXmU,18132
 local_deep_research/web_search_engines/engines/search_engine_wikipedia.py,sha256=KSGJECbEcxZpVK-PhYsTCtzedSK0l1AjQmvGtx8KBks,9799
-local_deep_research-0.1.
-local_deep_research-0.1.
-local_deep_research-0.1.
-local_deep_research-0.1.
-local_deep_research-0.1.
-local_deep_research-0.1.
+local_deep_research-0.1.26.dist-info/licenses/LICENSE,sha256=Qg2CaTdu6SWnSqk1_JtgBPp_Da-LdqJDhT1Vt1MUc5s,1072
+local_deep_research-0.1.26.dist-info/METADATA,sha256=lxkwHdoKtXh2HlxFL_XZ6NGn6aWm-mVjBQR7GclJpqE,17384
+local_deep_research-0.1.26.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+local_deep_research-0.1.26.dist-info/entry_points.txt,sha256=u-Y6Z3MWtR3dmsTDFYhXyfkPv7mALUA7YAnY4Fi1XDs,97
+local_deep_research-0.1.26.dist-info/top_level.txt,sha256=h6-uVE_wSuLOcoWwT9szhX23mBWufu77MqmM25UfbCY,20
+local_deep_research-0.1.26.dist-info/RECORD,,
{local_deep_research-0.1.24.dist-info → local_deep_research-0.1.26.dist-info}/WHEEL
RENAMED
File without changes
{local_deep_research-0.1.24.dist-info → local_deep_research-0.1.26.dist-info}/entry_points.txt
RENAMED
File without changes
{local_deep_research-0.1.24.dist-info → local_deep_research-0.1.26.dist-info}/licenses/LICENSE
RENAMED
File without changes
{local_deep_research-0.1.24.dist-info → local_deep_research-0.1.26.dist-info}/top_level.txt
RENAMED
File without changes