local-deep-research 0.1.20__py3-none-any.whl → 0.1.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/local_deep_research/defaults/llm_config.py
+++ b/local_deep_research/defaults/llm_config.py
@@ -57,6 +57,8 @@ def get_llm(model_name=None, temperature=None, provider=None):
     # Handle different providers
     if provider == "anthropic":
         api_key = settings.get('ANTHROPIC_API_KEY', '')
+        if not api_key:
+            api_key = os.getenv('ANTHROPIC_API_KEY')
         if not api_key:
             logger.warning("ANTHROPIC_API_KEY not found. Falling back to default model.")
             return get_fallback_model(temperature)
@@ -67,6 +69,8 @@ def get_llm(model_name=None, temperature=None, provider=None):
 
     elif provider == "openai":
         api_key = settings.get('OPENAI_API_KEY', '')
+        if not api_key:
+            api_key = os.getenv('OPENAI_API_KEY')
         if not api_key:
             logger.warning("OPENAI_API_KEY not found. Falling back to default model.")
             return get_fallback_model(temperature)
@@ -75,7 +79,8 @@ def get_llm(model_name=None, temperature=None, provider=None):
 
     elif provider == "openai_endpoint":
         api_key = settings.get('OPENAI_ENDPOINT_API_KEY', '')
-
+        if not api_key:
+            api_key = os.getenv('OPENAI_ENDPOINT_API_KEY')
         if not api_key:
             logger.warning("OPENAI_ENDPOINT_API_KEY not found. Falling back to default model.")
             return get_fallback_model(temperature)
@@ -213,6 +218,8 @@ def is_openai_available():
     """Check if OpenAI is available"""
     try:
         api_key = settings.get('OPENAI_API_KEY', '')
+        if not api_key:
+            api_key = os.getenv('OPENAI_API_KEY')
         return bool(api_key)
     except:
         return False
@@ -221,6 +228,8 @@ def is_anthropic_available():
     """Check if Anthropic is available"""
     try:
         api_key = settings.get('ANTHROPIC_API_KEY', '')
+        if not api_key:
+            api_key = os.getenv('ANTHROPIC_API_KEY')
         return bool(api_key)
     except:
         return False
@@ -229,6 +238,8 @@ def is_openai_endpoint_available():
    """Check if OpenAI endpoint is available"""
     try:
         api_key = settings.get('OPENAI_ENDPOINT_API_KEY', '')
+        if not api_key:
+            api_key = os.getenv('OPENAI_ENDPOINT_API_KEY')
         return bool(api_key)
     except:
         return False
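Every `llm_config.py` hunk above applies the same fix: a key that is missing or empty in `settings` now falls back to the process environment before the code warns and switches to the fallback model. A minimal standalone sketch of that pattern (the `resolve_api_key` helper and the plain-dict `settings` are illustrative stand-ins, not the package's actual objects):

```python
import os

# Illustrative stand-in for the package's settings object.
settings = {"ANTHROPIC_API_KEY": ""}

def resolve_api_key(name: str) -> str | None:
    """Prefer the settings entry; fall back to the environment (the 0.1.22 behavior)."""
    api_key = settings.get(name, '')
    if not api_key:
        # New in 0.1.22: an empty settings entry no longer blocks a key
        # supplied via environment variable.
        api_key = os.getenv(name)
    return api_key or None

# With ANTHROPIC_API_KEY exported in the shell, this now resolves even though
# the settings entry is empty; previously the empty string won and the
# fallback model was used.
print(resolve_api_key("ANTHROPIC_API_KEY"))
```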
--- a/local_deep_research/defaults/search_engines.toml
+++ b/local_deep_research/defaults/search_engines.toml
@@ -186,40 +186,40 @@ strengths = ["searches all local collections", "personal documents", "offline ac
 weaknesses = ["may return too many results", "requires indexing"]
 requires_llm = true
 
-[semantic_scholar]
-module_path = "local_deep_research.web_search_engines.engines.search_engine_semantic_scholar"
-class_name = "SemanticScholarSearchEngine"
-requires_api_key = false
-api_key_env = "S2_API_KEY"
-reliability = 0.87
-strengths = [
-    "comprehensive scientific literature",
-    "extensive citation network",
-    "AI-generated summaries (TLDRs)",
-    "academic paper metadata",
-    "cross-disciplinary coverage",
-    "200M+ papers across all fields",
-    "usable without API key"
-]
-weaknesses = [
-    "rate limited (1000 requests/day) without API key",
-    "limited to academic content"
-]
-supports_full_search = true
-requires_llm = false
+#[semantic_scholar]
+#module_path = "local_deep_research.web_search_engines.engines.search_engine_semantic_scholar"
+#class_name = "SemanticScholarSearchEngine"
+#requires_api_key = false
+#api_key_env = "S2_API_KEY"
+#reliability = 0.87
+#strengths = [
+#    "comprehensive scientific literature",
+#    "extensive citation network",
+#    "AI-generated summaries (TLDRs)",
+#    "academic paper metadata",
+#    "cross-disciplinary coverage",
+#    "200M+ papers across all fields",
+#    "usable without API key"
+#]
+#weaknesses = [
+#    "rate limited (1000 requests/day) without API key",
+#    "limited to academic content"
+#]
+#supports_full_search = true
+#requires_llm = false
 
-[semantic_scholar.default_params]
-max_results = 20
-get_abstracts = true
-get_tldr = true
-get_references = false
-get_citations = false
-get_embeddings = false
-citation_limit = 10
-reference_limit = 10
-optimize_queries = true
-max_retries = 5
-retry_backoff_factor = 1.0
+#[semantic_scholar.default_params]
+#max_results = 20
+#get_abstracts = true
+#get_tldr = true
+#get_references = false
+#get_citations = false
+#get_embeddings = false
+#citation_limit = 10
+#reference_limit = 10
+#optimize_queries = true
+#max_retries = 5
+#retry_backoff_factor = 1.0
 
 # Default search engine to use if none specified
 DEFAULT_SEARCH_ENGINE = "wikipedia"
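Each table in `search_engines.toml` maps an engine name to an importable class plus metadata, which is why commenting the block out removes Semantic Scholar from the registry without deleting its definition. A hypothetical sketch of how entries of this shape could be resolved into engine classes (the package's actual loader may differ):

```python
import importlib
import tomllib  # Python 3.11+; older interpreters can use the external tomli package

# Path is illustrative; the package resolves its own config directory.
with open("search_engines.toml", "rb") as f:
    config = tomllib.load(f)

for name, spec in config.items():
    # Skip scalar settings such as DEFAULT_SEARCH_ENGINE; commented-out
    # engines simply no longer appear as tables at all.
    if not isinstance(spec, dict) or "module_path" not in spec:
        continue
    module = importlib.import_module(spec["module_path"])
    engine_cls = getattr(module, spec["class_name"])
    print(f"{name}: {engine_cls.__name__} (reliability={spec.get('reliability')})")
```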
--- a/local_deep_research-0.1.20.dist-info/METADATA
+++ b/local_deep_research-0.1.22.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: local-deep-research
-Version: 0.1.20
+Version: 0.1.22
 Summary: AI-powered research assistant with deep, iterative analysis using LLMs and web searches
 Author-email: LearningCircuit <185559241+LearningCircuit@users.noreply.github.com>, HashedViking <6432677+HashedViking@users.noreply.github.com>
 License: MIT License
@@ -151,17 +151,17 @@ The package automatically creates and manages configuration files in your user d
 
 ### Default Configuration Files
 
-If you prefere environment variables please refere to this file: https://github.com/LearningCircuit/local-deep-research/blob/main/docs/env_configuration.md
-
 When you first run the tool, it creates these configuration files:
 
 | File | Purpose |
 |------|---------|
 | `settings.toml` | General settings for research, web interface, and search |
-| `llm_config.py` | Configure which LLM to use (local or cloud-based) |
+| `llm_config.py` | Advanced LLM configuration (rarely needs modification) |
 | `search_engines.toml` | Define and configure search engines |
 | `local_collections.toml` | Configure local document collections for RAG |
-| `.secrets.toml` | Store API keys for cloud services |
+| `.env` | Environment variables for configuration (recommended for API keys) |
+
+> **Note:** For comprehensive environment variable configuration, see our [Environment Variables Guide](https://github.com/LearningCircuit/local-deep-research/blob/main/docs/env_configuration.md).
 
 ## Setting Up AI Models
 
@@ -175,42 +175,34 @@ The system supports multiple LLM providers:
 
 ### Cloud Models
 
-Edit your `.secrets.toml` file to add API keys:
+Add API keys to your environment variables (recommended) by creating a `.env` file in your config directory:
 
-```toml
-ANTHROPIC_API_KEY = "your-api-key-here"    # For Claude models
-OPENAI_API_KEY = "your-openai-key-here"    # For GPT models
-OPENAI_ENDPOINT_API_KEY = "your-key-here"  # For OpenRouter or similar services
+```bash
+# Set API keys for cloud providers in .env
+ANTHROPIC_API_KEY=your-api-key-here        # For Claude models
+OPENAI_API_KEY=your-openai-key-here        # For GPT models
+OPENAI_ENDPOINT_API_KEY=your-key-here      # For OpenRouter or similar services
+
+# Set your preferred LLM provider and model (no need to edit llm_config.py)
+LDR_LLM__PROVIDER=ollama                   # Options: ollama, openai, anthropic, etc.
+LDR_LLM__MODEL=gemma3:12b                  # Model name to use
 ```
 
-Then edit `llm_config.py` to change the default provider:
-
-```python
-# Set your preferred model provider here
-DEFAULT_PROVIDER = ModelProvider.OLLAMA  # Change to your preferred provider
-
-# Set your default model name here
-DEFAULT_MODEL = "mistral"  # Change to your preferred model
-```
+> **Important:** In most cases, you don't need to modify the `llm_config.py` file. Simply set the `LDR_LLM__PROVIDER` and `LDR_LLM__MODEL` environment variables to use your preferred model.
 
 ### Supported LLM Providers
 
 The system supports multiple LLM providers:
 
-| Provider | Type | Configuration | Notes |
-|----------|------|--------------|-------|
-| `OLLAMA` | Local | No API key needed | Default - install from ollama.ai |
-| `OPENAI` | Cloud | Requires `OPENAI_API_KEY` | GPT models (3.5, 4, 4o) |
-| `ANTHROPIC` | Cloud | Requires `ANTHROPIC_API_KEY` | Claude models (3 Opus, Sonnet, Haiku) |
-| `OPENAI_ENDPOINT` | Cloud | Requires `OPENAI_ENDPOINT_API_KEY` | For any OpenAI-compatible API |
-| `VLLM` | Local | No API key needed | For hosting models via vLLM |
-
-You can configure the OpenAI-compatible endpoint URL in `llm_config.py`:
-
-```python
-# For OpenRouter, Together.ai, Azure OpenAI, or any compatible endpoint
-OPENAI_ENDPOINT_URL = "https://openrouter.ai/api/v1"
-```
+| Provider | Type | API Key | Setup Details | Models |
+|----------|------|---------|---------------|--------|
+| `OLLAMA` | Local | No | Install from [ollama.ai](https://ollama.ai) | Mistral, Llama, Gemma, etc. |
+| `OPENAI` | Cloud | `OPENAI_API_KEY` | Set in environment | GPT-3.5, GPT-4, GPT-4o |
+| `ANTHROPIC` | Cloud | `ANTHROPIC_API_KEY` | Set in environment | Claude 3 Opus, Sonnet, Haiku |
+| `OPENAI_ENDPOINT` | Cloud | `OPENAI_ENDPOINT_API_KEY` | Set in environment | Any OpenAI-compatible model |
+| `VLLM` | Local | No | Requires GPU setup | Any supported by vLLM |
+| `LMSTUDIO` | Local | No | Use LM Studio server | Models from LM Studio |
+| `LLAMACPP` | Local | No | Configure model path | GGUF model formats |
 
 The `OPENAI_ENDPOINT` provider can access any service with an OpenAI-compatible API, including:
 - OpenRouter (access to hundreds of models)
@@ -219,26 +211,43 @@ The `OPENAI_ENDPOINT` provider can access any service with an OpenAI-compatible
 - Groq
 - Anyscale
 - Self-hosted LLM servers with OpenAI compatibility
-- Any other service that implements the OpenAI API specification
 
 ## Setting Up Search Engines
 
-The system includes multiple search engines. Some require API keys:
+Some search engines require API keys. Add them to your environment variables by creating a `.env` file in your config directory:
 
-Use .env in config folder if .secrets.toml doesnt work.
+```bash
+# Search engine API keys (add to .env file)
+SERP_API_KEY=your-serpapi-key-here          # For Google results via SerpAPI
+GOOGLE_PSE_API_KEY=your-google-key-here     # For Google Programmable Search
+GOOGLE_PSE_ENGINE_ID=your-pse-id-here       # For Google Programmable Search
+BRAVE_API_KEY=your-brave-search-key-here    # For Brave Search
+GUARDIAN_API_KEY=your-guardian-key-here     # For The Guardian
+
+# Set your preferred search tool
+LDR_SEARCH__TOOL=auto                       # Default: intelligently selects best engine
+```
 
-You can also overwrite other settings via environment variables, e.g. to overwrite [web] port setting in settings.toml please use: **LDR_WEB__PORT=8080**
+> **Tip:** To override other settings via environment variables (e.g., to change the web port), use: **LDR_WEB__PORT=8080**
 
-```toml
-# Add to .secrets.toml
-SERP_API_KEY = "your-serpapi-key-here"          # For Google results via SerpAPI
-GOOGLE_PSE_API_KEY = "your-google-key-here"     # For Google Programmable Search
-GOOGLE_PSE_ENGINE_ID = "your-pse-id-here"       # For Google Programmable Search
-BRAVE_API_KEY = "your-brave-search-key-here"    # For Brave Search
-GUARDIAN_API_KEY = "your-guardian-key-here"     # For The Guardian
-```
+### Available Search Engines
+
+| Engine | Purpose | API Key Required? | Rate Limit |
+|--------|---------|-------------------|------------|
+| `auto` | Intelligently selects the best engine | No | Based on selected engine |
+| `wikipedia` | General knowledge and facts | No | No strict limit |
+| `arxiv` | Scientific papers and research | No | No strict limit |
+| `pubmed` | Medical and biomedical research | No | No strict limit |
+| `semantic_scholar` | Academic literature across all fields | No | 100/5min |
+| `github` | Code repositories and documentation | No | 60/hour (unauthenticated) |
+| `brave` | Web search (privacy-focused) | Yes | Based on plan |
+| `serpapi` | Google search results | Yes | Based on plan |
+| `google_pse` | Custom Google search | Yes | 100/day free tier |
+| `wayback` | Historical web content | No | No strict limit |
+| `searxng` | Local web search engine | No (requires local server) | No limit |
+| Any collection name | Search your local documents | No | No limit |
 
-No API key required for: Wikipedia, arXiv, PubMed, Semantic Scholar, and local collections.
+> **Note:** For detailed SearXNG setup, see our [SearXNG Setup Guide](https://github.com/LearningCircuit/local-deep-research/blob/main/docs/SearXNG-Setup.md).
 
 ## Local Document Search (RAG)
 
@@ -262,8 +271,6 @@ max_filtered_results = 5
 chunk_size = 1000
 chunk_overlap = 200
 cache_dir = "__CACHE_DIR__/local_search/project_docs"
-
-# More collections defined in the file...
 ```
 
 2. Create your document directories:
@@ -279,34 +286,29 @@ You can use local document search in several ways:
 3. **All collections**: Set `tool = "local_all"` to search across all collections
 4. **Query syntax**: Type `collection:project_docs your query` to target a specific collection
 
-## Available Search Engines
+## Docker Support
 
-| Engine | Purpose | API Key Required? | Rate Limit |
-|--------|---------|-------------------|------------|
-| `auto` | Intelligently selects the best engine | No | Based on selected engine |
-| `wikipedia` | General knowledge and facts | No | No strict limit |
-| `arxiv` | Scientific papers and research | No | No strict limit |
-| `pubmed` | Medical and biomedical research | No | No strict limit |
-| `semantic_scholar` | Academic literature across all fields | No | 100/5min |
-| `github` | Code repositories and documentation | No | 60/hour (unauthenticated) |
-| `brave` | Web search (privacy-focused) | Yes | Based on plan |
-| `serpapi` | Google search results | Yes | Based on plan |
-| `google_pse` | Custom Google search | Yes | 100/day free tier |
-| `wayback` | Historical web content | No | No strict limit |
-| `searxng` | Local web search engine | No (requires local server) | No limit |
-| Any collection name | Search your local documents | No | No limit |
+Local Deep Research can run in Docker containers for easy deployment across environments.
 
-> **Support Free Knowledge:** If you frequently use the search engines in this tool, please consider making a donation to these organizations. They provide valuable services and rely on user support to maintain their operations:
-> - [Donate to Wikipedia](https://donate.wikimedia.org)
-> - [Support arXiv](https://arxiv.org/about/give)
-> - [Donate to DuckDuckGo](https://duckduckgo.com/donations)
-> - [Support PubMed/NCBI](https://www.nlm.nih.gov/pubs/donations/donations.html)
+### Quick Docker Run
+
+```bash
+# Run with default settings (connects to Ollama running on the host)
+docker run --network=host \
+  -e LDR_LLM__PROVIDER="ollama" \
+  -e LDR_LLM__MODEL="mistral" \
+  local-deep-research
+```
+
+For comprehensive Docker setup information, see:
+- [Docker Usage Guide](https://github.com/LearningCircuit/local-deep-research/blob/main/docs/docker-usage-readme.md)
+- [Docker Compose Guide](https://github.com/LearningCircuit/local-deep-research/blob/main/docs/docker-compose-guide.md)
 
 ## Advanced Configuration
 
 ### Research Parameters
 
-Edit `settings.toml` to customize research parameters:
+Edit `settings.toml` to customize research parameters or use environment variables:
 
 ```toml
 [search]
@@ -324,8 +326,13 @@ max_results = 50
 
 # Results after relevance filtering
 max_filtered_results = 5
+```
 
-# More settings available...
+Using environment variables:
+```bash
+LDR_SEARCH__TOOL=auto
+LDR_SEARCH__ITERATIONS=3
+LDR_SEARCH__QUESTIONS_PER_ITERATION=2
 ```
 
 ## Web Interface
@@ -338,9 +345,6 @@ The web interface offers several features:
 - **PDF Export**: Download reports
 - **Research Management**: Terminate processes or delete records
 
-![Web Interface](./web1.png)
-![Web Interface](./web2.png)
-
 ## Command Line Interface
 
 The CLI version allows you to:
@@ -363,8 +367,6 @@ cd local-deep-research
 pip install -e .
 ```
 
-This creates an "editable" installation that uses your local code, so any changes you make are immediately available without reinstalling.
-
 You can run the application directly using Python module syntax:
 
 ```bash
@@ -375,12 +377,6 @@ python -m local_deep_research.web.app
 python -m local_deep_research.main
 ```
 
-This approach is useful for development and debugging, as it provides more detailed error messages and allows you to make code changes on the fly.
-
-## Example Research
-
-The repository includes complete research examples like our [fusion energy research analysis](https://github.com/LearningCircuit/local-deep-research/blob/main/examples/fusion-energy-research-developments.md) showcasing the system's capabilities.
-
 ## Community & Support
 
 Join our [Discord server](https://discord.gg/2E6gYU2Z) to exchange ideas, discuss usage patterns, and share research approaches.
@@ -393,24 +389,33 @@ This project is licensed under the MIT License.
 
 - Built with [Ollama](https://ollama.ai) for local AI processing
 - Search powered by multiple sources:
-  - [Wikipedia](https://www.wikipedia.org/) for factual knowledge (default search engine)
+  - [Wikipedia](https://www.wikipedia.org/) for factual knowledge
   - [arXiv](https://arxiv.org/) for scientific papers
   - [PubMed](https://pubmed.ncbi.nlm.nih.gov/) for biomedical literature
+  - [Semantic Scholar](https://www.semanticscholar.org/) for academic literature
   - [DuckDuckGo](https://duckduckgo.com) for web search
   - [The Guardian](https://www.theguardian.com/) for journalism
-  - [SerpAPI](https://serpapi.com) for Google search results (requires API key)
+  - [SerpAPI](https://serpapi.com) for Google search results
   - [SearXNG](https://searxng.org/) for local web-search engine
   - [Brave Search](https://search.brave.com/) for privacy-focused web search
-  - [Semantic Scholar](https://www.semanticscholar.org/) for academic literature
 - Built on [LangChain](https://github.com/hwchase17/langchain) framework
 - Uses [justext](https://github.com/miso-belica/justext), [Playwright](https://playwright.dev), [FAISS](https://github.com/facebookresearch/faiss), and more
 
+> **Support Free Knowledge:** If you frequently use the search engines in this tool, please consider making a donation to these organizations:
+> - [Donate to Wikipedia](https://donate.wikimedia.org)
+> - [Support arXiv](https://arxiv.org/about/give)
+> - [Donate to DuckDuckGo](https://duckduckgo.com/donations)
+> - [Support PubMed/NCBI](https://www.nlm.nih.gov/pubs/donations/donations.html)
+
 ## Contributing
 
 Contributions are welcome! Please feel free to submit a Pull Request.
 
 1. Fork the repository
 2. Create your feature branch (`git checkout -b feature/AmazingFeature`)
-3. Commit your changes (`git commit -m 'Add some AmazingFeature'`)
-4. Push to the branch (`git push origin feature/AmazingFeature`)
-5. Open a Pull Request
+3. Make your changes
+4. Commit your changes (`git commit -m 'Add some AmazingFeature'`)
+5. Push to the branch (`git push origin feature/AmazingFeature`)
+6. **Important:** Open a Pull Request against the `dev` branch, not the `main` branch
+
+We prefer all pull requests to be submitted against the `dev` branch for easier testing and integration before releasing to the main branch.
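The README changes above lean on the `LDR_` environment-variable convention, where a double underscore separates nesting levels: `LDR_WEB__PORT=8080` targets the `port` key of the `[web]` table in `settings.toml`, and `LDR_LLM__PROVIDER` targets `[llm].provider`. A simplified sketch of that mapping, assuming Dynaconf-style behavior rather than quoting the package's actual loader:

```python
import os

def apply_env_overrides(settings: dict, prefix: str = "LDR_") -> dict:
    """Fold LDR_-prefixed environment variables into a nested settings dict.

    A double underscore marks one level of nesting, so LDR_WEB__PORT
    becomes settings["web"]["port"]. Simplified illustration only.
    """
    for key, value in os.environ.items():
        if not key.startswith(prefix):
            continue
        path = key[len(prefix):].lower().split("__")  # e.g. ["web", "port"]
        node = settings
        for part in path[:-1]:
            node = node.setdefault(part, {})
        node[path[-1]] = value
    return settings

os.environ["LDR_WEB__PORT"] = "8080"
print(apply_env_overrides({"web": {"port": 5000}}))  # {'web': {'port': '8080'}}
```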
--- a/local_deep_research-0.1.20.dist-info/RECORD
+++ b/local_deep_research-0.1.22.dist-info/RECORD
@@ -5,10 +5,10 @@ local_deep_research/main.py,sha256=uQXtGQ6LtZNd5Qw63D5ke4Q_LjYimouWVSUknVsk3JQ,3
 local_deep_research/report_generator.py,sha256=EvaArnWirMgg42fMzmZeJczoEYujEbJ2ryHHYuuoXx8,8058
 local_deep_research/search_system.py,sha256=yY3BEzX68vdtUcYF9h6lC3yVao0YA_NSBj6W3-RwlKk,15459
 local_deep_research/defaults/__init__.py,sha256=2Vvlkl-gmP_qPYWegE4JBgummypogl3VXrQ1XzptFDU,1381
-local_deep_research/defaults/llm_config.py,sha256=7wTIugVYD_ypG7Xwvu3DBt0yO8TWBf_drOIQOSOkdQQ,9628
+local_deep_research/defaults/llm_config.py,sha256=Ql0euemgLw_Uwg5g05sA1SkVzAYK7O_ZAnnBi3rsAi4,10095
 local_deep_research/defaults/local_collections.toml,sha256=zNa03PVnFrZ757JdZOuW6QDxkOc6ep5tG8baGBrMmXM,1778
 local_deep_research/defaults/main.toml,sha256=6Lzbc5sVLxMwu83bLBp_tpYOZgmtThCfPL1L42eTGro,1939
-local_deep_research/defaults/search_engines.toml,sha256=TYkOqVaZq9JPawz4fIPyGdkAtYa4t8F9H50VY-wv2ak,8101
+local_deep_research/defaults/search_engines.toml,sha256=g0-qrw10oMgW74z_lYpPDkGwMje25mvalfY1EJ0nL3g,8134
 local_deep_research/utilties/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 local_deep_research/utilties/enums.py,sha256=TVAZiu9szNbdacfb7whgaQJJlSk7oYByADaAierD4CE,229
 local_deep_research/utilties/llm_utils.py,sha256=IGv-_gJWqLTpO3_op1NHIwxKaFEzmXhhVYSLTTSMnIA,4522
@@ -50,9 +50,9 @@ local_deep_research/web_search_engines/engines/search_engine_semantic_scholar.py
 local_deep_research/web_search_engines/engines/search_engine_serpapi.py,sha256=XikEYnM-pAaR70VeAJ28lbqpRzCj4bCA9xY29taTV8g,9215
 local_deep_research/web_search_engines/engines/search_engine_wayback.py,sha256=astAvSLajDZ6rwgthJ3iBcHSWuDSYPO7uilIxaJhXmU,18132
 local_deep_research/web_search_engines/engines/search_engine_wikipedia.py,sha256=KSGJECbEcxZpVK-PhYsTCtzedSK0l1AjQmvGtx8KBks,9799
-local_deep_research-0.1.20.dist-info/licenses/LICENSE,sha256=Qg2CaTdu6SWnSqk1_JtgBPp_Da-LdqJDhT1Vt1MUc5s,1072
-local_deep_research-0.1.20.dist-info/METADATA,sha256=29URKDSkO8eCFRa5NkCoPIZ_lHYH5xOeK8ORQp5-v6k,15608
-local_deep_research-0.1.20.dist-info/WHEEL,sha256=DK49LOLCYiurdXXOXwGJm6U4DkHkg4lcxjhqwRa0CP4,91
-local_deep_research-0.1.20.dist-info/entry_points.txt,sha256=u-Y6Z3MWtR3dmsTDFYhXyfkPv7mALUA7YAnY4Fi1XDs,97
-local_deep_research-0.1.20.dist-info/top_level.txt,sha256=h6-uVE_wSuLOcoWwT9szhX23mBWufu77MqmM25UfbCY,20
-local_deep_research-0.1.20.dist-info/RECORD,,
+local_deep_research-0.1.22.dist-info/licenses/LICENSE,sha256=Qg2CaTdu6SWnSqk1_JtgBPp_Da-LdqJDhT1Vt1MUc5s,1072
+local_deep_research-0.1.22.dist-info/METADATA,sha256=_kjb5M093i9x4yhJ1cQ198P1bnDJg-atHCc2otwcrc0,16181
+local_deep_research-0.1.22.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+local_deep_research-0.1.22.dist-info/entry_points.txt,sha256=u-Y6Z3MWtR3dmsTDFYhXyfkPv7mALUA7YAnY4Fi1XDs,97
+local_deep_research-0.1.22.dist-info/top_level.txt,sha256=h6-uVE_wSuLOcoWwT9szhX23mBWufu77MqmM25UfbCY,20
+local_deep_research-0.1.22.dist-info/RECORD,,
--- a/local_deep_research-0.1.20.dist-info/WHEEL
+++ b/local_deep_research-0.1.22.dist-info/WHEEL
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (78.0.2)
+Generator: setuptools (78.1.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 