local-deep-research 0.1.24__py3-none-any.whl → 0.1.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
local_deep_research/config.py

@@ -6,10 +6,12 @@ from platformdirs import user_documents_dir
 import os
 # Setup logging
 logger = logging.getLogger(__name__)
+from dotenv import load_dotenv
+import platform
 
 # Get config directory
 def get_config_dir():
-    import platform
+
 
     if platform.system() == "Windows":
         # Windows: Use Documents directory
@@ -32,7 +34,16 @@ SEARCH_ENGINES_FILE = CONFIG_DIR / "search_engines.toml"
 
 LOCAL_COLLECTIONS_FILE = CONFIG_DIR / "local_collections.toml"
 
+# Load the .env file explicitly
+# Load the .env file explicitly
+config_dir = get_config_dir()
+env_file = config_dir / ".env"
 
+if env_file.exists():
+    logger.info(f"Loading environment variables from: {env_file}")
+    load_dotenv(dotenv_path=env_file)
+else:
+    logger.warning(f"Warning: .env file not found at {env_file}")
 # Set environment variable for Dynaconf to use
 docs_base = Path(user_documents_dir()) / "local_deep_research"
 os.environ["DOCS_DIR"] = str(docs_base)
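Taken together, the two config.py hunks hoist `import platform` to module scope and load a `.env` file from the per-user config directory at import time, so its values sit in `os.environ` before Dynaconf reads settings. A minimal sketch of the resulting startup behavior, not the packaged code (the Windows branch follows the hunk's comment; the non-Windows path is outside this diff and assumed here):

```python
import logging
import platform  # now a module-level import, per the hunk above
from pathlib import Path

from dotenv import load_dotenv  # python-dotenv, newly imported by config.py
from platformdirs import user_documents_dir

logger = logging.getLogger(__name__)

def get_config_dir() -> Path:
    if platform.system() == "Windows":
        # Windows: use the Documents directory (comment from the hunk above)
        return Path(user_documents_dir()) / "local_deep_research"
    # Non-Windows branch is not shown in this diff; a typical
    # user-config location is assumed for illustration only.
    return Path.home() / ".config" / "local_deep_research"

env_file = get_config_dir() / ".env"
if env_file.exists():
    logger.info(f"Loading environment variables from: {env_file}")
    load_dotenv(dotenv_path=env_file)  # values land in os.environ
else:
    logger.warning(f"Warning: .env file not found at {env_file}")
```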
local_deep_research/defaults/.env.template

@@ -1,11 +1,12 @@
 # API Keys
-# ANTHROPIC_API_KEY=your-api-key-here
-# OPENAI_API_KEY=your-openai-key-here
-# SERP_API_KEY=your-api-key-here
-# GUARDIAN_API_KEY=your-api-key-here
-# GOOGLE_PSE_API_KEY=your-google-api-key-here
-# GOOGLE_PSE_ENGINE_ID=your-programmable-search-engine-id-here
+# LDR_OPENAI_ENDPOINT_API_KEY=your-api-key-here
+# LDR_ANTHROPIC_API_KEY=your-api-key-here
+# LDR_OPENAI_API_KEY=your-openai-key-here
+# LDR_SERP_API_KEY=your-api-key-here
+# LDR_GUARDIAN_API_KEY=your-api-key-here
+# LDR_GOOGLE_PSE_API_KEY=your-google-api-key-here
+# LDR_GOOGLE_PSE_ENGINE_ID=your-programmable-search-engine-id-here
 
 # SearXNG Configuration, add at least SEARXNG_INSTANCE to .env file to use this search engine
 # SEARXNG_INSTANCE = "http://localhost:8080"
-# SEARXNG_DELAY = 2.0
+# SEARXNG_DELAY = 2.0
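Every key in the template is renamed with an `LDR_` prefix, matching the prefixed fallback lookups added to llm_config.py and search_engine_factory.py below. A quick way to see which names such a template would export, using python-dotenv's `dotenv_values` (the file path is illustrative):

```python
from pathlib import Path
from dotenv import dotenv_values  # python-dotenv

# Parse a .env file without touching os.environ; the path is illustrative.
env_path = Path("~/.config/local_deep_research/.env").expanduser()
values = dotenv_values(env_path)
ldr_keys = sorted(k for k in values if k.startswith("LDR_"))
print(ldr_keys)  # e.g. ['LDR_OPENAI_API_KEY', 'LDR_SERP_API_KEY']
```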
local_deep_research/defaults/llm_config.py

@@ -9,6 +9,7 @@ from langchain_anthropic import ChatAnthropic
 from langchain_openai import ChatOpenAI
 from langchain_ollama import ChatOllama
 from langchain_community.llms import VLLM
+from local_deep_research.utilties.search_utilities import remove_think_tags
 from local_deep_research.config import settings
 import os
 import logging
@@ -23,6 +24,8 @@ VALID_PROVIDERS = ["ollama", "openai", "anthropic", "vllm", "openai_endpoint", "
 # LLM FUNCTIONS
 # ================================
 
+
+
 def get_llm(model_name=None, temperature=None, provider=None):
     """
     Get LLM instance based on model name and provider.
@@ -33,7 +36,7 @@ def get_llm(model_name=None, temperature=None, provider=None):
         provider: Provider to use (if None, uses settings.llm.provider)
 
     Returns:
-        A LangChain LLM instance
+        A LangChain LLM instance with automatic think-tag removal
     """
     # Use settings values for parameters if not provided
     if model_name is None:
@@ -56,31 +59,42 @@ def get_llm(model_name=None, temperature=None, provider=None):
 
     # Handle different providers
     if provider == "anthropic":
-        api_key = settings.get('ANTHROPIC_API_KEY', '')
+        api_key_name = 'ANTHROPIC_API_KEY'
+        api_key = settings.get(api_key_name, '')
         if not api_key:
-            api_key = os.getenv('ANTHROPIC_API_KEY')
+            api_key = os.getenv(api_key_name)
+        if not api_key:
+            api_key = os.getenv("LDR_" + api_key_name)
         if not api_key:
             logger.warning("ANTHROPIC_API_KEY not found. Falling back to default model.")
             return get_fallback_model(temperature)
 
-        return ChatAnthropic(
+        llm = ChatAnthropic(
             model=model_name, anthropic_api_key=api_key, **common_params
         )
+        return wrap_llm_without_think_tags(llm)
 
     elif provider == "openai":
-        api_key = settings.get('OPENAI_API_KEY', '')
+        api_key_name = 'OPENAI_API_KEY'
+        api_key = settings.get(api_key_name, '')
         if not api_key:
-            api_key = os.getenv('OPENAI_API_KEY')
+            api_key = os.getenv(api_key_name)
+        if not api_key:
+            api_key = os.getenv("LDR_" + api_key_name)
         if not api_key:
             logger.warning("OPENAI_API_KEY not found. Falling back to default model.")
             return get_fallback_model(temperature)
 
-        return ChatOpenAI(model=model_name, api_key=api_key, **common_params)
+        llm = ChatOpenAI(model=model_name, api_key=api_key, **common_params)
+        return wrap_llm_without_think_tags(llm)
 
     elif provider == "openai_endpoint":
-        api_key = settings.get('OPENAI_ENDPOINT_API_KEY', '')
+        api_key_name = 'OPENAI_ENDPOINT_API_KEY'
+        api_key = settings.get(api_key_name, '')
         if not api_key:
-            api_key = os.getenv('OPENAI_ENDPOINT_API_KEY')
+            api_key = os.getenv(api_key_name)
+        if not api_key:
+            api_key = os.getenv("LDR_" + api_key_name)
         if not api_key:
             logger.warning("OPENAI_ENDPOINT_API_KEY not found. Falling back to default model.")
             return get_fallback_model(temperature)
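Each provider branch now resolves its key in three steps: Dynaconf settings first, then the bare environment variable, then the `LDR_`-prefixed variable that the new .env template uses. The same logic, distilled into a standalone helper (a sketch; `settings` is stubbed with a plain dict here):

```python
import os

# Stand-in for the Dynaconf settings object used by llm_config.py.
settings = {"ANTHROPIC_API_KEY": ""}

def resolve_api_key(api_key_name):
    """Mirror the three-step lookup order introduced in get_llm()."""
    api_key = settings.get(api_key_name, "")        # 1. Dynaconf settings
    if not api_key:
        api_key = os.getenv(api_key_name)           # 2. bare environment variable
    if not api_key:
        api_key = os.getenv("LDR_" + api_key_name)  # 3. LDR_-prefixed variable
    return api_key

os.environ["LDR_ANTHROPIC_API_KEY"] = "sk-demo"  # only the prefixed name is set
print(resolve_api_key("ANTHROPIC_API_KEY"))      # -> sk-demo
```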
@@ -88,16 +102,17 @@ def get_llm(model_name=None, temperature=None, provider=None):
         # Get endpoint URL from settings
         openai_endpoint_url = settings.llm.openai_endpoint_url
 
-        return ChatOpenAI(
+        llm = ChatOpenAI(
             model=model_name,
             api_key=api_key,
             openai_api_base=openai_endpoint_url,
             **common_params
         )
+        return wrap_llm_without_think_tags(llm)
 
     elif provider == "vllm":
         try:
-            return VLLM(
+            llm = VLLM(
                 model=model_name,
                 trust_remote_code=True,
                 max_new_tokens=128,
@@ -105,6 +120,7 @@ def get_llm(model_name=None, temperature=None, provider=None):
                 top_p=0.95,
                 temperature=temperature,
             )
+            return wrap_llm_without_think_tags(llm)
         except Exception as e:
             logger.error(f"Error loading VLLM model: {e}")
             logger.warning("Falling back.")
@@ -114,54 +130,54 @@ def get_llm(model_name=None, temperature=None, provider=None):
         try:
             # Use the configurable Ollama base URL
             base_url = settings.get('OLLAMA_BASE_URL', settings.llm.get('ollama_base_url', 'http://localhost:11434'))
-            return ChatOllama(model=model_name, base_url=base_url, **common_params)
+            llm = ChatOllama(model=model_name, base_url=base_url, **common_params)
+            return wrap_llm_without_think_tags(llm)
         except Exception as e:
             logger.error(f"Error loading Ollama model: {e}")
             return get_fallback_model(temperature)
 
     elif provider == "lmstudio":
-
-        # LM Studio supports OpenAI API format, so we can use ChatOpenAI directly
-        lmstudio_url = settings.llm.get('lmstudio_url', "http://localhost:1234")
-
-        return ChatOpenAI(
-            model=model_name,
-            api_key="lm-studio", # LM Studio doesn't require a real API key
-            base_url=f"{lmstudio_url}/v1", # Use the configured URL with /v1 endpoint
-            temperature=temperature,
-            max_tokens=settings.llm.max_tokens
-        )
-
+        # LM Studio supports OpenAI API format, so we can use ChatOpenAI directly
+        lmstudio_url = settings.llm.get('lmstudio_url', "http://localhost:1234")
+
+        llm = ChatOpenAI(
+            model=model_name,
+            api_key="lm-studio", # LM Studio doesn't require a real API key
+            base_url=f"{lmstudio_url}/v1", # Use the configured URL with /v1 endpoint
+            temperature=temperature,
+            max_tokens=settings.llm.max_tokens
+        )
+        return wrap_llm_without_think_tags(llm)
 
     elif provider == "llamacpp":
-
-        # Import LlamaCpp
-        from langchain_community.llms import LlamaCpp
-
-        # Get LlamaCpp model path from settings
-        model_path = settings.llm.get('llamacpp_model_path', "")
-        if not model_path:
-            logger.error("llamacpp_model_path not set in settings")
-            raise ValueError("llamacpp_model_path not set in settings.toml")
-
-        # Get additional LlamaCpp parameters
-        n_gpu_layers = settings.llm.get('llamacpp_n_gpu_layers', 1)
-        n_batch = settings.llm.get('llamacpp_n_batch', 512)
-        f16_kv = settings.llm.get('llamacpp_f16_kv', True)
+        # Import LlamaCpp
+        from langchain_community.llms import LlamaCpp
+
+        # Get LlamaCpp model path from settings
+        model_path = settings.llm.get('llamacpp_model_path', "")
+        if not model_path:
+            logger.error("llamacpp_model_path not set in settings")
+            raise ValueError("llamacpp_model_path not set in settings.toml")
 
-        # Create LlamaCpp instance
-        return LlamaCpp(
-            model_path=model_path,
-            temperature=temperature,
-            max_tokens=settings.llm.max_tokens,
-            n_gpu_layers=n_gpu_layers,
-            n_batch=n_batch,
-            f16_kv=f16_kv,
-            verbose=True
-        )
+        # Get additional LlamaCpp parameters
+        n_gpu_layers = settings.llm.get('llamacpp_n_gpu_layers', 1)
+        n_batch = settings.llm.get('llamacpp_n_batch', 512)
+        f16_kv = settings.llm.get('llamacpp_f16_kv', True)
+
+        # Create LlamaCpp instance
+        llm = LlamaCpp(
+            model_path=model_path,
+            temperature=temperature,
+            max_tokens=settings.llm.max_tokens,
+            n_gpu_layers=n_gpu_layers,
+            n_batch=n_batch,
+            f16_kv=f16_kv,
+            verbose=True
+        )
+        return wrap_llm_without_think_tags(llm)
 
     else:
-        return get_fallback_model(temperature)
+        return wrap_llm_without_think_tags(get_fallback_model(temperature))
 
 def get_fallback_model(temperature=None):
     """Create a dummy model for when no providers are available"""
@@ -174,6 +190,31 @@ def get_fallback_model(temperature=None):
 # COMPATIBILITY FUNCTIONS
 # ================================
 
+def wrap_llm_without_think_tags(llm):
+    """Create a wrapper class that processes LLM outputs with remove_think_tags"""
+
+
+    class ProcessingLLMWrapper:
+        def __init__(self, base_llm):
+            self.base_llm = base_llm
+
+        def invoke(self, *args, **kwargs):
+            response = self.base_llm.invoke(*args, **kwargs)
+
+            # Process the response content if it has a content attribute
+            if hasattr(response, 'content'):
+                response.content = remove_think_tags(response.content)
+            elif isinstance(response, str):
+                response = remove_think_tags(response)
+
+            return response
+
+        # Pass through any other attributes to the base LLM
+        def __getattr__(self, name):
+            return getattr(self.base_llm, name)
+
+    return ProcessingLLMWrapper(llm)
+
 def get_available_provider_types():
     """Return available model providers"""
     providers = {}
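The wrapper intercepts only `invoke()` and forwards every other attribute lookup to the wrapped model, so callers keep the usual LangChain surface. A self-contained demonstration with a dummy model; `remove_think_tags` is assumed to strip `<think>...</think>` spans from reasoning-model output, and the regex below is a hypothetical stand-in, not the packaged implementation:

```python
import re

def remove_think_tags(text):
    # Hypothetical stand-in for local_deep_research.utilties.search_utilities.
    return re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL).strip()

class DummyLLM:
    """Minimal object with the invoke() surface the wrapper expects."""
    def invoke(self, prompt):
        return "<think>chain of thought...</think>The answer is 42."
    def bind(self, **kwargs):
        return self  # an arbitrary extra attribute, reached via __getattr__

class ProcessingLLMWrapper:
    def __init__(self, base_llm):
        self.base_llm = base_llm

    def invoke(self, *args, **kwargs):
        response = self.base_llm.invoke(*args, **kwargs)
        if hasattr(response, "content"):        # chat models return messages
            response.content = remove_think_tags(response.content)
        elif isinstance(response, str):         # plain-text LLMs return str
            response = remove_think_tags(response)
        return response

    def __getattr__(self, name):
        # Anything the wrapper doesn't define is served by the base LLM.
        return getattr(self.base_llm, name)

llm = ProcessingLLMWrapper(DummyLLM())
print(llm.invoke("question"))       # -> The answer is 42.
print(llm.bind() is llm.base_llm)   # attribute pass-through -> True
```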
local_deep_research/web_search_engines/search_engine_factory.py

@@ -46,7 +46,10 @@ def create_search_engine(engine_name: str, llm=None, **kwargs) -> Optional[BaseS
 
         # First check environment variable
        api_key = os.getenv(api_key_env)
-
+        if not api_key:
+            api_key = os.getenv("LDR_" + api_key_env)
+
+
         # If not found in environment, check Dynaconf settings
         if not api_key and api_key_env:
             # Convert env var name to settings path (e.g., BRAVE_API_KEY -> brave_api_key)
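`create_search_engine()` gains the same prefixed fallback: an engine's key variable (e.g. `BRAVE_API_KEY`) is tried bare, then with the `LDR_` prefix, and only then via the lowercased Dynaconf settings path mentioned in the context line. A condensed sketch of that chain, with the settings tree stubbed as a plain dict:

```python
import os

# Illustrative stub for the Dynaconf settings tree that is checked last.
search_settings = {"brave_api_key": ""}

def resolve_engine_key(api_key_env):
    api_key = os.getenv(api_key_env)               # 1. bare environment variable
    if not api_key:
        api_key = os.getenv("LDR_" + api_key_env)  # 2. LDR_-prefixed variable
    if not api_key:
        # 3. settings path, e.g. BRAVE_API_KEY -> brave_api_key
        api_key = search_settings.get(api_key_env.lower())
    return api_key

os.environ["LDR_BRAVE_API_KEY"] = "brv-demo"
print(resolve_engine_key("BRAVE_API_KEY"))  # -> brv-demo
```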
local_deep_research-0.1.25.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: local-deep-research
-Version: 0.1.24
+Version: 0.1.25
 Summary: AI-powered research assistant with deep, iterative analysis using LLMs and web searches
 Author-email: LearningCircuit <185559241+LearningCircuit@users.noreply.github.com>, HashedViking <6432677+HashedViking@users.noreply.github.com>
 License: MIT License
@@ -65,6 +65,7 @@ Requires-Dist: xmltodict>=0.13.0
 Requires-Dist: lxml>=4.9.2
 Requires-Dist: pdfplumber>=0.9.0
 Requires-Dist: unstructured>=0.10.0
+Requires-Dist: google-search-results
 Dynamic: license-file
 
 # Local Deep Research
@@ -79,7 +80,15 @@ A powerful AI-powered research assistant that performs deep, iterative analysis
   </a>
 </div>
 
-## Quick Start
+## Windows Installation
+
+Download the [Windows Installer](https://github.com/LearningCircuit/local-deep-research/releases/download/v0.1.0/LocalDeepResearch_Setup.exe) for easy one-click installation.
+
+**Requires Ollama or other model preinstalled.**
+Download from https://ollama.ai and then pull a model
+ollama pull gemma3:12b
+
+## Quick Start (not required if installed with windows installer)
 
 ```bash
 # Install the package
@@ -107,12 +116,12 @@ Access the web interface at `http://127.0.0.1:5000` in your browser.
 
 ## Docker Support
 
-### Build the image first if you haven't already
+Build the image first if you haven't already
 ```bash
 docker build -t local-deep-research .
 ```
 
-### Quick Docker Run
+Quick Docker Run
 
 ```bash
 # Run with default settings (connects to Ollama running on the host)
@@ -126,6 +135,31 @@ For comprehensive Docker setup information, see:
 - [Docker Usage Guide](https://github.com/LearningCircuit/local-deep-research/blob/main/docs/docker-usage-readme.md)
 - [Docker Compose Guide](https://github.com/LearningCircuit/local-deep-research/blob/main/docs/docker-compose-guide.md)
 
+## Programmatic Access
+
+Local Deep Research now provides a simple API for programmatic access to its research capabilities:
+
+```python
+import os
+# Set environment variables to control the LLM
+os.environ["LDR_LLM__MODEL"] = "mistral"  # Specify model name
+
+from local_deep_research import quick_summary, generate_report, analyze_documents
+
+# Generate a quick research summary with custom parameters
+results = quick_summary(
+    query="advances in fusion energy",
+    search_tool="auto",  # Auto-select the best search engine
+    iterations=1,  # Single research cycle for speed
+    questions_per_iteration=2,  # Generate 2 follow-up questions
+    max_results=30,  # Consider up to 30 search results
+    temperature=0.7  # Control creativity of generation
+)
+print(results["summary"])
+```
+
+These functions provide flexible options for customizing the search parameters, iterations, and output formats. For more examples, see the [programmatic access tutorial](https://github.com/LearningCircuit/local-deep-research/blob/main/examples/programmatic_access.ipynb).
+
 
 ## Features
 
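The relocated README section keeps only the `quick_summary` example; the copy removed from its old position further down also demonstrated `generate_report` and `analyze_documents`. Reproduced from that removed hunk for reference:

```python
from local_deep_research import generate_report, analyze_documents

# Create a comprehensive structured report
report = generate_report("impact of quantum computing on cryptography")
print(report["content"])

# Analyze documents in a local collection
docs = analyze_documents("renewable energy", "research_papers")
```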
@@ -308,27 +342,6 @@ You can use local document search in several ways:
 3. **All collections**: Set `tool = "local_all"` to search across all collections
 4. **Query syntax**: Type `collection:project_docs your query` to target a specific collection
 
-## Programmatic Access
-
-Local Deep Research now provides a simple API for programmatic access to its research capabilities:
-
-```python
-from local_deep_research import quick_summary, generate_report
-
-# Generate a quick research summary
-results = quick_summary("advances in fusion energy")
-print(results["summary"])
-
-# Create a comprehensive structured report
-report = generate_report("impact of quantum computing on cryptography")
-print(report["content"])
-
-# Analyze documents in a local collection
-from local_deep_research import analyze_documents
-docs = analyze_documents("renewable energy", "research_papers")
-```
-
-These functions provide flexible options for customizing the search parameters, iterations, and output formats. For more examples, see the [programmatic access tutorial](https://github.com/LearningCircuit/local-deep-research/blob/programmatic-access/examples/programmatic_access.ipynb).
 
 ## Advanced Configuration
 
local_deep_research-0.1.25.dist-info/RECORD

@@ -1,14 +1,14 @@
 local_deep_research/__init__.py,sha256=Grde0sFEYyCXrPCfxd-9b9v1M6OurrzQbVYRmKQ9E7w,886
 local_deep_research/citation_handler.py,sha256=v_fwTy-2XvUuoH3OQRzmBrvaiN7mBk8jbNfySslmt5g,4357
-local_deep_research/config.py,sha256=3g8-QPMrxoIMjHvyjSJBFUELmAIyOQFHApUnd8p50a8,9881
+local_deep_research/config.py,sha256=n6TSkNtdie86Sc71jjnejwK_hBIDpJahNZwbiDEfzXg,10233
 local_deep_research/main.py,sha256=uQXtGQ6LtZNd5Qw63D5ke4Q_LjYimouWVSUknVsk3JQ,3645
 local_deep_research/report_generator.py,sha256=EvaArnWirMgg42fMzmZeJczoEYujEbJ2ryHHYuuoXx8,8058
 local_deep_research/search_system.py,sha256=yY3BEzX68vdtUcYF9h6lC3yVao0YA_NSBj6W3-RwlKk,15459
 local_deep_research/api/__init__.py,sha256=H0WGFSohUR0T2QswtWngPZWoMYPs9VWQTQYaivAlrJU,440
 local_deep_research/api/research_functions.py,sha256=Z23wZYsB1x2ivdFYJ9uqIqCAwjR2RdOff7Bq30DxQYU,12099
-local_deep_research/defaults/.env.template,sha256=U4B_InwGZl4IVuAdbY_u0nKN_akHtebMBwUU_e_eljc,427
+local_deep_research/defaults/.env.template,sha256=SI8WDMFrj-yANlnfd6jJ4fLYke7zSzCd9Ukk_HpyM88,500
 local_deep_research/defaults/__init__.py,sha256=2Vvlkl-gmP_qPYWegE4JBgummypogl3VXrQ1XzptFDU,1381
-local_deep_research/defaults/llm_config.py,sha256=Ql0euemgLw_Uwg5g05sA1SkVzAYK7O_ZAnnBi3rsAi4,10095
+local_deep_research/defaults/llm_config.py,sha256=1KiW9k8kmsUD5u9VgEdgWZBNMmK1BA0ZxoGbuC2spAk,11652
 local_deep_research/defaults/local_collections.toml,sha256=zNa03PVnFrZ757JdZOuW6QDxkOc6ep5tG8baGBrMmXM,1778
 local_deep_research/defaults/main.toml,sha256=6Lzbc5sVLxMwu83bLBp_tpYOZgmtThCfPL1L42eTGro,1939
 local_deep_research/defaults/search_engines.toml,sha256=g0-qrw10oMgW74z_lYpPDkGwMje25mvalfY1EJ0nL3g,8134
@@ -34,7 +34,7 @@ local_deep_research/web/templates/settings.html,sha256=S9A-tdpzMhP2Zw7kp2jxKlwaW
 local_deep_research/web/templates/settings_dashboard.html,sha256=De-v1KNdVvkXme5i3YZ6sIfU9aAKDc_N-AW9n4PZoso,9109
 local_deep_research/web_search_engines/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 local_deep_research/web_search_engines/search_engine_base.py,sha256=QmhfjuHK2deomh8tARghKuYnF-5t3wwBB661odS2VtU,8065
-local_deep_research/web_search_engines/search_engine_factory.py,sha256=Sld6bYTwcyTxgVLx04t00sD7vfJhSHFOl6iiGJ08ZUE,11118
+local_deep_research/web_search_engines/search_engine_factory.py,sha256=8REYoRdDWvB6XLhBym8rqzuULX28VQ-UKWNcRA5tLTQ,11189
 local_deep_research/web_search_engines/search_engines_config.py,sha256=5C0tCmy_Jpv1YHLZLlyS7h5B2XToYcWPAaBDEOsxMo0,2739
 local_deep_research/web_search_engines/engines/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 local_deep_research/web_search_engines/engines/full_search.py,sha256=BuOz8dX-XocazCG7gGBKFnIY99FZtNFI0-Wq3fhsfp4,4689
@@ -53,9 +53,9 @@ local_deep_research/web_search_engines/engines/search_engine_semantic_scholar.py
 local_deep_research/web_search_engines/engines/search_engine_serpapi.py,sha256=XikEYnM-pAaR70VeAJ28lbqpRzCj4bCA9xY29taTV8g,9215
 local_deep_research/web_search_engines/engines/search_engine_wayback.py,sha256=astAvSLajDZ6rwgthJ3iBcHSWuDSYPO7uilIxaJhXmU,18132
 local_deep_research/web_search_engines/engines/search_engine_wikipedia.py,sha256=KSGJECbEcxZpVK-PhYsTCtzedSK0l1AjQmvGtx8KBks,9799
-local_deep_research-0.1.24.dist-info/licenses/LICENSE,sha256=Qg2CaTdu6SWnSqk1_JtgBPp_Da-LdqJDhT1Vt1MUc5s,1072
-local_deep_research-0.1.24.dist-info/METADATA,sha256=mM-b8LezrBKyR-VjOYE0lMqoKzEwfvID2kMYgUJH9Z4,17096
-local_deep_research-0.1.24.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-local_deep_research-0.1.24.dist-info/entry_points.txt,sha256=u-Y6Z3MWtR3dmsTDFYhXyfkPv7mALUA7YAnY4Fi1XDs,97
-local_deep_research-0.1.24.dist-info/top_level.txt,sha256=h6-uVE_wSuLOcoWwT9szhX23mBWufu77MqmM25UfbCY,20
-local_deep_research-0.1.24.dist-info/RECORD,,
+local_deep_research-0.1.25.dist-info/licenses/LICENSE,sha256=Qg2CaTdu6SWnSqk1_JtgBPp_Da-LdqJDhT1Vt1MUc5s,1072
+local_deep_research-0.1.25.dist-info/METADATA,sha256=sVEzW1cEvbnt0d-FtGmnZLqzf7_D1cF8PWC13bxbmBM,17711
+local_deep_research-0.1.25.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+local_deep_research-0.1.25.dist-info/entry_points.txt,sha256=u-Y6Z3MWtR3dmsTDFYhXyfkPv7mALUA7YAnY4Fi1XDs,97
+local_deep_research-0.1.25.dist-info/top_level.txt,sha256=h6-uVE_wSuLOcoWwT9szhX23mBWufu77MqmM25UfbCY,20
+local_deep_research-0.1.25.dist-info/RECORD,,