unique_deep_research 3.1.0.tar.gz → 3.2.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (23)
  1. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/CHANGELOG.md +7 -1
  2. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/PKG-INFO +8 -2
  3. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/pyproject.toml +1 -1
  4. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/config.py +7 -22
  5. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/service.py +27 -26
  6. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/unique_custom/tools.py +1 -1
  7. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/LICENSE +0 -0
  8. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/README.md +0 -0
  9. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/__init__.py +0 -0
  10. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/markdown_utils.py +0 -0
  11. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/templates/clarifying_agent.j2 +0 -0
  12. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/templates/openai/oai_research_system_message.j2 +0 -0
  13. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/templates/report_cleanup_prompt.j2 +0 -0
  14. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/templates/research_instructions_agent.j2 +0 -0
  15. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/templates/unique/compress_research_system.j2 +0 -0
  16. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/templates/unique/lead_agent_system.j2 +0 -0
  17. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/templates/unique/report_writer_system_open_deep_research.j2 +0 -0
  18. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/templates/unique/research_agent_system.j2 +0 -0
  19. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/unique_custom/__init__.py +0 -0
  20. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/unique_custom/agents.py +0 -0
  21. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/unique_custom/citation.py +0 -0
  22. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/unique_custom/state.py +0 -0
  23. {unique_deep_research-3.1.0 → unique_deep_research-3.2.1}/unique_deep_research/unique_custom/utils.py +0 -0
CHANGELOG.md
@@ -5,7 +5,13 @@ All notable changes to this project will be documented in this file.
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
- ## [3.1.00] - 2026-01-30
+ ## [3.2.1] - 2026-02-05
+ - Use deep research logger instead of tool logger
+
+ ## [3.2.0] - 2026-02-03
+ - Use a backwards compatible config style
+
+ ## [3.1.0] - 2026-01-30
  - Support other search engines than Google
 
  ## [3.0.28] - 2026-01-26
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: unique_deep_research
- Version: 3.1.0
+ Version: 3.2.1
  Summary: Deep Research Tool for complex research tasks
  License: Proprietary
  Author: Martin Fadler
@@ -36,7 +36,13 @@ All notable changes to this project will be documented in this file.
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
- ## [3.1.00] - 2026-01-30
+ ## [3.2.1] - 2026-02-05
+ - Use deep research logger instead of tool logger
+
+ ## [3.2.0] - 2026-02-03
+ - Use a backwards compatible config style
+
+ ## [3.1.0] - 2026-01-30
  - Support other search engines than Google
 
  ## [3.0.28] - 2026-01-26
pyproject.toml
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "unique_deep_research"
- version = "3.1.00"
+ version = "3.2.1"
  description = "Deep Research Tool for complex research tasks"
  authors = [
  "Martin Fadler <martin.fadler@unique.ch>",
unique_deep_research/config.py
@@ -1,10 +1,10 @@
  from dataclasses import dataclass
  from enum import StrEnum
  from pathlib import Path
- from typing import Any, Generic, Literal, TypeVar
+ from typing import Generic, Literal, TypeVar
 
  from jinja2 import Environment, FileSystemLoader
- from pydantic import BaseModel, Field, field_validator
+ from pydantic import BaseModel, Field
  from unique_toolkit._common.validators import LMI, get_LMI_default_field
  from unique_toolkit.agentic.tools.config import get_configuration_dict
  from unique_toolkit.agentic.tools.schemas import BaseToolConfig
@@ -13,7 +13,6 @@ from unique_web_search.config import (
  ActivatedSearchEngine,
  DefaultSearchEngine,
  )
- from unique_web_search.services.search_engine import GoogleConfig
 
  # Global template environment for the deep research tool
  TEMPLATE_DIR = Path(__file__).parent / "templates"
@@ -80,11 +79,6 @@ class OpenAIEngine(BaseEngine[Literal[DeepResearchEngine.OPENAI]]):
  class WebToolsConfig(BaseModel):
  model_config = get_configuration_dict()
 
- enable: bool = Field(
- default=True,
- description="Allow agent to use web search tools to access the web",
- )
-
  search_engine: ActivatedSearchEngine = Field( # pyright: ignore[reportInvalidTypeForm]
  default_factory=DefaultSearchEngine, # pyright: ignore[reportArgumentType]
  description="Search Engine Configuration",
@@ -96,7 +90,11 @@ class WebToolsConfig(BaseModel):
  class Tools(BaseModel):
  model_config = get_configuration_dict()
 
- web_tools: WebToolsConfig = Field(
+ web_tools: bool = Field(
+ default=True,
+ description="Allow agent to use web search tools to access the web",
+ )
+ web_tools_config: WebToolsConfig = Field(
  default=WebToolsConfig(),
  description="Configuration for web search tools",
  )
@@ -105,19 +103,6 @@ class Tools(BaseModel):
  description="Allow agent to use internal search tools access information from the knowledge base and uploaded documents",
  )
 
- @field_validator("web_tools", mode="before")
- @classmethod
- def handle_bool_case(cls, v: Any) -> Any:
- if isinstance(v, bool):
- if v:
- # Backward compatibility with old config behaviour
- return WebToolsConfig(
- enable=True, search_engine=GoogleConfig(fetch_size=10)
- )
- else:
- return WebToolsConfig(enable=False)
- return v
-
 
  class UniqueEngine(BaseEngine[Literal[DeepResearchEngine.UNIQUE]]):
  model_config = get_configuration_dict()
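Net effect of the config.py hunks above: `Tools.web_tools` is a plain boolean again (the older style that the removed `field_validator` shim was emulating), the search-engine settings move to the new `web_tools_config` field, and the `GoogleConfig` import is no longer needed. Below is a minimal, self-contained sketch of the resulting shape; `SearchEngineConfig` is a stand-in for the real `ActivatedSearchEngine`/`DefaultSearchEngine` types from unique_web_search, and descriptions are copied from the diff.

```python
# Minimal sketch of the new config shape (pydantic v2); stand-in types only.
from pydantic import BaseModel, Field


class SearchEngineConfig(BaseModel):  # stand-in for ActivatedSearchEngine
    engine: str = "google"
    fetch_size: int = 10


class WebToolsConfig(BaseModel):
    # the old `enable` flag is gone; only engine settings remain here
    search_engine: SearchEngineConfig = Field(
        default_factory=SearchEngineConfig,
        description="Search Engine Configuration",
    )


class Tools(BaseModel):
    web_tools: bool = Field(
        default=True,
        description="Allow agent to use web search tools to access the web",
    )
    web_tools_config: WebToolsConfig = Field(
        default_factory=WebToolsConfig,
        description="Configuration for web search tools",
    )
    internal_tools: bool = True


# Old-style payloads that toggled web search with a bare boolean validate
# directly, without the removed field_validator shim:
assert Tools.model_validate({"web_tools": False}).web_tools is False
assert Tools.model_validate({}).web_tools_config.search_engine.fetch_size == 10
```

This is presumably what the 3.2.0 changelog entry "Use a backwards compatible config style" refers to: boolean-valued `web_tools` entries in existing configurations parse without any coercion logic.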
unique_deep_research/service.py
@@ -1,3 +1,4 @@
+ import logging
  from typing import Any, Optional
 
  from httpx import AsyncClient
@@ -65,6 +66,8 @@ from .unique_custom.utils import (
  get_next_message_order,
  )
 
+ _LOGGER = logging.getLogger(__name__)
+
 
  class DeepResearchToolInput(BaseModel):
  model_config = ConfigDict(extra="forbid")
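The remaining service.py hunks below are mechanical: every `self.logger.*` call becomes a call on this module-level `_LOGGER`. A short sketch of the pattern, using a trimmed stand-in class rather than the real Tool base:

```python
# Sketch of the module-level logger pattern; the class below only illustrates
# call sites, it is not the real DeepResearchTool.
import logging

_LOGGER = logging.getLogger(__name__)  # e.g. "unique_deep_research.service"


class DeepResearchToolSketch:
    async def _run(self) -> None:
        _LOGGER.info("Starting Deep Research tool run")
        try:
            raise RuntimeError("boom")  # placeholder for the research pipeline
        except Exception as e:
            # .exception() logs at ERROR level and appends the traceback
            _LOGGER.exception(f"Deep Research tool run failed: {e}")


if __name__ == "__main__":
    import asyncio

    # the host application configures handlers/levels once
    logging.basicConfig(level=logging.INFO)
    asyncio.run(DeepResearchToolSketch()._run())
```

Per the 3.2.1 changelog entry, records are now emitted under the deep-research module's own logger name rather than the tool framework's `self.logger`.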
@@ -109,7 +112,7 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
 
  self.client = get_async_openai_client()
 
- self.logger.info(f"Using async OpenAI client pointed to {self.client.base_url}")
+ _LOGGER.info(f"Using async OpenAI client pointed to {self.client.base_url}")
 
  self.content_service = ContentService(
  company_id=self.company_id,
@@ -207,7 +210,7 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
  except Exception as e:
  if self.is_message_execution():
  await self._update_execution_status(MessageExecutionUpdateStatus.FAILED)
- self.logger.exception(f"Deep Research tool run failed: {e}")
+ _LOGGER.exception(f"Deep Research tool run failed: {e}")
  await self.chat_service.modify_assistant_message_async(
  content="Deep Research failed to complete for an unknown reason",
  set_completed_at=True,
@@ -220,13 +223,13 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
  )
 
  async def _run(self, tool_call: LanguageModelFunction) -> ToolCallResponse:
- self.logger.info("Starting Deep Research tool run")
+ _LOGGER.info("Starting Deep Research tool run")
 
  await self._clear_original_message()
 
  # Question answer and message execution will have the same message id, so we need to check if it is a message execution
  if await self.is_followup_question_answer() and not self.is_message_execution():
- self.logger.info("This is a follow-up question answer")
+ _LOGGER.info("This is a follow-up question answer")
  self.chat_service.create_message_execution(
  message_id=self.event.payload.assistant_message.id,
  type=MessageExecutionType.DEEP_RESEARCH,
@@ -240,7 +243,7 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
  content="",
  )
  if self.is_message_execution():
- self.logger.info("Starting research")
+ _LOGGER.info("Starting research")
  # Run research
  self.write_message_log_text_message("**Generating research plan**")
  research_brief = await self.generate_research_brief_from_dict(
@@ -386,15 +389,15 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
  result = "", []
  match self.config.engine.get_type():
  case DeepResearchEngine.OPENAI:
- self.logger.info("Running OpenAI research")
+ _LOGGER.info("Running OpenAI research")
  result = await self.openai_research(research_brief)
  case DeepResearchEngine.UNIQUE:
- self.logger.info("Running Custom research")
+ _LOGGER.info("Running Custom research")
  result = await self.custom_research(research_brief)
  self.write_message_log_text_message("**Research done**")
  return result
  except Exception as e:
- self.logger.exception(f"Research failed: {e}")
+ _LOGGER.exception(f"Research failed: {e}")
  return "", []
 
  async def custom_research(self, research_brief: str) -> tuple[str, list[Any]]:
@@ -427,7 +430,7 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
  enable_web_tools = True
  enable_internal_tools = True
  if isinstance(self.config.engine, UniqueEngine):
- enable_web_tools = self.config.engine.tools.web_tools.enable
+ enable_web_tools = self.config.engine.tools.web_tools
  enable_internal_tools = self.config.engine.tools.internal_tools
 
  config = {
@@ -464,14 +467,14 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
  content=processed_result,
  references=references,
  )
- self.logger.info(
+ _LOGGER.info(
  f"Custom research completed with {len(references)} validated citations"
  )
  return processed_result, []
 
  except Exception as e:
  error_msg = f"Custom research failed: {str(e)}"
- self.logger.exception(error_msg)
+ _LOGGER.exception(error_msg)
  return error_msg, []
 
  async def openai_research(self, research_brief: str) -> tuple[str, list[Any]]:
@@ -553,14 +556,14 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
  match event.type:
  case "response.completed":
  if event.response.usage:
- self.logger.info(
+ _LOGGER.info(
  f"OpenAI research token usage: {event.response.usage}"
  )
  # Extract the final output with annotations
  if event.response.output and len(event.response.output) > 0:
  final_output = event.response.output[-1]
  if not isinstance(final_output, ResponseOutputMessage):
- self.logger.warning(
+ _LOGGER.warning(
  f"Unexpected output type: {type(final_output)}"
  )
  continue
@@ -573,9 +576,7 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
  # Extract final report and references
  report_text = content_item.text
  annotations = content_item.annotations or []
- self.logger.info(
- "Final report extracted from OpenAI stream"
- )
+ _LOGGER.info("Final report extracted from OpenAI stream")
  return report_text, annotations
  return event.response.output_text or "", []
  case "response.incomplete":
@@ -607,7 +608,7 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
  if isinstance(
  event.item.action, ActionSearch
  ) and isinstance(event.item.action.query, str):
- self.logger.info("OpenAI web search")
+ _LOGGER.info("OpenAI web search")
  self.chat_service.create_message_log(
  message_id=self.event.payload.assistant_message.id,
  text="**Searching the web**",
@@ -630,13 +631,13 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
  elif isinstance(
  event.item.action, ActionOpenPage
  ) or isinstance(event.item.action, ActionFind):
- self.logger.info("OpenAI reading web page")
+ _LOGGER.info("OpenAI reading web page")
  if (
  not event.item.action.url
  or not isinstance(event.item.action.url, str)
  or "https://" not in event.item.action.url
  ):
- self.logger.warning(
+ _LOGGER.warning(
  f"Invalid URL from OpenAI: {event.item.action}"
  )
  continue
@@ -646,12 +647,12 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
  event.item.action.url,
  )
  if not success:
- self.logger.info(
+ _LOGGER.info(
  f"Failed to crawl URL: {event.item.action.url} but openai still opened the page"
  )
  continue
  if not title:
- self.logger.info(
+ _LOGGER.info(
  f"No title found for URL: {event.item.action.url}"
  )
  continue
@@ -678,7 +679,7 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
  ),
  )
  else:
- self.logger.info(
+ _LOGGER.info(
  f"OpenAI web action unexpected type: {type(event.item)}"
  )
  case "response.failed":
@@ -699,9 +700,9 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
  if event.response.error:
  return event.response.error.message, []
  except Exception as e:
- self.logger.exception(f"Error processing research stream event: {e}")
+ _LOGGER.exception(f"Error processing research stream event: {e}")
 
- self.logger.error("Stream ended without completion")
+ _LOGGER.error("Stream ended without completion")
  return "", []
 
  async def _postprocess_report_with_gpt(self, research_result: str) -> str:
@@ -737,10 +738,10 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
 
  formatted_result = response.choices[0].message.content
  if formatted_result:
- self.logger.info("Successfully post-processed research report")
+ _LOGGER.info("Successfully post-processed research report")
  return formatted_result
  else:
- self.logger.warning("Post-processing returned empty result, using original")
+ _LOGGER.warning("Post-processing returned empty result, using original")
  return research_result
 
  def get_tool_call_result_for_loop_history(
unique_deep_research/unique_custom/tools.py
@@ -156,7 +156,7 @@ async def web_search(query: str, config: RunnableConfig) -> str:
  raise ValueError("RunnableConfig missing 'configurable' section")
 
  configurable = config["configurable"]
- engine_config = configurable["engine_config"].tools.web_tools.search_engine
+ engine_config = configurable["engine_config"].tools.web_tools_config.search_engine
 
  search_engine_service = get_search_engine_service(
  engine_config, configurable["language_model_service"]
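The single tools.py hunk is the matching rename on the consumer side: `web_search` now pulls the engine settings from `tools.web_tools_config` instead of the old `tools.web_tools` object. A rough sketch of that lookup with plain stand-ins for the RunnableConfig payload (the dataclasses below are illustrative, not the real unique_toolkit types):

```python
# Stand-in sketch of the new lookup path in web_search; field names follow the
# diff, everything else is simplified.
from dataclasses import dataclass, field


@dataclass
class WebToolsConfig:
    search_engine: dict = field(
        default_factory=lambda: {"engine": "google", "fetch_size": 10}
    )


@dataclass
class Tools:
    web_tools: bool = True  # plain on/off switch after the config change
    web_tools_config: WebToolsConfig = field(default_factory=WebToolsConfig)


@dataclass
class EngineConfig:
    tools: Tools = field(default_factory=Tools)


# web_search receives a RunnableConfig-like mapping with a "configurable" section
config = {"configurable": {"engine_config": EngineConfig()}}

configurable = config["configurable"]
# 3.1.x read configurable["engine_config"].tools.web_tools.search_engine;
# 3.2.x reads the renamed field instead:
engine_config = configurable["engine_config"].tools.web_tools_config.search_engine
print(engine_config)  # {'engine': 'google', 'fetch_size': 10}
```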