unique_deep_research 3.2.2__py3-none-any.whl → 3.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unique_deep_research/service.py +89 -97
- unique_deep_research/unique_custom/utils.py +1 -0
- {unique_deep_research-3.2.2.dist-info → unique_deep_research-3.2.3.dist-info}/METADATA +4 -1
- {unique_deep_research-3.2.2.dist-info → unique_deep_research-3.2.3.dist-info}/RECORD +6 -6
- {unique_deep_research-3.2.2.dist-info → unique_deep_research-3.2.3.dist-info}/LICENSE +0 -0
- {unique_deep_research-3.2.2.dist-info → unique_deep_research-3.2.3.dist-info}/WHEEL +0 -0
unique_deep_research/service.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import logging
|
|
2
|
+
import traceback
|
|
2
3
|
from typing import Any, Optional
|
|
3
4
|
|
|
4
5
|
from httpx import AsyncClient
|
|
@@ -215,13 +216,25 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
|
|
|
215
216
|
try:
|
|
216
217
|
return await self._run(tool_call)
|
|
217
218
|
except Exception as e:
|
|
219
|
+
self.write_message_log_text_message(
|
|
220
|
+
"**Research failed for an unknown reason**"
|
|
221
|
+
)
|
|
218
222
|
if self.is_message_execution():
|
|
219
223
|
await self._update_execution_status(MessageExecutionUpdateStatus.FAILED)
|
|
224
|
+
|
|
220
225
|
_LOGGER.exception(f"Deep Research tool run failed: {e}")
|
|
226
|
+
|
|
227
|
+
debug_info = {
|
|
228
|
+
**self._get_tool_debug_info(),
|
|
229
|
+
"error": str(e),
|
|
230
|
+
"traceback": traceback.format_exc(),
|
|
231
|
+
}
|
|
221
232
|
await self.chat_service.modify_assistant_message_async(
|
|
222
233
|
content="Deep Research failed to complete for an unknown reason",
|
|
223
234
|
set_completed_at=True,
|
|
235
|
+
debug_info=debug_info,
|
|
224
236
|
)
|
|
237
|
+
|
|
225
238
|
return ToolCallResponse(
|
|
226
239
|
id=tool_call.id or "",
|
|
227
240
|
name=self.name,
|
|
@@ -258,22 +271,8 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
|
|
|
258
271
|
)
|
|
259
272
|
processed_result, content_chunks = await self.run_research(research_brief)
|
|
260
273
|
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
await self._update_execution_status(MessageExecutionUpdateStatus.FAILED)
|
|
264
|
-
self.write_message_log_text_message(
|
|
265
|
-
"**Research failed for an unknown reason**"
|
|
266
|
-
)
|
|
267
|
-
await self.chat_service.modify_assistant_message_async(
|
|
268
|
-
content="Deep Research failed to complete for an unknown reason",
|
|
269
|
-
set_completed_at=True,
|
|
270
|
-
)
|
|
271
|
-
return ToolCallResponse(
|
|
272
|
-
id=tool_call.id or "",
|
|
273
|
-
name=self.name,
|
|
274
|
-
content=processed_result or "Failed to complete research",
|
|
275
|
-
error_message="Research process failed or returned empty results",
|
|
276
|
-
)
|
|
274
|
+
if processed_result == "":
|
|
275
|
+
raise ValueError("Research returned empty result")
|
|
277
276
|
|
|
278
277
|
await self.chat_service.modify_assistant_message_async(
|
|
279
278
|
set_completed_at=True,
|
|
@@ -294,7 +293,10 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
|
|
|
294
293
|
await self.chat_service.modify_assistant_message_async(
|
|
295
294
|
set_completed_at=True,
|
|
296
295
|
)
|
|
297
|
-
await self.
|
|
296
|
+
await self.chat_service.update_debug_info_async(
|
|
297
|
+
debug_info=self._get_tool_debug_info()
|
|
298
|
+
)
|
|
299
|
+
|
|
298
300
|
# put message in short term memory to remember that we asked the followup questions
|
|
299
301
|
await self.memory_service.save_async(
|
|
300
302
|
MemorySchema(message_id=self.event.payload.assistant_message.id),
|
|
@@ -324,7 +326,7 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
|
|
|
324
326
|
percentage_completed=percentage,
|
|
325
327
|
)
|
|
326
328
|
|
|
327
|
-
|
|
329
|
+
def _get_tool_debug_info(self) -> dict[str, Any]:
|
|
328
330
|
"""
|
|
329
331
|
Update debug info for the tool execution.
|
|
330
332
|
Note: Tool call logging should be handled by the orchestrator.
|
|
@@ -350,7 +352,7 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
|
|
|
350
352
|
"userMetadata": self.event.payload.user_metadata,
|
|
351
353
|
"toolParameters": self.event.payload.tool_parameters,
|
|
352
354
|
}
|
|
353
|
-
|
|
355
|
+
return debug_info_event
|
|
354
356
|
|
|
355
357
|
def write_message_log_text_message(self, text: str):
|
|
356
358
|
create_message_log_entry(
|
|
@@ -392,97 +394,87 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
|
|
|
392
394
|
Run the research using the configured strategy.
|
|
393
395
|
Returns a tuple of (processed_result, content_chunks)
|
|
394
396
|
"""
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
404
|
-
|
|
405
|
-
return result
|
|
406
|
-
except Exception as e:
|
|
407
|
-
_LOGGER.exception(f"Research failed: {e}")
|
|
408
|
-
return "", []
|
|
397
|
+
result = "", []
|
|
398
|
+
match self.config.engine.get_type():
|
|
399
|
+
case DeepResearchEngine.OPENAI:
|
|
400
|
+
_LOGGER.info("Running OpenAI research")
|
|
401
|
+
result = await self.openai_research(research_brief)
|
|
402
|
+
case DeepResearchEngine.UNIQUE:
|
|
403
|
+
_LOGGER.info("Running Custom research")
|
|
404
|
+
result = await self.custom_research(research_brief)
|
|
405
|
+
self.write_message_log_text_message("**Research done**")
|
|
406
|
+
return result
|
|
409
407
|
|
|
410
408
|
async def custom_research(self, research_brief: str) -> tuple[str, list[Any]]:
|
|
411
409
|
"""
|
|
412
410
|
Run Custom research using LangGraph multi-agent orchestration.
|
|
413
411
|
Returns a tuple of (processed_result, content_chunks)
|
|
414
412
|
"""
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
413
|
+
# Create citation manager for this research session
|
|
414
|
+
citation_manager = GlobalCitationManager()
|
|
415
|
+
|
|
416
|
+
# Initialize LangGraph state with required services
|
|
417
|
+
initial_state = {
|
|
418
|
+
"messages": [HumanMessage(content=research_brief)],
|
|
419
|
+
"research_brief": research_brief,
|
|
420
|
+
"notes": [],
|
|
421
|
+
"final_report": "",
|
|
422
|
+
"chat_service": self.chat_service,
|
|
423
|
+
"message_id": self.event.payload.assistant_message.id,
|
|
424
|
+
}
|
|
425
|
+
|
|
426
|
+
# Prepare configuration for LangGraph
|
|
427
|
+
additional_openai_proxy_headers = {
|
|
428
|
+
"x-company-id": self.company_id,
|
|
429
|
+
"x-user-id": self.user_id,
|
|
430
|
+
"x-assistant-id": self.event.payload.assistant_id,
|
|
431
|
+
"x-chat-id": self.chat_id,
|
|
432
|
+
}
|
|
433
|
+
# Extract tool enablement settings from engine config if it's a UniqueEngine
|
|
434
|
+
enable_web_tools = True
|
|
435
|
+
enable_internal_tools = True
|
|
436
|
+
if isinstance(self.config.engine, UniqueEngine):
|
|
437
|
+
enable_web_tools = self.config.engine.tools.web_tools
|
|
438
|
+
enable_internal_tools = self.config.engine.tools.internal_tools
|
|
439
|
+
|
|
440
|
+
config = {
|
|
441
|
+
"configurable": {
|
|
442
|
+
"engine_config": self.config.engine,
|
|
443
|
+
"language_model_service": self.language_model_service,
|
|
444
|
+
"openai_client": self.client,
|
|
425
445
|
"chat_service": self.chat_service,
|
|
446
|
+
"content_service": self.content_service,
|
|
426
447
|
"message_id": self.event.payload.assistant_message.id,
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
|
|
433
|
-
"x-assistant-id": self.event.payload.assistant_id,
|
|
434
|
-
"x-chat-id": self.chat_id,
|
|
435
|
-
}
|
|
436
|
-
# Extract tool enablement settings from engine config if it's a UniqueEngine
|
|
437
|
-
enable_web_tools = True
|
|
438
|
-
enable_internal_tools = True
|
|
439
|
-
if isinstance(self.config.engine, UniqueEngine):
|
|
440
|
-
enable_web_tools = self.config.engine.tools.web_tools
|
|
441
|
-
enable_internal_tools = self.config.engine.tools.internal_tools
|
|
442
|
-
|
|
443
|
-
config = {
|
|
444
|
-
"configurable": {
|
|
445
|
-
"engine_config": self.config.engine,
|
|
446
|
-
"language_model_service": self.language_model_service,
|
|
447
|
-
"openai_client": self.client,
|
|
448
|
-
"chat_service": self.chat_service,
|
|
449
|
-
"content_service": self.content_service,
|
|
450
|
-
"message_id": self.event.payload.assistant_message.id,
|
|
451
|
-
"citation_manager": citation_manager,
|
|
452
|
-
"additional_openai_proxy_headers": additional_openai_proxy_headers,
|
|
453
|
-
"enable_web_tools": enable_web_tools,
|
|
454
|
-
"enable_internal_tools": enable_internal_tools,
|
|
455
|
-
},
|
|
456
|
-
}
|
|
457
|
-
|
|
458
|
-
result = await custom_agent.ainvoke(initial_state, config=config) # type: ignore[arg-type]
|
|
448
|
+
"citation_manager": citation_manager,
|
|
449
|
+
"additional_openai_proxy_headers": additional_openai_proxy_headers,
|
|
450
|
+
"enable_web_tools": enable_web_tools,
|
|
451
|
+
"enable_internal_tools": enable_internal_tools,
|
|
452
|
+
},
|
|
453
|
+
}
|
|
459
454
|
|
|
460
|
-
|
|
461
|
-
research_result = result.get("final_report", "")
|
|
455
|
+
result = await custom_agent.ainvoke(initial_state, config=config) # type: ignore[arg-type]
|
|
462
456
|
|
|
463
|
-
|
|
464
|
-
|
|
457
|
+
# Extract final report (citations already refined by agents.py)
|
|
458
|
+
research_result = result.get("final_report", "")
|
|
465
459
|
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
processed_result, references = validate_and_map_citations(
|
|
469
|
-
research_result, citation_registry
|
|
470
|
-
)
|
|
460
|
+
if not research_result:
|
|
461
|
+
return "", []
|
|
471
462
|
|
|
472
|
-
|
|
473
|
-
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
477
|
-
_LOGGER.info(
|
|
478
|
-
f"Custom research completed with {len(references)} validated citations"
|
|
479
|
-
)
|
|
480
|
-
return processed_result, []
|
|
463
|
+
# Validate and map citations using the citation registry
|
|
464
|
+
citation_registry = citation_manager.get_all_citations()
|
|
465
|
+
processed_result, references = validate_and_map_citations(
|
|
466
|
+
research_result, citation_registry
|
|
467
|
+
)
|
|
481
468
|
|
|
482
|
-
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
|
|
469
|
+
# Update the assistant message with the results
|
|
470
|
+
await self.chat_service.modify_assistant_message_async(
|
|
471
|
+
content=processed_result,
|
|
472
|
+
references=references,
|
|
473
|
+
)
|
|
474
|
+
_LOGGER.info(
|
|
475
|
+
f"Custom research completed with {len(references)} validated citations"
|
|
476
|
+
)
|
|
477
|
+
return processed_result, []
|
|
486
478
|
|
|
487
479
|
async def openai_research(self, research_brief: str) -> tuple[str, list[Any]]:
|
|
488
480
|
"""
|
|
@@ -570,6 +570,7 @@ async def ainvoke_with_token_handling(
|
|
|
570
570
|
try:
|
|
571
571
|
return await model.ainvoke(prepared_messages)
|
|
572
572
|
except Exception as e:
|
|
573
|
+
_LOGGER.exception(f"Error invoking model: {e}")
|
|
573
574
|
# Handle token errors by truncating history and retrying
|
|
574
575
|
if is_token_error(e):
|
|
575
576
|
# Filtering is not perfect, so in unlikely case we need to truncate the message history to the last AI message
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: unique_deep_research
|
|
3
|
-
Version: 3.2.2
|
|
3
|
+
Version: 3.2.3
|
|
4
4
|
Summary: Deep Research Tool for complex research tasks
|
|
5
5
|
License: Proprietary
|
|
6
6
|
Author: Martin Fadler
|
|
@@ -36,6 +36,9 @@ All notable changes to this project will be documented in this file.
|
|
|
36
36
|
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
|
37
37
|
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
|
38
38
|
|
|
39
|
+
## [3.2.3] - 2026-02-05
|
|
40
|
+
- Improve error handling and logging
|
|
41
|
+
|
|
39
42
|
## [3.2.2] - 2026-02-05
|
|
40
43
|
- Fix bug where deep research tool was not using the correct headers causing an authentication error
|
|
41
44
|
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
unique_deep_research/__init__.py,sha256=HMksc03A7au88XB0jjeIR3zGt3YGYTA7ZIAkR8hmQZs,396
|
|
2
2
|
unique_deep_research/config.py,sha256=1DV3CBxNTDTDoxsxeMLI8ARDR3-iRlhL4yhXAKmxeI4,4137
|
|
3
3
|
unique_deep_research/markdown_utils.py,sha256=zyKO4c9T_KnZzOYlYZEDEvC1NHRR9koCzcWmOJbrcok,11024
|
|
4
|
-
unique_deep_research/service.py,sha256=
|
|
4
|
+
unique_deep_research/service.py,sha256=yLQ6lyg27fjN04z4H--g_rdetaeq_lnofAypvfNSrxk,35605
|
|
5
5
|
unique_deep_research/templates/clarifying_agent.j2,sha256=TOgD5ezrlRyurpquBGT902oa7PpkdHxsvrkIFWBFa2A,3938
|
|
6
6
|
unique_deep_research/templates/openai/oai_research_system_message.j2,sha256=2YipVh0V4eEikg9iXqIaBVtYwS8ycHMDu3xiFDoJ0vI,1362
|
|
7
7
|
unique_deep_research/templates/report_cleanup_prompt.j2,sha256=9mjj3GX1HEQ0kkxzqvB3D_2wFgAJ-drMbTYDiSc2riI,2049
|
|
@@ -15,8 +15,8 @@ unique_deep_research/unique_custom/agents.py,sha256=zMJ8zCz-0714bMPZTzIlMqG79AdZ
|
|
|
15
15
|
unique_deep_research/unique_custom/citation.py,sha256=dfElOqBTvbXlHXxshT6heNzeHVVKTlgSlIEFTuoNv20,4315
|
|
16
16
|
unique_deep_research/unique_custom/state.py,sha256=tn4pIKN-ZCH6Tqu9XL31lziyrwFkhTVf_lBiUpETVyo,3515
|
|
17
17
|
unique_deep_research/unique_custom/tools.py,sha256=vlR0cooDrXzKUNofPxXX1dcfnhoYoSBcFUylsiGyPqo,25431
|
|
18
|
-
unique_deep_research/unique_custom/utils.py,sha256=
|
|
19
|
-
unique_deep_research-3.2.2.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
|
|
20
|
-
unique_deep_research-3.2.
|
|
21
|
-
unique_deep_research-3.2.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
|
|
22
|
-
unique_deep_research-3.2.2.dist-info/RECORD,,
|
|
18
|
+
unique_deep_research/unique_custom/utils.py,sha256=NvPCxDNH8P7y9s5fqgxpt-xPksU1s4Nkeukq3GDNjb8,20129
|
|
19
|
+
unique_deep_research-3.2.3.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
|
|
20
|
+
unique_deep_research-3.2.3.dist-info/METADATA,sha256=ZvQRb_zf9bmwdTCRAPc_t8xEG8K017OJKE2xTiuFCf8,7496
|
|
21
|
+
unique_deep_research-3.2.3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
|
|
22
|
+
unique_deep_research-3.2.3.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|