unique_deep_research 3.2.1-py3-none-any.whl → 3.2.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unique_deep_research/service.py +97 -98
- unique_deep_research/unique_custom/utils.py +1 -0
- {unique_deep_research-3.2.1.dist-info → unique_deep_research-3.2.3.dist-info}/METADATA +7 -1
- {unique_deep_research-3.2.1.dist-info → unique_deep_research-3.2.3.dist-info}/RECORD +6 -6
- {unique_deep_research-3.2.1.dist-info → unique_deep_research-3.2.3.dist-info}/LICENSE +0 -0
- {unique_deep_research-3.2.1.dist-info → unique_deep_research-3.2.3.dist-info}/WHEEL +0 -0
unique_deep_research/service.py
CHANGED
@@ -1,4 +1,5 @@
 import logging
+import traceback
 from typing import Any, Optional
 
 from httpx import AsyncClient
@@ -110,7 +111,14 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
         self.company_id = event.company_id
         self.user_id = event.user_id
 
-        self.client = get_async_openai_client(
+        self.client = get_async_openai_client(
+            additional_headers={
+                "x-company-id": self.company_id,
+                "x-user-id": self.user_id,
+                "x-assistant-id": self.event.payload.assistant_id,
+                "x-chat-id": self.chat_id,
+            }
+        )
 
         _LOGGER.info(f"Using async OpenAI client pointed to {self.client.base_url}")
 
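This constructor change appears to be the fix called out in the 3.2.2 changelog entry further down: the OpenAI client now carries company, user, assistant, and chat identifiers on every request so the proxy can attribute and authenticate the call. As a minimal sketch, assuming get_async_openai_client wraps openai.AsyncOpenAI, a helper could forward the headers like this (the helper name and body below are illustrative, not the package's actual implementation):

```python
from openai import AsyncOpenAI


def build_proxy_client(
    base_url: str,
    api_key: str,
    additional_headers: dict[str, str] | None = None,
) -> AsyncOpenAI:
    """Hypothetical helper: attach identification headers to every request."""
    # default_headers ride along on each request the client makes, which is
    # how a proxy can map traffic back to a company, user, assistant, and chat.
    return AsyncOpenAI(
        base_url=base_url,
        api_key=api_key,
        default_headers=additional_headers or {},
    )
```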
@@ -208,13 +216,25 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
         try:
             return await self._run(tool_call)
         except Exception as e:
+            self.write_message_log_text_message(
+                "**Research failed for an unknown reason**"
+            )
             if self.is_message_execution():
                 await self._update_execution_status(MessageExecutionUpdateStatus.FAILED)
+
             _LOGGER.exception(f"Deep Research tool run failed: {e}")
+
+            debug_info = {
+                **self._get_tool_debug_info(),
+                "error": str(e),
+                "traceback": traceback.format_exc(),
+            }
             await self.chat_service.modify_assistant_message_async(
                 content="Deep Research failed to complete for an unknown reason",
                 set_completed_at=True,
+                debug_info=debug_info,
             )
+
             return ToolCallResponse(
                 id=tool_call.id or "",
                 name=self.name,
@@ -251,22 +271,8 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
         )
         processed_result, content_chunks = await self.run_research(research_brief)
 
-
-
-            await self._update_execution_status(MessageExecutionUpdateStatus.FAILED)
-            self.write_message_log_text_message(
-                "**Research failed for an unknown reason**"
-            )
-            await self.chat_service.modify_assistant_message_async(
-                content="Deep Research failed to complete for an unknown reason",
-                set_completed_at=True,
-            )
-            return ToolCallResponse(
-                id=tool_call.id or "",
-                name=self.name,
-                content=processed_result or "Failed to complete research",
-                error_message="Research process failed or returned empty results",
-            )
+        if processed_result == "":
+            raise ValueError("Research returned empty result")
 
         await self.chat_service.modify_assistant_message_async(
             set_completed_at=True,
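The hunk above pairs with the new except block in run(): an empty research result no longer assembles its own failure response inline; it raises, and the single handler records the error message and traceback in the message's debug info. A compact sketch of that pattern, with hypothetical helper names (build_debug_info, run_research_sketch) standing in for the tool's methods:

```python
import traceback


def build_debug_info(base: dict, exc: Exception) -> dict:
    # Merge static tool context with the failure details, mirroring the
    # debug_info dict built in run() above.
    return {**base, "error": str(exc), "traceback": traceback.format_exc()}


def run_research_sketch(processed_result: str) -> str:
    # Empty results are treated as failures and raised to the caller.
    if processed_result == "":
        raise ValueError("Research returned empty result")
    return processed_result


try:
    run_research_sketch("")
except Exception as e:
    debug_info = build_debug_info({"tool": "DeepResearch"}, e)
    print(debug_info["error"])  # -> Research returned empty result
```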
@@ -287,7 +293,10 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
             await self.chat_service.modify_assistant_message_async(
                 set_completed_at=True,
             )
-            await self.
+            await self.chat_service.update_debug_info_async(
+                debug_info=self._get_tool_debug_info()
+            )
+
             # put message in short term memory to remember that we asked the followup questions
             await self.memory_service.save_async(
                 MemorySchema(message_id=self.event.payload.assistant_message.id),
@@ -317,7 +326,7 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
             percentage_completed=percentage,
         )
 
-
+    def _get_tool_debug_info(self) -> dict[str, Any]:
         """
         Update debug info for the tool execution.
         Note: Tool call logging should be handled by the orchestrator.
@@ -343,7 +352,7 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
             "userMetadata": self.event.payload.user_metadata,
             "toolParameters": self.event.payload.tool_parameters,
         }
-
+        return debug_info_event
 
     def write_message_log_text_message(self, text: str):
         create_message_log_entry(
@@ -385,97 +394,87 @@ class DeepResearchTool(Tool[DeepResearchToolConfig]):
         Run the research using the configured strategy.
         Returns a tuple of (processed_result, content_chunks)
         """
-
-
-
-
-
-
-
-
-
-
-            return result
-        except Exception as e:
-            _LOGGER.exception(f"Research failed: {e}")
-            return "", []
+        result = "", []
+        match self.config.engine.get_type():
+            case DeepResearchEngine.OPENAI:
+                _LOGGER.info("Running OpenAI research")
+                result = await self.openai_research(research_brief)
+            case DeepResearchEngine.UNIQUE:
+                _LOGGER.info("Running Custom research")
+                result = await self.custom_research(research_brief)
+        self.write_message_log_text_message("**Research done**")
+        return result
 
     async def custom_research(self, research_brief: str) -> tuple[str, list[Any]]:
         """
         Run Custom research using LangGraph multi-agent orchestration.
         Returns a tuple of (processed_result, content_chunks)
         """
-
-
-
-
-
-
-
-
-
-
+        # Create citation manager for this research session
+        citation_manager = GlobalCitationManager()
+
+        # Initialize LangGraph state with required services
+        initial_state = {
+            "messages": [HumanMessage(content=research_brief)],
+            "research_brief": research_brief,
+            "notes": [],
+            "final_report": "",
+            "chat_service": self.chat_service,
+            "message_id": self.event.payload.assistant_message.id,
+        }
+
+        # Prepare configuration for LangGraph
+        additional_openai_proxy_headers = {
+            "x-company-id": self.company_id,
+            "x-user-id": self.user_id,
+            "x-assistant-id": self.event.payload.assistant_id,
+            "x-chat-id": self.chat_id,
+        }
+        # Extract tool enablement settings from engine config if it's a UniqueEngine
+        enable_web_tools = True
+        enable_internal_tools = True
+        if isinstance(self.config.engine, UniqueEngine):
+            enable_web_tools = self.config.engine.tools.web_tools
+            enable_internal_tools = self.config.engine.tools.internal_tools
+
+        config = {
+            "configurable": {
+                "engine_config": self.config.engine,
+                "language_model_service": self.language_model_service,
+                "openai_client": self.client,
                 "chat_service": self.chat_service,
+                "content_service": self.content_service,
                 "message_id": self.event.payload.assistant_message.id,
-
-
-
-
-
-
-                "x-assistant-id": self.event.payload.assistant_id,
-                "x-chat-id": self.chat_id,
-            }
-        # Extract tool enablement settings from engine config if it's a UniqueEngine
-        enable_web_tools = True
-        enable_internal_tools = True
-        if isinstance(self.config.engine, UniqueEngine):
-            enable_web_tools = self.config.engine.tools.web_tools
-            enable_internal_tools = self.config.engine.tools.internal_tools
-
-        config = {
-            "configurable": {
-                "engine_config": self.config.engine,
-                "language_model_service": self.language_model_service,
-                "openai_client": self.client,
-                "chat_service": self.chat_service,
-                "content_service": self.content_service,
-                "message_id": self.event.payload.assistant_message.id,
-                "citation_manager": citation_manager,
-                "additional_openai_proxy_headers": additional_openai_proxy_headers,
-                "enable_web_tools": enable_web_tools,
-                "enable_internal_tools": enable_internal_tools,
-            },
-        }
-
-        result = await custom_agent.ainvoke(initial_state, config=config)  # type: ignore[arg-type]
+                "citation_manager": citation_manager,
+                "additional_openai_proxy_headers": additional_openai_proxy_headers,
+                "enable_web_tools": enable_web_tools,
+                "enable_internal_tools": enable_internal_tools,
+            },
+        }
 
-
-        research_result = result.get("final_report", "")
+        result = await custom_agent.ainvoke(initial_state, config=config)  # type: ignore[arg-type]
 
-
-
+        # Extract final report (citations already refined by agents.py)
+        research_result = result.get("final_report", "")
 
-
-
-        processed_result, references = validate_and_map_citations(
-            research_result, citation_registry
-        )
+        if not research_result:
+            return "", []
 
-
-
-
-
-
-        _LOGGER.info(
-            f"Custom research completed with {len(references)} validated citations"
-        )
-        return processed_result, []
+        # Validate and map citations using the citation registry
+        citation_registry = citation_manager.get_all_citations()
+        processed_result, references = validate_and_map_citations(
+            research_result, citation_registry
+        )
 
-
-
-
-
+        # Update the assistant message with the results
+        await self.chat_service.modify_assistant_message_async(
+            content=processed_result,
+            references=references,
+        )
+        _LOGGER.info(
+            f"Custom research completed with {len(references)} validated citations"
+        )
+        return processed_result, []
 
     async def openai_research(self, research_brief: str) -> tuple[str, list[Any]]:
         """
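The rewritten run_research drops the old try/except wrapper and instead dispatches on the configured engine with a match statement, letting failures propagate up to run(). A standalone sketch of that dispatch shape, with a stand-in enum and stub coroutines in place of DeepResearchEngine and the real research methods:

```python
import asyncio
from enum import Enum


class Engine(Enum):
    # Stand-in for the package's DeepResearchEngine values.
    OPENAI = "openai"
    UNIQUE = "unique"


async def openai_research(brief: str) -> tuple[str, list]:
    return f"OpenAI report for: {brief}", []


async def custom_research(brief: str) -> tuple[str, list]:
    return f"Custom report for: {brief}", []


async def run_research(engine: Engine, brief: str) -> tuple[str, list]:
    # Default result; overwritten by whichever engine case matches.
    result: tuple[str, list] = "", []
    match engine:
        case Engine.OPENAI:
            result = await openai_research(brief)
        case Engine.UNIQUE:
            result = await custom_research(brief)
    return result


print(asyncio.run(run_research(Engine.UNIQUE, "EU AI Act overview")))
```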
unique_deep_research/unique_custom/utils.py
CHANGED
@@ -570,6 +570,7 @@ async def ainvoke_with_token_handling(
     try:
         return await model.ainvoke(prepared_messages)
     except Exception as e:
+        _LOGGER.exception(f"Error invoking model: {e}")
         # Handle token errors by truncating history and retrying
         if is_token_error(e):
             # Filtering is not perfect, so in unlikely case we need to truncate the message history to the last AI message
{unique_deep_research-3.2.1.dist-info → unique_deep_research-3.2.3.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: unique_deep_research
-Version: 3.2.1
+Version: 3.2.3
 Summary: Deep Research Tool for complex research tasks
 License: Proprietary
 Author: Martin Fadler
@@ -36,6 +36,12 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [3.2.3] - 2026-02-05
+- Improve error handling and logging
+
+## [3.2.2] - 2026-02-05
+- Fix bug where deep research tool was not using the correct headers causing an authentication error
+
 ## [3.2.1] - 2026-02-05
 - Use deep research logger instead of tool logger
 
{unique_deep_research-3.2.1.dist-info → unique_deep_research-3.2.3.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
 unique_deep_research/__init__.py,sha256=HMksc03A7au88XB0jjeIR3zGt3YGYTA7ZIAkR8hmQZs,396
 unique_deep_research/config.py,sha256=1DV3CBxNTDTDoxsxeMLI8ARDR3-iRlhL4yhXAKmxeI4,4137
 unique_deep_research/markdown_utils.py,sha256=zyKO4c9T_KnZzOYlYZEDEvC1NHRR9koCzcWmOJbrcok,11024
-unique_deep_research/service.py,sha256=
+unique_deep_research/service.py,sha256=yLQ6lyg27fjN04z4H--g_rdetaeq_lnofAypvfNSrxk,35605
 unique_deep_research/templates/clarifying_agent.j2,sha256=TOgD5ezrlRyurpquBGT902oa7PpkdHxsvrkIFWBFa2A,3938
 unique_deep_research/templates/openai/oai_research_system_message.j2,sha256=2YipVh0V4eEikg9iXqIaBVtYwS8ycHMDu3xiFDoJ0vI,1362
 unique_deep_research/templates/report_cleanup_prompt.j2,sha256=9mjj3GX1HEQ0kkxzqvB3D_2wFgAJ-drMbTYDiSc2riI,2049
@@ -15,8 +15,8 @@ unique_deep_research/unique_custom/agents.py,sha256=zMJ8zCz-0714bMPZTzIlMqG79AdZ
 unique_deep_research/unique_custom/citation.py,sha256=dfElOqBTvbXlHXxshT6heNzeHVVKTlgSlIEFTuoNv20,4315
 unique_deep_research/unique_custom/state.py,sha256=tn4pIKN-ZCH6Tqu9XL31lziyrwFkhTVf_lBiUpETVyo,3515
 unique_deep_research/unique_custom/tools.py,sha256=vlR0cooDrXzKUNofPxXX1dcfnhoYoSBcFUylsiGyPqo,25431
-unique_deep_research/unique_custom/utils.py,sha256=
-unique_deep_research-3.2.
-unique_deep_research-3.2.
-unique_deep_research-3.2.
-unique_deep_research-3.2.
+unique_deep_research/unique_custom/utils.py,sha256=NvPCxDNH8P7y9s5fqgxpt-xPksU1s4Nkeukq3GDNjb8,20129
+unique_deep_research-3.2.3.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
+unique_deep_research-3.2.3.dist-info/METADATA,sha256=ZvQRb_zf9bmwdTCRAPc_t8xEG8K017OJKE2xTiuFCf8,7496
+unique_deep_research-3.2.3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+unique_deep_research-3.2.3.dist-info/RECORD,,
{unique_deep_research-3.2.1.dist-info → unique_deep_research-3.2.3.dist-info}/LICENSE
File without changes
{unique_deep_research-3.2.1.dist-info → unique_deep_research-3.2.3.dist-info}/WHEEL
File without changes