unique_toolkit 1.8.1__py3-none-any.whl → 1.23.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of unique_toolkit might be problematic.

Files changed (105)
  1. unique_toolkit/__init__.py +20 -0
  2. unique_toolkit/_common/api_calling/human_verification_manager.py +121 -28
  3. unique_toolkit/_common/chunk_relevancy_sorter/config.py +3 -3
  4. unique_toolkit/_common/chunk_relevancy_sorter/tests/test_service.py +2 -5
  5. unique_toolkit/_common/default_language_model.py +9 -3
  6. unique_toolkit/_common/docx_generator/__init__.py +7 -0
  7. unique_toolkit/_common/docx_generator/config.py +12 -0
  8. unique_toolkit/_common/docx_generator/schemas.py +80 -0
  9. unique_toolkit/_common/docx_generator/service.py +252 -0
  10. unique_toolkit/_common/docx_generator/template/Doc Template.docx +0 -0
  11. unique_toolkit/_common/endpoint_builder.py +138 -117
  12. unique_toolkit/_common/endpoint_requestor.py +240 -14
  13. unique_toolkit/_common/exception.py +20 -0
  14. unique_toolkit/_common/feature_flags/schema.py +1 -5
  15. unique_toolkit/_common/referencing.py +53 -0
  16. unique_toolkit/_common/string_utilities.py +52 -1
  17. unique_toolkit/_common/tests/test_referencing.py +521 -0
  18. unique_toolkit/_common/tests/test_string_utilities.py +506 -0
  19. unique_toolkit/_common/utils/files.py +43 -0
  20. unique_toolkit/agentic/debug_info_manager/debug_info_manager.py +16 -6
  21. unique_toolkit/agentic/debug_info_manager/test/test_debug_info_manager.py +278 -0
  22. unique_toolkit/agentic/evaluation/config.py +3 -2
  23. unique_toolkit/agentic/evaluation/context_relevancy/service.py +2 -2
  24. unique_toolkit/agentic/evaluation/evaluation_manager.py +9 -5
  25. unique_toolkit/agentic/evaluation/hallucination/constants.py +1 -1
  26. unique_toolkit/agentic/evaluation/hallucination/hallucination_evaluation.py +26 -3
  27. unique_toolkit/agentic/history_manager/history_manager.py +14 -11
  28. unique_toolkit/agentic/history_manager/loop_token_reducer.py +3 -4
  29. unique_toolkit/agentic/history_manager/utils.py +10 -87
  30. unique_toolkit/agentic/postprocessor/postprocessor_manager.py +107 -16
  31. unique_toolkit/agentic/reference_manager/reference_manager.py +1 -1
  32. unique_toolkit/agentic/responses_api/__init__.py +19 -0
  33. unique_toolkit/agentic/responses_api/postprocessors/code_display.py +63 -0
  34. unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +145 -0
  35. unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
  36. unique_toolkit/agentic/tools/a2a/__init__.py +18 -2
  37. unique_toolkit/agentic/tools/a2a/evaluation/__init__.py +2 -0
  38. unique_toolkit/agentic/tools/a2a/evaluation/_utils.py +3 -3
  39. unique_toolkit/agentic/tools/a2a/evaluation/config.py +1 -1
  40. unique_toolkit/agentic/tools/a2a/evaluation/evaluator.py +143 -91
  41. unique_toolkit/agentic/tools/a2a/manager.py +7 -1
  42. unique_toolkit/agentic/tools/a2a/postprocessing/__init__.py +11 -3
  43. unique_toolkit/agentic/tools/a2a/postprocessing/_display_utils.py +185 -0
  44. unique_toolkit/agentic/tools/a2a/postprocessing/_ref_utils.py +73 -0
  45. unique_toolkit/agentic/tools/a2a/postprocessing/config.py +21 -0
  46. unique_toolkit/agentic/tools/a2a/postprocessing/display.py +180 -0
  47. unique_toolkit/agentic/tools/a2a/postprocessing/references.py +101 -0
  48. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display_utils.py +1335 -0
  49. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_ref_utils.py +603 -0
  50. unique_toolkit/agentic/tools/a2a/prompts.py +46 -0
  51. unique_toolkit/agentic/tools/a2a/response_watcher/__init__.py +6 -0
  52. unique_toolkit/agentic/tools/a2a/response_watcher/service.py +91 -0
  53. unique_toolkit/agentic/tools/a2a/tool/config.py +15 -5
  54. unique_toolkit/agentic/tools/a2a/tool/service.py +69 -36
  55. unique_toolkit/agentic/tools/config.py +16 -2
  56. unique_toolkit/agentic/tools/factory.py +4 -0
  57. unique_toolkit/agentic/tools/mcp/tool_wrapper.py +7 -35
  58. unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
  59. unique_toolkit/agentic/tools/openai_builtin/base.py +30 -0
  60. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
  61. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +57 -0
  62. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +230 -0
  63. unique_toolkit/agentic/tools/openai_builtin/manager.py +62 -0
  64. unique_toolkit/agentic/tools/test/test_mcp_manager.py +95 -7
  65. unique_toolkit/agentic/tools/test/test_tool_progress_reporter.py +240 -0
  66. unique_toolkit/agentic/tools/tool.py +0 -11
  67. unique_toolkit/agentic/tools/tool_manager.py +337 -122
  68. unique_toolkit/agentic/tools/tool_progress_reporter.py +81 -15
  69. unique_toolkit/agentic/tools/utils/__init__.py +18 -0
  70. unique_toolkit/agentic/tools/utils/execution/execution.py +8 -4
  71. unique_toolkit/agentic/tools/utils/source_handling/schema.py +1 -1
  72. unique_toolkit/chat/__init__.py +8 -1
  73. unique_toolkit/chat/deprecated/service.py +232 -0
  74. unique_toolkit/chat/functions.py +54 -40
  75. unique_toolkit/chat/rendering.py +34 -0
  76. unique_toolkit/chat/responses_api.py +461 -0
  77. unique_toolkit/chat/schemas.py +1 -1
  78. unique_toolkit/chat/service.py +96 -1569
  79. unique_toolkit/content/functions.py +116 -1
  80. unique_toolkit/content/schemas.py +59 -0
  81. unique_toolkit/content/service.py +5 -37
  82. unique_toolkit/content/smart_rules.py +301 -0
  83. unique_toolkit/framework_utilities/langchain/client.py +27 -3
  84. unique_toolkit/framework_utilities/openai/client.py +12 -1
  85. unique_toolkit/framework_utilities/openai/message_builder.py +85 -1
  86. unique_toolkit/language_model/default_language_model.py +3 -0
  87. unique_toolkit/language_model/functions.py +25 -9
  88. unique_toolkit/language_model/infos.py +72 -4
  89. unique_toolkit/language_model/schemas.py +246 -40
  90. unique_toolkit/protocols/support.py +91 -9
  91. unique_toolkit/services/__init__.py +7 -0
  92. unique_toolkit/services/chat_service.py +1630 -0
  93. unique_toolkit/services/knowledge_base.py +861 -0
  94. unique_toolkit/smart_rules/compile.py +56 -301
  95. unique_toolkit/test_utilities/events.py +197 -0
  96. {unique_toolkit-1.8.1.dist-info → unique_toolkit-1.23.0.dist-info}/METADATA +173 -3
  97. {unique_toolkit-1.8.1.dist-info → unique_toolkit-1.23.0.dist-info}/RECORD +99 -67
  98. unique_toolkit/agentic/tools/a2a/postprocessing/_display.py +0 -122
  99. unique_toolkit/agentic/tools/a2a/postprocessing/_utils.py +0 -19
  100. unique_toolkit/agentic/tools/a2a/postprocessing/postprocessor.py +0 -230
  101. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_consolidate_references.py +0 -665
  102. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display.py +0 -391
  103. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_postprocessor_reference_functions.py +0 -256
  104. {unique_toolkit-1.8.1.dist-info → unique_toolkit-1.23.0.dist-info}/LICENSE +0 -0
  105. {unique_toolkit-1.8.1.dist-info → unique_toolkit-1.23.0.dist-info}/WHEEL +0 -0

--- a/unique_toolkit/agentic/tools/a2a/evaluation/evaluator.py
+++ b/unique_toolkit/agentic/tools/a2a/evaluation/evaluator.py
@@ -1,9 +1,9 @@
  import logging
- from typing import override
+ from typing import NamedTuple, override

  import unique_sdk
  from jinja2 import Template
- from typing_extensions import TypedDict
+ from pydantic import BaseModel

  from unique_toolkit.agentic.evaluation.evaluation_manager import Evaluation
  from unique_toolkit.agentic.evaluation.schemas import (
@@ -12,15 +12,19 @@ from unique_toolkit.agentic.evaluation.schemas import (
      EvaluationMetricResult,
  )
  from unique_toolkit.agentic.tools.a2a.evaluation._utils import (
-     _get_valid_assessments,
-     _sort_assessments,
-     _worst_label,
+     get_valid_assessments,
+     get_worst_label,
+     sort_assessments,
  )
  from unique_toolkit.agentic.tools.a2a.evaluation.config import (
      SubAgentEvaluationConfig,
      SubAgentEvaluationServiceConfig,
  )
- from unique_toolkit.agentic.tools.a2a.tool import SubAgentTool
+ from unique_toolkit.agentic.tools.a2a.response_watcher import (
+     SubAgentResponse,
+     SubAgentResponseWatcher,
+ )
+ from unique_toolkit.agentic.tools.utils import failsafe
  from unique_toolkit.chat.schemas import (
      ChatMessageAssessmentLabel,
      ChatMessageAssessmentStatus,
@@ -33,12 +37,40 @@ from unique_toolkit.language_model.service import LanguageModelService
  logger = logging.getLogger(__name__)


- class _SubAgentToolInfo(TypedDict):
-     assessments: dict[int, list[unique_sdk.Space.Assessment]]
+ class SubAgentEvaluationSpec(NamedTuple):
      display_name: str
+     assistant_id: str
+     config: SubAgentEvaluationConfig
+
+
+ _NO_ASSESSMENTS_FOUND = "NO_ASSESSMENTS_FOUND"
+
+
+ class _SingleAssessmentData(BaseModel):
+     name: str
+     explanation: str
+
+
+ def _format_single_assessment_found(name: str, explanation: str) -> str:
+     return _SingleAssessmentData(name=name, explanation=explanation).model_dump_json()
+

+ @failsafe(failure_return_value=None, log_exceptions=False)
+ def _parse_single_assesment_found(value: str) -> _SingleAssessmentData | None:
+     return _SingleAssessmentData.model_validate_json(value)

- NO_ASSESSMENTS_FOUND = "NO_ASSESSMENTS_FOUND"
+
+ def _find_single_assessment(
+     responses: dict[str, list[SubAgentResponse]],
+ ) -> unique_sdk.Space.Assessment | None:
+     if len(responses) == 1:
+         sub_agent_responses = next(iter(responses.values()))
+         if len(sub_agent_responses) == 1:
+             response = sub_agent_responses[0].message
+             if response["assessment"] is not None and len(response["assessment"]) == 1:
+                 return response["assessment"][0]
+
+     return None


  class SubAgentEvaluationService(Evaluation):
@@ -48,17 +80,67 @@ class SubAgentEvaluationService(Evaluation):
          self,
          config: SubAgentEvaluationServiceConfig,
          language_model_service: LanguageModelService,
-     ):
+         response_watcher: SubAgentResponseWatcher,
+         evaluation_specs: list[SubAgentEvaluationSpec],
+     ) -> None:
          super().__init__(EvaluationMetricName.SUB_AGENT)
          self._config = config

-         self._assistant_id_to_tool_info: dict[str, _SubAgentToolInfo] = {}
+         self._response_watcher = response_watcher
          self._language_model_service = language_model_service

+         self._evaluation_specs: dict[str, SubAgentEvaluationSpec] = {
+             spec.assistant_id: spec
+             for spec in evaluation_specs
+             if spec.config.include_evaluation
+         }
+
      @override
      def get_assessment_type(self) -> ChatMessageAssessmentType:
          return self._config.assessment_type

+     def _get_included_sub_agent_responses(
+         self,
+     ) -> dict[str, list[SubAgentResponse]]:
+         responses = {}
+         for assistant_id, eval_spec in self._evaluation_specs.items():
+             sub_agent_responses = self._response_watcher.get_responses(
+                 eval_spec.assistant_id
+             )
+             if len(sub_agent_responses) == 0:
+                 logger.debug(
+                     "No responses for sub agent %s (%s)",
+                     eval_spec.display_name,
+                     eval_spec.assistant_id,
+                 )
+                 continue
+
+             responses_with_assessment = []
+             for response in sub_agent_responses:
+                 assessments = response.message["assessment"]
+
+                 if assessments is None or len(assessments) == 0:
+                     logger.debug(
+                         "No assessment for sub agent %s (%s) response with sequence number %s",
+                         eval_spec.display_name,
+                         eval_spec.assistant_id,
+                         response.sequence_number,
+                     )
+                     continue
+
+                 assessments = get_valid_assessments(
+                     assessments=assessments,
+                     display_name=eval_spec.display_name,
+                     sequence_number=response.sequence_number,
+                 )
+
+                 if len(assessments) > 0:
+                     responses_with_assessment.append(response)
+
+             responses[assistant_id] = responses_with_assessment
+
+         return responses
+
      @override
      async def run(
          self, loop_response: LanguageModelStreamResponse
@@ -67,47 +149,56 @@ class SubAgentEvaluationService(Evaluation):

          sub_agents_display_data = []

-         value = ChatMessageAssessmentLabel.GREEN
+         responses = self._get_included_sub_agent_responses()

-         for tool_info in self._assistant_id_to_tool_info.values():
-             sub_agent_assessments = tool_info["assessments"] or []
-             display_name = tool_info["display_name"]
+         # No valid assessments found
+         if len(responses) == 0:
+             logger.warning("No valid sub agent assessments found")

-             for sequence_number in sorted(sub_agent_assessments):
-                 assessments = sub_agent_assessments[sequence_number]
+             return EvaluationMetricResult(
+                 name=self.get_name(),
+                 # This is a trick to be able to indicate to `evaluation_metric_to_assessment`
+                 # that no valid assessments were found
+                 value=_NO_ASSESSMENTS_FOUND,
+                 reason="No sub agents assessments found",
+             )

-                 valid_assessments = _get_valid_assessments(
-                     assessments, display_name, sequence_number
-                 )
-                 if len(valid_assessments) == 0:
-                     logger.info(
-                         "No valid assessment found for assistant %s (sequence number: %s)",
-                         display_name,
-                         sequence_number,
-                     )
-                     continue
+         single_assessment = _find_single_assessment(responses)
+         # Only one valid assessment found, no need to perform summarization
+         if single_assessment is not None:
+             assistant_id = next(iter(responses))
+             explanation = single_assessment["explanation"] or ""
+             name = self._evaluation_specs[assistant_id].display_name
+             label = single_assessment["label"] or ""

-                 assessments = _sort_assessments(valid_assessments)
-                 value = _worst_label(value, assessments[0]["label"]) # type: ignore
+             return EvaluationMetricResult(
+                 name=self.get_name(),
+                 value=label,
+                 # This is a trick to be able to pass the display name to the UI in `evaluation_metric_to_assessment`
+                 reason=_format_single_assessment_found(name, explanation),
+                 is_positive=label == ChatMessageAssessmentLabel.GREEN,
+             )
+
+         sub_agents_display_data = []
+
+         # Multiple Assessments found
+         value = ChatMessageAssessmentLabel.GREEN
+         for assistant_id, sub_agent_responses in responses.items():
+             display_name = self._evaluation_specs[assistant_id].display_name
+
+             for response in sub_agent_responses:
+                 assessments = sort_assessments(response.message["assessment"]) #  type:ignore
+                 value = get_worst_label(value, assessments[0]["label"]) # type: ignore

                  data = {
-                     "name": tool_info["display_name"],
+                     "name": display_name,
                      "assessments": assessments,
                  }
-                 if len(sub_agent_assessments) > 1:
-                     data["name"] += f" {sequence_number}"
+                 if len(sub_agent_responses) > 1:
+                     data["name"] += f" {response.sequence_number}"

                  sub_agents_display_data.append(data)

-         if len(sub_agents_display_data) == 0:
-             logger.warning("No valid sub agent assessments found")
-
-             return EvaluationMetricResult(
-                 name=self.get_name(),
-                 value=NO_ASSESSMENTS_FOUND,
-                 reason="No sub agents assessments found",
-             )
-
          reason = await self._get_reason(sub_agents_display_data)

          return EvaluationMetricResult(
@@ -121,7 +212,7 @@ class SubAgentEvaluationService(Evaluation):
      async def evaluation_metric_to_assessment(
          self, evaluation_result: EvaluationMetricResult
      ) -> EvaluationAssessmentMessage:
-         if evaluation_result.value == NO_ASSESSMENTS_FOUND:
+         if evaluation_result.value == _NO_ASSESSMENTS_FOUND:
              return EvaluationAssessmentMessage(
                  status=ChatMessageAssessmentStatus.DONE,
                  explanation="No valid sub agents assessments found to consolidate.",
@@ -130,6 +221,16 @@
                  type=self.get_assessment_type(),
              )

+         single_assessment_data = _parse_single_assesment_found(evaluation_result.reason)
+         if single_assessment_data is not None:
+             return EvaluationAssessmentMessage(
+                 status=ChatMessageAssessmentStatus.DONE,
+                 explanation=single_assessment_data.explanation,
+                 title=single_assessment_data.name,
+                 label=evaluation_result.value, # type: ignore
+                 type=self.get_assessment_type(),
+             )
+
          return EvaluationAssessmentMessage(
              status=ChatMessageAssessmentStatus.DONE,
              explanation=evaluation_result.reason,
@@ -138,56 +239,7 @@ class SubAgentEvaluationService(Evaluation):
              type=self.get_assessment_type(),
          )

-     def register_sub_agent_tool(
-         self, tool: SubAgentTool, evaluation_config: SubAgentEvaluationConfig
-     ) -> None:
-         if not evaluation_config.include_evaluation:
-             logger.warning(
-                 "Sub agent tool %s has evaluation config `include_evaluation` set to False, responses will be ignored.",
-                 tool.config.assistant_id,
-             )
-             return
-
-         if tool.config.assistant_id not in self._assistant_id_to_tool_info:
-             tool.subscribe(self)
-             self._assistant_id_to_tool_info[tool.config.assistant_id] = (
-                 _SubAgentToolInfo(
-                     display_name=tool.display_name(),
-                     assessments={},
-                 )
-             )
-
-     def notify_sub_agent_response(
-         self,
-         response: unique_sdk.Space.Message,
-         sub_agent_assistant_id: str,
-         sequence_number: int,
-     ) -> None:
-         if sub_agent_assistant_id not in self._assistant_id_to_tool_info:
-             logger.warning(
-                 "Unknown assistant id %s received, assessment will be ignored.",
-                 sub_agent_assistant_id,
-             )
-             return
-
-         sub_agent_assessments = self._assistant_id_to_tool_info[sub_agent_assistant_id][
-             "assessments"
-         ]
-         sub_agent_assessments[sequence_number] = (
-             response[
-                 "assessment"
-             ].copy() # Shallow copy as we don't modify individual assessments
-             if response["assessment"] is not None
-             else []
-         )
-
      async def _get_reason(self, sub_agents_display_data: list[dict]) -> str:
-         if (
-             len(sub_agents_display_data) == 1
-             and len(sub_agents_display_data[0]["assessments"]) == 1
-         ):
-             return sub_agents_display_data[0]["assessments"][0]["explanation"] or ""
-
          messages = (
              MessagesBuilder()
              .system_message_append(self._config.summarization_system_message)
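
For orientation, here is a minimal sketch of the single-assessment shortcut introduced above, assuming unique_toolkit 1.23.0 is installed; the agent name and texts are invented, and both helpers are private to evaluator.py. The lone assessment's display name and explanation travel through `EvaluationMetricResult.reason` as JSON, so `evaluation_metric_to_assessment` can rebuild the assessment title without a summarization call:

```python
from unique_toolkit.agentic.tools.a2a.evaluation.evaluator import (
    _format_single_assessment_found,
    _parse_single_assesment_found,  # spelling as in the released module
)

# Serialize the single assessment into the metric result's `reason` field.
reason = _format_single_assessment_found(
    "Research Agent", "All claims are supported by the cited sources."
)
# e.g. {"name":"Research Agent","explanation":"All claims are supported by the cited sources."}

# `evaluation_metric_to_assessment` later tries to parse the reason back.
parsed = _parse_single_assesment_found(reason)
assert parsed is not None and parsed.name == "Research Agent"

# A plain-text reason (the multi-assessment summarization path) fails JSON
# validation, and the @failsafe decorator turns that into None, so the
# generic assessment branch is used instead.
assert _parse_single_assesment_found("Consolidated summary text") is None
```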

--- a/unique_toolkit/agentic/tools/a2a/manager.py
+++ b/unique_toolkit/agentic/tools/a2a/manager.py
@@ -1,5 +1,6 @@
  from logging import Logger

+ from unique_toolkit.agentic.tools.a2a.response_watcher import SubAgentResponseWatcher
  from unique_toolkit.agentic.tools.a2a.tool import SubAgentTool, SubAgentToolConfig
  from unique_toolkit.agentic.tools.config import ToolBuildConfig
  from unique_toolkit.agentic.tools.tool_progress_reporter import ToolProgressReporter
@@ -11,12 +12,16 @@ class A2AManager:
          self,
          logger: Logger,
          tool_progress_reporter: ToolProgressReporter,
+         response_watcher: SubAgentResponseWatcher,
      ):
          self._logger = logger
          self._tool_progress_reporter = tool_progress_reporter
+         self._response_watcher = response_watcher

      def get_all_sub_agents(
-         self, tool_configs: list[ToolBuildConfig], event: ChatEvent
+         self,
+         tool_configs: list[ToolBuildConfig],
+         event: ChatEvent,
      ) -> tuple[list[ToolBuildConfig], list[SubAgentTool]]:
          sub_agents = []

@@ -39,6 +44,7 @@
                      tool_progress_reporter=self._tool_progress_reporter,
                      name=tool_config.name,
                      display_name=tool_config.display_name,
+                     response_watcher=self._response_watcher,
                  )
              )


--- a/unique_toolkit/agentic/tools/a2a/postprocessing/__init__.py
+++ b/unique_toolkit/agentic/tools/a2a/postprocessing/__init__.py
@@ -2,12 +2,20 @@ from unique_toolkit.agentic.tools.a2a.postprocessing.config import (
      SubAgentDisplayConfig,
      SubAgentResponseDisplayMode,
  )
- from unique_toolkit.agentic.tools.a2a.postprocessing.postprocessor import (
-     SubAgentResponsesPostprocessor,
+ from unique_toolkit.agentic.tools.a2a.postprocessing.display import (
+     SubAgentDisplaySpec,
+     SubAgentResponsesDisplayPostprocessor,
+     SubAgentResponsesPostprocessorConfig,
+ )
+ from unique_toolkit.agentic.tools.a2a.postprocessing.references import (
+     SubAgentReferencesPostprocessor,
  )

  __all__ = [
-     "SubAgentResponsesPostprocessor",
+     "SubAgentResponsesDisplayPostprocessor",
+     "SubAgentResponsesPostprocessorConfig",
+     "SubAgentDisplaySpec",
      "SubAgentResponseDisplayMode",
      "SubAgentDisplayConfig",
+     "SubAgentReferencesPostprocessor",
  ]

--- /dev/null
+++ b/unique_toolkit/agentic/tools/a2a/postprocessing/_display_utils.py
@@ -0,0 +1,185 @@
+ import re
+ from typing import Literal
+
+ from unique_toolkit.agentic.tools.a2a.postprocessing.config import (
+     SubAgentDisplayConfig,
+     SubAgentResponseDisplayMode,
+ )
+
+
+ def _wrap_text(text: str, start_text: str, end_text: str) -> str:
+     text = text.strip()
+     start_text = start_text.strip()
+     end_text = end_text.strip()
+
+     if start_text != "":
+         start_text = f"{start_text}\n"
+
+     if end_text != "":
+         end_text = f"\n{end_text}"
+
+     return f"{start_text}{text}{end_text}"
+
+
+ def _join_text_blocks(*blocks: str, sep: str = "\n") -> str:
+     return sep.join(block.strip() for block in blocks)
+
+
+ def _wrap_with_details_tag(
+     text, mode: Literal["open", "closed"], summary_name: str | None = None
+ ) -> str:
+     if summary_name is not None:
+         summary_tag = _wrap_text(summary_name, "<summary>", "</summary>")
+         text = _join_text_blocks(summary_tag, text)
+
+     if mode == "open":
+         text = _wrap_text(text, "<details open>", "</details>")
+     else:
+         text = _wrap_text(text, "<details>", "</details>")
+
+     return text
+
+
+ _BLOCK_BORDER_STYLE = (
+     "overflow-y: auto; border: 1px solid #ccc; padding: 8px; margin-top: 8px;"
+ )
+
+
+ def _wrap_with_block_border(text: str) -> str:
+     return _wrap_text(text, f"<div style='{_BLOCK_BORDER_STYLE}'>", "</div>")
+
+
+ _QUOTE_BORDER_STYLE = (
+     "margin-left: 20px; border-left: 2px solid #ccc; padding-left: 10px;"
+ )
+
+
+ def _wrap_with_quote_border(text: str) -> str:
+     return _wrap_text(text, f"<div style='{_QUOTE_BORDER_STYLE}'>", "</div>")
+
+
+ def _wrap_strong(text: str) -> str:
+     return _wrap_text(text, "<strong>", "</strong>")
+
+
+ def _wrap_hidden_div(text: str) -> str:
+     return _wrap_text(text, '<div style="display: none;">', "</div>")
+
+
+ def _add_line_break(text: str, before: bool = True, after: bool = True) -> str:
+     start_tag = ""
+     if before:
+         start_tag = "<br>"
+
+     end_tag = ""
+     if after:
+         end_tag = "<br>"
+
+     return _wrap_text(text, start_tag, end_tag)
+
+
+ def _prepare_title_template(
+     display_title_template: str, display_name_placeholder: str
+ ) -> str:
+     return display_title_template.replace("{}", "{%s}" % display_name_placeholder)
+
+
+ def _get_display_template(
+     mode: SubAgentResponseDisplayMode,
+     add_quote_border: bool,
+     add_block_border: bool,
+     display_title_template: str,
+     answer_placeholder: str = "answer",
+     assistant_id_placeholder: str = "assistant_id",
+     display_name_placeholder: str = "display_name",
+ ) -> str:
+     if mode == SubAgentResponseDisplayMode.HIDDEN:
+         return ""
+
+     assistant_id_placeholder = _wrap_hidden_div("{%s}" % assistant_id_placeholder)
+     title_template = _prepare_title_template(
+         display_title_template, display_name_placeholder
+     )
+     template = _join_text_blocks(
+         assistant_id_placeholder, "{%s}" % answer_placeholder, sep="\n\n"
+     ) # Double line break is needed for markdown formatting
+
+     template = _add_line_break(template, before=True, after=False)
+
+     if add_quote_border:
+         template = _wrap_with_quote_border(template)
+
+     match mode:
+         case SubAgentResponseDisplayMode.DETAILS_OPEN:
+             template = _wrap_with_details_tag(
+                 template,
+                 "open",
+                 title_template,
+             )
+         case SubAgentResponseDisplayMode.DETAILS_CLOSED:
+             template = _wrap_with_details_tag(template, "closed", title_template)
+         case SubAgentResponseDisplayMode.PLAIN:
+             # Add a hidden block border to seperate sub agent answers from the rest of the text.
+             hidden_block_border = _wrap_hidden_div("sub_agent_answer_block")
+             template = _join_text_blocks(title_template, template, hidden_block_border)
+
+     if add_block_border:
+         template = _wrap_with_block_border(template)
+
+     return template
+
+
+ def _get_display_removal_re(
+     assistant_id: str,
+     mode: SubAgentResponseDisplayMode,
+     add_quote_border: bool,
+     add_block_border: bool,
+     display_title_template: str,
+ ) -> re.Pattern[str]:
+     template = _get_display_template(
+         mode=mode,
+         add_quote_border=add_quote_border,
+         add_block_border=add_block_border,
+         display_title_template=display_title_template,
+     )
+
+     pattern = template.format(
+         assistant_id=re.escape(assistant_id), answer=r"(.*?)", display_name=r"(.*?)"
+     )
+
+     return re.compile(pattern, flags=re.DOTALL)
+
+
+ def get_sub_agent_answer_display(
+     display_name: str,
+     display_config: SubAgentDisplayConfig,
+     answer: str,
+     assistant_id: str,
+ ) -> str:
+     template = _get_display_template(
+         mode=display_config.mode,
+         add_quote_border=display_config.add_quote_border,
+         add_block_border=display_config.add_block_border,
+         display_title_template=display_config.display_title_template,
+     )
+     return template.format(
+         display_name=display_name, answer=answer, assistant_id=assistant_id
+     )
+
+
+ def remove_sub_agent_answer_from_text(
+     display_config: SubAgentDisplayConfig,
+     text: str,
+     assistant_id: str,
+ ) -> str:
+     if not display_config.remove_from_history:
+         return text
+
+     pattern = _get_display_removal_re(
+         assistant_id=assistant_id,
+         mode=display_config.mode,
+         add_quote_border=display_config.add_quote_border,
+         add_block_border=display_config.add_block_border,
+         display_title_template=display_config.display_title_template,
+     )
+     return re.sub(pattern, "", text)
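
A rough usage sketch of the new display helpers (import paths follow this diff; the sub agent name, answer, and assistant id are invented, and the defaults assumed here match the SubAgentDisplayConfig changes shown further below):

```python
from unique_toolkit.agentic.tools.a2a.postprocessing import (
    SubAgentDisplayConfig,
    SubAgentResponseDisplayMode,
)
from unique_toolkit.agentic.tools.a2a.postprocessing._display_utils import (
    get_sub_agent_answer_display,
    remove_sub_agent_answer_from_text,
)

config = SubAgentDisplayConfig(mode=SubAgentResponseDisplayMode.DETAILS_CLOSED)

# Wraps the sub agent answer in a collapsible <details> block, with the
# assistant id embedded in a hidden <div> so the block can be located again.
block = get_sub_agent_answer_display(
    display_name="Research Agent",
    display_config=config,
    answer="The filing deadline is 31 March.",
    assistant_id="assistant_123",
)

# With remove_from_history left at its default (True), the matching regex
# strips the block out of the history text on the next loop iteration.
history = f"Main agent answer.\n{block}"
cleaned = remove_sub_agent_answer_from_text(
    display_config=config, text=history, assistant_id="assistant_123"
)
assert "assistant_123" not in cleaned
```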

--- /dev/null
+++ b/unique_toolkit/agentic/tools/a2a/postprocessing/_ref_utils.py
@@ -0,0 +1,73 @@
+ from typing import Callable, Iterable, Mapping, Sequence
+
+ from unique_toolkit._common.referencing import get_reference_pattern
+ from unique_toolkit._common.string_utilities import replace_in_text
+ from unique_toolkit.content import ContentReference
+
+ SourceId = str
+ SequenceNumber = int
+
+
+ def _add_source_ids(
+     existing_refs: Mapping[SourceId, SequenceNumber],
+     new_refs: Iterable[SourceId],
+ ) -> dict[SourceId, SequenceNumber]:
+     next_seq_num = max(existing_refs.values(), default=0) + 1
+     new_seq_nums: dict[SourceId, SequenceNumber] = {}
+
+     for source_id in new_refs:
+         seq_num = existing_refs.get(source_id, None) or new_seq_nums.get(
+             source_id, None
+         )
+         if seq_num is None:
+             new_seq_nums[source_id] = next_seq_num
+             next_seq_num += 1
+
+     return new_seq_nums
+
+
+ def add_content_refs(
+     message_refs: Sequence[ContentReference],
+     new_refs: Sequence[ContentReference],
+ ) -> list[ContentReference]:
+     message_refs = list(message_refs)
+
+     if len(new_refs) == 0:
+         return message_refs
+
+     existing_refs = {ref.source_id: ref.sequence_number for ref in message_refs}
+     new_refs_by_source_id = {
+         ref.source_id: ref for ref in sorted(new_refs, key=lambda x: x.sequence_number)
+     }
+     new_seq_nums = _add_source_ids(existing_refs, new_refs_by_source_id.keys())
+
+     for source_id, seq_num in new_seq_nums.items():
+         ref = new_refs_by_source_id[source_id]
+         message_refs.append(
+             ref.model_copy(update={"sequence_number": seq_num}, deep=True)
+         )
+
+     return message_refs
+
+
+ def add_content_refs_and_replace_in_text(
+     message_text: str,
+     message_refs: Sequence[ContentReference],
+     new_refs: Sequence[ContentReference],
+     ref_pattern_f: Callable[[int], str] = get_reference_pattern,
+     ref_replacement_f: Callable[[int], str] = get_reference_pattern,
+ ) -> tuple[str, list[ContentReference]]:
+     if len(new_refs) == 0:
+         return message_text, list(message_refs)
+
+     references = add_content_refs(message_refs, new_refs)
+     seq_num_for_source_id = {ref.source_id: ref.sequence_number for ref in references}
+     ref_map = []
+
+     for ref in new_refs:
+         old_seq_num = ref.sequence_number
+         new_seq_num = seq_num_for_source_id[ref.source_id]
+
+         ref_map.append((ref_pattern_f(old_seq_num), ref_replacement_f(new_seq_num)))
+
+     return replace_in_text(message_text, ref_map), references
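
A small sketch of the renumbering rule that add_content_refs builds on (it imports the private _add_source_ids helper from the new module; the source ids are invented). Source ids already present on the message keep their sequence number, and unseen ones continue the numbering; add_content_refs_and_replace_in_text then rewrites the matching inline markers via replace_in_text and get_reference_pattern:

```python
from unique_toolkit.agentic.tools.a2a.postprocessing._ref_utils import _add_source_ids

existing = {"doc-a": 1, "doc-b": 2}     # references already attached to the main message
incoming = ["doc-b", "doc-c", "doc-d"]  # source ids cited by a sub agent response

# "doc-b" is already numbered, so only the unseen ids get fresh numbers.
print(_add_source_ids(existing, incoming))  # {'doc-c': 3, 'doc-d': 4}
```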

--- a/unique_toolkit/agentic/tools/a2a/postprocessing/config.py
+++ b/unique_toolkit/agentic/tools/a2a/postprocessing/config.py
@@ -1,4 +1,5 @@
  from enum import StrEnum
+ from typing import Literal

  from pydantic import BaseModel, Field

@@ -9,6 +10,7 @@ class SubAgentResponseDisplayMode(StrEnum):
      HIDDEN = "hidden"
      DETAILS_OPEN = "details_open"
      DETAILS_CLOSED = "details_closed"
+     PLAIN = "plain"


  class SubAgentDisplayConfig(BaseModel):
@@ -22,3 +24,22 @@ class SubAgentDisplayConfig(BaseModel):
          default=True,
          description="If set, sub agent responses will be removed from the history on subsequent calls to the assistant.",
      )
+     add_quote_border: bool = Field(
+         default=True,
+         description="If set, a quote border is added to the left of the sub agent response.",
+     )
+     add_block_border: bool = Field(
+         default=False,
+         description="If set, a block border is added around the sub agent response.",
+     )
+     display_title_template: str = Field(
+         default="Answer from <strong>{}</strong>",
+         description=(
+             "The template to use for the display title of the sub agent response."
+             "If a placeholder '{}' is present, it will be replaced with the display name of the sub agent."
+         ),
+     )
+     position: Literal["before", "after"] = Field(
+         default="before",
+         description="The position of the sub agent response in the main agent response.",
+     )