agently-4.0.6.9-py3-none-any.whl → agently-4.0.6.11-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agently/base.py +3 -5
- agently/builtins/agent_extensions/ConfigurePromptExtension.py +38 -9
- agently/builtins/hookers/SystemMessageHooker.py +51 -7
- agently/builtins/plugins/ModelRequester/OpenAICompatible.py +32 -0
- agently/builtins/plugins/PromptGenerator/AgentlyPromptGenerator.py +10 -4
- agently/builtins/plugins/ResponseParser/AgentlyResponseParser.py +22 -14
- agently/core/Agent.py +15 -4
- agently/core/ModelRequest.py +45 -11
- agently/core/PluginManager.py +2 -0
- agently/core/Prompt.py +7 -45
- agently/core/TriggerFlow/Chunk.py +5 -4
- agently/core/TriggerFlow/Execution.py +2 -2
- agently/core/TriggerFlow/TriggerFlow.py +3 -4
- agently/core/TriggerFlow/process/BaseProcess.py +51 -19
- agently/core/TriggerFlow/process/ForEachProcess.py +3 -3
- agently/core/TriggerFlow/process/MatchCaseProcess.py +6 -6
- agently/integrations/chromadb.py +15 -0
- agently/types/data/response.py +10 -1
- agently/types/plugins/ResponseParser.py +26 -6
- agently/types/trigger_flow/trigger_flow.py +6 -6
- agently/utils/DataFormatter.py +77 -0
- agently/utils/PythonSandbox.py +101 -0
- agently/utils/Settings.py +19 -2
- agently/utils/__init__.py +1 -0
- {agently-4.0.6.9.dist-info → agently-4.0.6.11.dist-info}/METADATA +1 -1
- {agently-4.0.6.9.dist-info → agently-4.0.6.11.dist-info}/RECORD +28 -27
- {agently-4.0.6.9.dist-info → agently-4.0.6.11.dist-info}/WHEEL +0 -0
- {agently-4.0.6.9.dist-info → agently-4.0.6.11.dist-info}/licenses/LICENSE +0 -0
agently/base.py
CHANGED
@@ -14,7 +14,7 @@

 from typing import Any, Literal, Type, TYPE_CHECKING, TypeVar, Generic, cast

-from agently.utils import Settings, create_logger, FunctionShifter
+from agently.utils import Settings, create_logger, FunctionShifter, DataFormatter
 from agently.core import PluginManager, EventCenter, Tool, Prompt, ModelRequest, BaseAgent
 from agently._default_init import _load_default_settings, _load_default_plugins, _hook_default_event_handlers

@@ -117,6 +117,8 @@ class AgentlyMain(Generic[A]):
         self.tool = tool
         self.AgentType = AgentType

+        self.set_settings = self.settings.set_settings
+
     def set_debug_console(self, debug_console_status: Literal["ON", "OFF"]):
         match debug_console_status:
             case "OFF":

@@ -130,10 +132,6 @@ class AgentlyMain(Generic[A]):
         self.logger.setLevel(log_level)
         return self

-    def set_settings(self, key: str, value: "SerializableValue"):
-        self.settings.set_settings(key, value)
-        return self
-
     def create_prompt(self, name: str = "agently_prompt") -> Prompt:
         return Prompt(
             self.plugin_manager,
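base.py, Agent.py, and ModelRequest.py (below) all make the same change: the `set_settings` wrapper method is removed and replaced by an attribute alias bound in `__init__`, so calls now go straight to `Settings.set_settings`. A minimal sketch, assuming the usual `Agently` entry object and a made-up settings key:

from agently import Agently  # assumed import path

# Same call site as before; note the removed wrapper returned `self`,
# so the return value is now whatever Settings.set_settings returns.
Agently.set_settings("my.hypothetical.key", "value")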
agently/builtins/agent_extensions/ConfigurePromptExtension.py
CHANGED

@@ -21,6 +21,7 @@ from typing import Any
 from json import JSONDecodeError

 from agently.core import BaseAgent
+from agently.utils import DataLocator


 class ConfigurePromptExtension(BaseAgent):
@@ -168,46 +169,74 @@ class ConfigurePromptExtension(BaseAgent):
             variable_mappings,
         )

-    def load_yaml_prompt(
+    def load_yaml_prompt(
+        self,
+        path_or_content: str | Path,
+        mappings: dict[str, Any] | None = None,
+        *,
+        prompt_key_path: str | None = None,
+        encoding: str | None = "utf-8",
+    ):
         path = Path(path_or_content)
         if path.exists() and path.is_file():
             try:
-                with path.open("r", encoding=
+                with path.open("r", encoding=encoding) as file:
                     prompt = yaml.safe_load(file)
             except yaml.YAMLError as e:
                 raise ValueError(f"Cannot load YAML file '{ path_or_content }'.\nError: { e }")
         else:
             try:
-                prompt = yaml.safe_load(path_or_content)
+                prompt = yaml.safe_load(str(path_or_content))
             except yaml.YAMLError as e:
                 raise ValueError(f"Cannot load YAML content or file path not existed.\nError: { e }")
+        if not isinstance(prompt, dict):
+            raise TypeError(
+                "Cannot execute YAML prompt configures, expect prompt configures as a dictionary data but got:"
+                f"{ prompt }"
+            )
+        if prompt_key_path is not None:
+            prompt = DataLocator.locate_path_in_dict(prompt, prompt_key_path)
         if isinstance(prompt, dict):
             self._execute_prompt_configure(prompt, mappings)
         else:
             raise TypeError(
-                "Cannot execute YAML prompt configures, expect prompt configures as a dictionary data but got:"
+                f"Cannot execute YAML prompt configures, expect prompt configures{ ' from [' + prompt_key_path + '] ' if prompt_key_path is not None else '' } as a dictionary data but got:"
                 f"{ prompt }"
             )
         return self

-    def load_json_prompt(
+    def load_json_prompt(
+        self,
+        path_or_content: str | Path,
+        mappings: dict[str, Any] | None = None,
+        *,
+        prompt_key_path: str | None = None,
+        encoding: str | None = "utf-8",
+    ):
         path = Path(path_or_content)
         if path.exists() and path.is_file():
             try:
-                with path.open("r", encoding=
+                with path.open("r", encoding=encoding) as file:
                     prompt = json5.load(file)
             except JSONDecodeError as e:
                 raise ValueError(f"Cannot load JSON file '{ path_or_content }'.\nError: { e }")
         else:
             try:
-                prompt = json5.loads(path_or_content)
-            except
+                prompt = json5.loads(str(path_or_content))
+            except JSONDecodeError as e:
                 raise ValueError(f"Cannot load JSON content or file path not existed.\nError: { e }")
+        if not isinstance(prompt, dict):
+            raise TypeError(
+                "Cannot execute JSON prompt configures, expect prompt configures as a dictionary data but got:"
+                f"{ prompt }"
+            )
+        if prompt_key_path is not None:
+            prompt = DataLocator.locate_path_in_dict(prompt, prompt_key_path)
         if isinstance(prompt, dict):
             self._execute_prompt_configure(prompt, mappings)
         else:
             raise TypeError(
-                "Cannot execute JSON prompt configures, expect prompt configures as a dictionary data but got:"
+                f"Cannot execute JSON prompt configures, expect prompt configures{ ' from [' + prompt_key_path + '] ' if prompt_key_path is not None else '' }as a dictionary data but got:"
                 f"{ prompt }"
            )
         return self
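Both loaders now take keyword-only `prompt_key_path` and `encoding` arguments, validate that the parsed document is a dictionary, and can drill into a sub-dictionary via `DataLocator.locate_path_in_dict` before applying it. A usage sketch; the file name, key path, and `agent` variable are illustrative, and the exact path syntax is whatever `DataLocator` accepts:

# Hypothetical: apply only the "prompts.summarize" sub-dict of a larger
# YAML config instead of requiring a file that contains only the prompt.
agent.load_yaml_prompt(
    "configs/app.yaml",                   # made-up config file
    {"topic": "release notes"},           # variable mappings
    prompt_key_path="prompts.summarize",  # assumed dot-style key path
    encoding="utf-8",
)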
agently/builtins/hookers/SystemMessageHooker.py
CHANGED

@@ -17,6 +17,32 @@ from typing import TYPE_CHECKING

 from agently.types.plugins import EventHooker

+COLORS = {
+    "black": 30,
+    "red": 31,
+    "green": 32,
+    "yellow": 33,
+    "blue": 34,
+    "magenta": 35,
+    "cyan": 36,
+    "white": 37,
+    "gray": 90,
+}
+
+
+def color_text(text: str, color: str | None = None, bold: bool = False, underline: bool = False) -> str:
+    codes = []
+    if bold:
+        codes.append("1")
+    if underline:
+        codes.append("4")
+    if color and color in COLORS:
+        codes.append(str(COLORS[color]))
+    if not codes:
+        return text
+    return f"\x1b[{';'.join(codes)}m{text}\x1b[0m"
+
+
 if TYPE_CHECKING:
     from agently.types.data import EventMessage, AgentlySystemEvent
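`color_text` wraps text in ANSI SGR escape sequences: code `1` for bold, `4` for underline, plus a foreground code from the `COLORS` table, and passes text through unchanged when no option is set. A standalone demonstration of the escapes it produces (requires an ANSI-capable terminal):

# color_text("header", color="blue", bold=True) yields "\x1b[1;34mheader\x1b[0m":
print("\x1b[1;34mheader\x1b[0m")
# color_text("stage", color="yellow", underline=True) yields "\x1b[4;33mstage\x1b[0m":
print("\x1b[4;33mstage\x1b[0m")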
@@ -72,12 +98,18 @@ class SystemMessageHooker(EventHooker):
                     and SystemMessageHooker._current_meta["row_id"] == message_data["response_id"]
                     and SystemMessageHooker._current_meta["stage"] == content["stage"]
                 ):
-                    print(content["detail"], end="")
+                    print(color_text(content["detail"], color="gray"), end="", flush=True)
                 else:
-
-                        f"[Agent-{ message_data['agent_name'] }] - [Request-{ message_data['response_id'] }]
-
+                    header = color_text(
+                        f"[Agent-{ message_data['agent_name'] }] - [Request-{ message_data['response_id'] }]",
+                        color="blue",
+                        bold=True,
                     )
+                    stage_label = color_text("Stage:", color="cyan", bold=True)
+                    stage_val = color_text(content["stage"], color="yellow", underline=True)
+                    detail_label = color_text("Detail:\n", color="cyan", bold=True)
+                    detail = color_text(content["detail"], color="green")
+                    print(f"{header}\n{stage_label} {stage_val}\n{detail_label}{detail}", end="")
                 SystemMessageHooker._current_meta["table_name"] = message_data["agent_name"]
                 SystemMessageHooker._current_meta["row_id"] = message_data["response_id"]
                 SystemMessageHooker._current_meta["stage"] = content["stage"]

@@ -99,28 +131,40 @@ class SystemMessageHooker(EventHooker):
                         },
                     )
                 if settings["runtime.show_model_logs"]:
+                    header = color_text(
+                        f"[Agent-{ message_data['agent_name'] }] - [Response-{ message_data['response_id'] }]",
+                        color="blue",
+                        bold=True,
+                    )
+                    stage_label = color_text("Stage:", color="cyan", bold=True)
+                    stage_val = color_text(content["stage"], color="yellow", underline=True)
+                    detail_label = color_text("Detail:\n", color="cyan", bold=True)
+                    detail = color_text(f"{content['detail']}", color="gray")
                     await event_center.async_emit(
                         "log",
                         {
                             "level": "INFO",
-                            "content": f"
+                            "content": f"{header}\n{stage_label} {stage_val}\n{detail_label}{detail}",
                         },
                     )
             case "TOOL":
                 if settings["runtime.show_tool_logs"]:
+                    tool_title = color_text("[Tool Using Result]:", color="blue", bold=True)
+                    tool_body = color_text(str(message.content["data"]), color="gray")
                     await event_center.async_emit(
                         "log",
                         {
                             "level": "INFO",
-                            "content": f"
+                            "content": f"{tool_title}\n{tool_body}",
                         },
                     )
             case "TRIGGER_FLOW":
                 if settings["runtime.show_trigger_flow_logs"]:
+                    trigger = color_text(f"[TriggerFlow] { message.content['data'] }", color="yellow", bold=True)
                     await event_center.async_emit(
                         "log",
                         {
                             "level": "INFO",
-                            "content":
+                            "content": trigger,
                         },
                     )
agently/builtins/plugins/ModelRequester/OpenAICompatible.py
CHANGED

@@ -45,6 +45,7 @@ if TYPE_CHECKING:
     class ContentMapping(TypedDict):
         id: str | None
         role: str | None
+        reasoning: str | None
        delta: str | None
         tool_calls: str | None
         done: str | None

@@ -114,6 +115,7 @@ class OpenAICompatible(ModelRequester):
         "content_mapping": {
             "id": "id",
             "role": "choices[0].delta.role",
+            "reasoning": "choices[0].delta.reasoning_content",
             "delta": "choices[0].delta.content",
             "tool_calls": "choices[0].delta.tool_calls",
             "done": None,

@@ -124,6 +126,7 @@ class OpenAICompatible(ModelRequester):
             },
             "extra_done": None,
         },
+        "yield_extra_content_separately": True,
         "content_mapping_style": "dot",
         "timeout": {
             "connect": 30.0,

@@ -146,6 +149,10 @@ class OpenAICompatible(ModelRequester):
         self.model_type = cast(str, self.plugin_settings.get("model_type"))
         self._messenger = event_center.create_messenger(self.name)

+        # check if has attachment prompt
+        if self.prompt["attachment"]:
+            self.plugin_settings["rich_content"] = True
+
     @staticmethod
     def _on_register():
         pass

@@ -501,6 +508,7 @@ class OpenAICompatible(ModelRequester):
     async def broadcast_response(self, response_generator: AsyncGenerator) -> "AgentlyResponseGenerator":
         meta = {}
         message_record = {}
+        reasoning_buffer = ""
         content_buffer = ""

         content_mapping = cast(

@@ -512,6 +520,7 @@ class OpenAICompatible(ModelRequester):
         )
         id_mapping = content_mapping["id"]
         role_mapping = content_mapping["role"]
+        reasoning_mapping = content_mapping["reasoning"]
         delta_mapping = content_mapping["delta"]
         tool_calls_mapping = content_mapping["tool_calls"]
         done_mapping = content_mapping["done"]

@@ -519,6 +528,7 @@ class OpenAICompatible(ModelRequester):
         finish_reason_mapping = content_mapping["finish_reason"]
         extra_delta_mapping = content_mapping["extra_delta"]
         extra_done_mapping = content_mapping["extra_done"]
+        yield_extra_content_separately = self.plugin_settings.get("yield_extra_content_separately", True)

         content_mapping_style = str(self.plugin_settings.get("content_mapping_style"))
         if content_mapping_style not in ("dot", "slash"):

@@ -548,6 +558,15 @@ class OpenAICompatible(ModelRequester):
                 )
                 if role:
                     meta.update({"role": role})
+                if reasoning_mapping:
+                    reasoning = DataLocator.locate_path_in_dict(
+                        loaded_message,
+                        reasoning_mapping,
+                        style=content_mapping_style,
+                    )
+                    if reasoning:
+                        reasoning_buffer += str(reasoning)
+                        yield "reasoning_delta", reasoning
                 if delta_mapping:
                     delta = DataLocator.locate_path_in_dict(
                         loaded_message,

@@ -574,6 +593,8 @@ class OpenAICompatible(ModelRequester):
                     )
                     if extra_value:
                         yield "extra", {extra_key: extra_value}
+                        if yield_extra_content_separately:
+                            yield extra_key, extra_value  # type: ignore
             else:
                 done_content = None
                 if self.model_type == "embeddings" and done_mapping is None:

@@ -589,6 +610,17 @@ class OpenAICompatible(ModelRequester):
                     yield "done", done_content
                 else:
                     yield "done", content_buffer
+                reasoning_content = None
+                if reasoning_mapping:
+                    reasoning_content = DataLocator.locate_path_in_dict(
+                        message_record,
+                        reasoning_mapping,
+                        style=content_mapping_style,
+                    )
+                if reasoning_content:
+                    yield "reasoning_done", reasoning_content
+                else:
+                    yield "reasoning_done", reasoning_buffer
                 match self.model_type:
                     case "embeddings":
                         yield "original_done", message_record
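The new `reasoning` entry is resolved against each streamed chunk with the same `DataLocator` path mechanics as `delta`, buffered, and emitted as `reasoning_delta` events plus a final `reasoning_done`. As an illustration only (not the library's implementation), a toy resolver for the default dot-style paths:

import re
from typing import Any

def locate_dot_path(data: Any, path: str) -> Any:
    """Toy resolver for dot-style mapping paths like 'choices[0].delta.reasoning_content'."""
    for part in path.split("."):
        match = re.fullmatch(r"(\w+)(?:\[(\d+)\])?", part)
        if match is None:
            return None
        key, index = match.group(1), match.group(2)
        if not isinstance(data, dict) or key not in data:
            return None
        data = data[key]
        if index is not None:
            if not isinstance(data, list) or int(index) >= len(data):
                return None
            data = data[int(index)]
    return data

chunk = {"choices": [{"delta": {"reasoning_content": "thinking..."}}]}
print(locate_dot_path(chunk, "choices[0].delta.reasoning_content"))  # -> thinking...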
agently/builtins/plugins/PromptGenerator/AgentlyPromptGenerator.py
CHANGED

@@ -725,10 +725,16 @@ class AgentlyPromptGenerator(PromptGenerator):
                         "$type": output_prompt_part[0],
                     }
                 case _:
-
-
-
-
+                    desc_text = ";".join([item for item in output_prompt_part[1:] if item])
+                    if desc_text:
+                        return {
+                            "$type": output_prompt_part[0],
+                            "$desc": desc_text,
+                        }
+                    else:
+                        return {
+                            "$type": output_prompt_part[0],
+                        }
             else:
                 return list(output_prompt_part)
agently/builtins/plugins/ResponseParser/AgentlyResponseParser.py
CHANGED

@@ -282,8 +282,10 @@ class AgentlyResponseParser(ResponseParser):

     async def get_async_generator(
         self,
-        type: Literal['all', 'delta', '
-        content: Literal['all', 'delta', '
+        type: Literal['all', 'delta', 'specific', 'original', 'instant', 'streaming_parse'] | None = "delta",
+        content: Literal['all', 'delta', 'specific', 'original', 'instant', 'streaming_parse'] | None = "delta",
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> AsyncGenerator:
         await self._ensure_consumer()
         parsed_generator = cast(GeneratorConsumer, self._response_consumer).get_async_generator()

@@ -300,11 +302,13 @@ class AgentlyResponseParser(ResponseParser):
                 case "delta":
                     if event == "delta":
                         yield data
-                case "
-                    if
-
-                    elif
-
+                case "specific":
+                    if specific is None:
+                        specific = ["delta"]
+                    elif isinstance(specific, str):
+                        specific = [specific]
+                    if event in specific:
+                        yield event, data
                 case "instant" | "streaming_parse":
                     if self._streaming_json_parser is not None:
                         streaming_parsed = None

@@ -325,8 +329,10 @@ class AgentlyResponseParser(ResponseParser):

     def get_generator(
         self,
-        type: Literal['all', 'delta', '
-        content: Literal['all', 'delta', '
+        type: Literal['all', 'delta', 'specific', 'original', 'instant', 'streaming_parse'] | None = "delta",
+        content: Literal['all', 'delta', 'specific', 'original', 'instant', 'streaming_parse'] | None = "delta",
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> Generator:
         asyncio.run(self._ensure_consumer())
         parsed_generator = cast(GeneratorConsumer, self._response_consumer).get_generator()

@@ -343,11 +349,13 @@ class AgentlyResponseParser(ResponseParser):
                 case "delta":
                     if event == "delta":
                         yield data
-                case "
-                    if
-
-                    elif
-
+                case "specific":
+                    if specific is None:
+                        specific = ["delta"]
+                    elif isinstance(specific, str):
+                        specific = [specific]
+                    if event in specific:
+                        yield event, data
                 case "instant" | "streaming_parse":
                     if self._streaming_json_parser is not None:
                         streaming_parsed = None
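The new "specific" channel lets a caller subscribe to a chosen subset of event names (including the new reasoning events) as `(event, data)` tuples. A consumption sketch; `response` stands in for an object exposing this parser's generator API:

for event, data in response.get_generator(
    "specific",
    specific=["reasoning_delta", "delta", "done"],
):
    if event == "reasoning_delta":
        print(f"[thinking] {data}", end="", flush=True)
    elif event == "delta":
        print(data, end="", flush=True)
    elif event == "done":
        print()  # answer text is complete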
agently/core/Agent.py
CHANGED
@@ -66,6 +66,8 @@ class BaseAgent:
         self.request_prompt = self.request.prompt
         self.prompt = self.request_prompt

+        self.set_settings = self.settings.set_settings
+
         self.get_response = self.request.get_response
         self.get_result = self.request.get_result
         self.get_meta = self.request.get_meta

@@ -83,10 +85,6 @@ class BaseAgent:
         self.async_start = self.async_get_data

     # Basic Methods
-    def set_settings(self, key: str, value: "SerializableValue"):
-        self.settings.set_settings(key, value)
-        return self
-
     def set_agent_prompt(
         self,
         key: "PromptStandardSlot | str",

@@ -273,6 +271,19 @@ class BaseAgent:
         self.request.prompt.set("output", prompt, mappings)
         return self

+    def attachment(
+        self,
+        prompt: list[dict[str, Any]],
+        mappings: dict[str, Any] | None = None,
+        *,
+        always: bool = False,
+    ):
+        if always:
+            self.agent_prompt.set("attachment", prompt, mappings)
+        else:
+            self.request_prompt.set("attachment", prompt, mappings)
+        return self
+
     def options(
         self,
         options: dict[str, Any],
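`attachment()` fills the new `attachment` prompt slot; as the OpenAICompatible `__init__` hunk above shows, a non-empty slot switches the requester's `rich_content` plugin setting on. A usage sketch; the content-part shape follows the OpenAI-style multimodal message format, which is an assumption this diff does not confirm:

# Hypothetical image attachment; always=True writes it to the agent-level
# prompt so it persists across requests instead of applying to only the next one.
agent.attachment(
    [{"type": "image_url", "image_url": {"url": "https://example.com/chart.png"}}],
    always=True,
)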
agently/core/ModelRequest.py
CHANGED
@@ -425,14 +425,13 @@ class ModelRequest:
             parent=parent_extension_handlers,
         )

+        self.set_settings = self.settings.set_settings
+
         self.get_meta = FunctionShifter.syncify(self.async_get_meta)
         self.get_text = FunctionShifter.syncify(self.async_get_text)
         self.get_data = FunctionShifter.syncify(self.async_get_data)
         self.get_data_object = FunctionShifter.syncify(self.async_get_data_object)

-    def set_settings(self, key: str, value: "SerializableValue"):
-        self.settings.set_settings(key, value)
-
     def set_prompt(
         self,
         key: "PromptStandardSlot | str",

@@ -523,6 +522,15 @@ class ModelRequest:
         self.prompt.set("output", prompt, mappings)
         return self

+    def attachment(
+        self,
+        prompt: list[dict[str, Any]],
+        mappings: dict[str, Any] | None = None,
+    ):
+        self.prompt.set("attachment", prompt, mappings)
+        return self
+
+    # Response & Result
     def get_response(self):
         response = ModelResponse(
             self.agent_name,

@@ -581,58 +589,84 @@ class ModelRequest:
     def get_generator(
         self,
         type: Literal["instant", "streaming_parse"],
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> Generator["StreamingData", None, None]: ...

     @overload
     def get_generator(
         self,
         type: Literal["all"],
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> Generator[tuple[str, Any], None, None]: ...

     @overload
     def get_generator(
         self,
-        type: Literal["delta", "
+        type: Literal["delta", "specific", "original"],
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> Generator[str, None, None]: ...

     @overload
     def get_generator(
         self,
-        type: Literal["all", "original", "delta", "
+        type: Literal["all", "original", "delta", "specific", "instant", "streaming_parse"] | None = "delta",
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> Generator: ...

     def get_generator(
         self,
-        type: Literal["all", "original", "delta", "
+        type: Literal["all", "original", "delta", "specific", "instant", "streaming_parse"] | None = "delta",
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> Generator:
-        return self.get_response().get_generator(
+        return self.get_response().get_generator(
+            type=type,
+            specific=specific,
+        )

     @overload
     def get_async_generator(
         self,
         type: Literal["instant", "streaming_parse"],
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> AsyncGenerator["StreamingData", None]: ...

     @overload
     def get_async_generator(
         self,
         type: Literal["all"],
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> AsyncGenerator[tuple[str, Any], None]: ...

     @overload
     def get_async_generator(
         self,
-        type: Literal["delta", "
+        type: Literal["delta", "specific", "original"],
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> AsyncGenerator[str, None]: ...

     @overload
     def get_async_generator(
         self,
-        type: Literal["all", "original", "delta", "
+        type: Literal["all", "original", "delta", "specific", "instant", "streaming_parse"] | None = "delta",
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> AsyncGenerator: ...

     def get_async_generator(
         self,
-        type: Literal["all", "original", "delta", "
+        type: Literal["all", "original", "delta", "specific", "instant", "streaming_parse"] | None = "delta",
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> AsyncGenerator:
-        return self.get_response().get_async_generator(
+        return self.get_response().get_async_generator(
+            type=type,
+            specific=specific,
+        )
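`ModelRequest.get_generator` and `get_async_generator` now simply forward `type` and `specific` to the response object. An async consumption sketch (the `request` variable is illustrative):

import asyncio

async def main():
    # "specific" yields (event, data) tuples for the chosen events only.
    async for event, data in request.get_async_generator(
        "specific",
        specific=["delta", "reasoning_done"],
    ):
        if event == "delta":
            print(data, end="", flush=True)
        elif event == "reasoning_done":
            print(f"\n[full reasoning]\n{data}")

asyncio.run(main())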