agently 4.0.6.10__tar.gz → 4.0.7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {agently-4.0.6.10 → agently-4.0.7}/PKG-INFO +1 -1
- {agently-4.0.6.10 → agently-4.0.7}/agently/base.py +3 -5
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/agent_extensions/ConfigurePromptExtension.py +38 -9
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/plugins/ModelRequester/OpenAICompatible.py +28 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/plugins/ResponseParser/AgentlyResponseParser.py +22 -14
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/Agent.py +2 -4
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/ModelRequest.py +38 -10
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/PluginManager.py +2 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/Prompt.py +7 -45
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/TriggerFlow/BluePrint.py +2 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/TriggerFlow/Chunk.py +3 -2
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/TriggerFlow/Execution.py +28 -11
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/TriggerFlow/TriggerFlow.py +24 -10
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/TriggerFlow/process/BaseProcess.py +27 -8
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/TriggerFlow/process/ForEachProcess.py +29 -23
- {agently-4.0.6.10 → agently-4.0.7}/agently/integrations/chromadb.py +15 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/data/response.py +10 -1
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/plugins/ResponseParser.py +26 -6
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/DataFormatter.py +77 -0
- agently-4.0.7/agently/utils/PythonSandbox.py +101 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/Settings.py +19 -2
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/__init__.py +1 -0
- {agently-4.0.6.10 → agently-4.0.7}/pyproject.toml +1 -1
- {agently-4.0.6.10 → agently-4.0.7}/LICENSE +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/README.md +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/__init__.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/_default_init.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/_default_settings.yaml +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/agent_extensions/AutoFuncExtension.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/agent_extensions/ChatSessionExtension.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/agent_extensions/KeyWaiterExtension.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/agent_extensions/ToolExtension.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/agent_extensions/__init__.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/hookers/ConsoleHooker.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/hookers/PureLoggerHooker.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/hookers/SystemMessageHooker.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/plugins/PromptGenerator/AgentlyPromptGenerator.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/plugins/ToolManager/AgentlyToolManager.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/plugins/__init__.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/tools/Browse.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/tools/Search.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/builtins/tools/__init__.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/EventCenter.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/ExtensionHandlers.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/Tool.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/TriggerFlow/Process.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/TriggerFlow/__init__.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/TriggerFlow/process/MatchCaseProcess.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/TriggerFlow/process/__init__.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/core/__init__.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/__init__.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/data/__init__.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/data/event.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/data/prompt.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/data/request.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/data/serializable.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/data/tool.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/plugins/EventHooker.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/plugins/ModelRequester.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/plugins/PromptGenerator.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/plugins/ToolManager.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/plugins/__init__.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/plugins/base.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/trigger_flow/__init__.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/types/trigger_flow/trigger_flow.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/DataLocator.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/DataPathBuilder.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/FunctionShifter.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/GeneratorConsumer.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/LazyImport.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/Logger.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/Messenger.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/RuntimeData.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/SerializableRuntimeData.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/Storage.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/StreamingJSONCompleter.py +0 -0
- {agently-4.0.6.10 → agently-4.0.7}/agently/utils/StreamingJSONParser.py +0 -0
{agently-4.0.6.10 → agently-4.0.7}/agently/base.py
RENAMED

@@ -14,7 +14,7 @@

 from typing import Any, Literal, Type, TYPE_CHECKING, TypeVar, Generic, cast

-from agently.utils import Settings, create_logger, FunctionShifter
+from agently.utils import Settings, create_logger, FunctionShifter, DataFormatter
 from agently.core import PluginManager, EventCenter, Tool, Prompt, ModelRequest, BaseAgent
 from agently._default_init import _load_default_settings, _load_default_plugins, _hook_default_event_handlers

@@ -117,6 +117,8 @@ class AgentlyMain(Generic[A]):
         self.tool = tool
         self.AgentType = AgentType

+        self.set_settings = self.settings.set_settings
+
     def set_debug_console(self, debug_console_status: Literal["ON", "OFF"]):
         match debug_console_status:
             case "OFF":
@@ -130,10 +132,6 @@ class AgentlyMain(Generic[A]):
         self.logger.setLevel(log_level)
         return self

-    def set_settings(self, key: str, value: "SerializableValue"):
-        self.settings.set_settings(key, value)
-        return self
-
     def create_prompt(self, name: str = "agently_prompt") -> Prompt:
         return Prompt(
             self.plugin_manager,
{agently-4.0.6.10 → agently-4.0.7}/agently/builtins/agent_extensions/ConfigurePromptExtension.py
RENAMED
@@ -21,6 +21,7 @@ from typing import Any
 from json import JSONDecodeError

 from agently.core import BaseAgent
+from agently.utils import DataLocator


 class ConfigurePromptExtension(BaseAgent):
@@ -168,46 +169,74 @@ class ConfigurePromptExtension(BaseAgent):
             variable_mappings,
         )

-    def load_yaml_prompt(
+    def load_yaml_prompt(
+        self,
+        path_or_content: str | Path,
+        mappings: dict[str, Any] | None = None,
+        *,
+        prompt_key_path: str | None = None,
+        encoding: str | None = "utf-8",
+    ):
         path = Path(path_or_content)
         if path.exists() and path.is_file():
             try:
-                with path.open("r", encoding=
+                with path.open("r", encoding=encoding) as file:
                     prompt = yaml.safe_load(file)
             except yaml.YAMLError as e:
                 raise ValueError(f"Cannot load YAML file '{ path_or_content }'.\nError: { e }")
         else:
             try:
-                prompt = yaml.safe_load(path_or_content)
+                prompt = yaml.safe_load(str(path_or_content))
             except yaml.YAMLError as e:
                 raise ValueError(f"Cannot load YAML content or file path not existed.\nError: { e }")
+        if not isinstance(prompt, dict):
+            raise TypeError(
+                "Cannot execute YAML prompt configures, expect prompt configures as a dictionary data but got:"
+                f"{ prompt }"
+            )
+        if prompt_key_path is not None:
+            prompt = DataLocator.locate_path_in_dict(prompt, prompt_key_path)
         if isinstance(prompt, dict):
             self._execute_prompt_configure(prompt, mappings)
         else:
             raise TypeError(
-                "Cannot execute YAML prompt configures, expect prompt configures as a dictionary data but got:"
+                f"Cannot execute YAML prompt configures, expect prompt configures{ ' from [' + prompt_key_path + '] ' if prompt_key_path is not None else '' } as a dictionary data but got:"
                 f"{ prompt }"
             )
         return self

-    def load_json_prompt(
+    def load_json_prompt(
+        self,
+        path_or_content: str | Path,
+        mappings: dict[str, Any] | None = None,
+        *,
+        prompt_key_path: str | None = None,
+        encoding: str | None = "utf-8",
+    ):
         path = Path(path_or_content)
         if path.exists() and path.is_file():
             try:
-                with path.open("r", encoding=
+                with path.open("r", encoding=encoding) as file:
                     prompt = json5.load(file)
             except JSONDecodeError as e:
                 raise ValueError(f"Cannot load JSON file '{ path_or_content }'.\nError: { e }")
         else:
             try:
-                prompt = json5.loads(path_or_content)
-            except
+                prompt = json5.loads(str(path_or_content))
+            except JSONDecodeError as e:
                 raise ValueError(f"Cannot load JSON content or file path not existed.\nError: { e }")
+        if not isinstance(prompt, dict):
+            raise TypeError(
+                "Cannot execute JSON prompt configures, expect prompt configures as a dictionary data but got:"
+                f"{ prompt }"
+            )
+        if prompt_key_path is not None:
+            prompt = DataLocator.locate_path_in_dict(prompt, prompt_key_path)
         if isinstance(prompt, dict):
             self._execute_prompt_configure(prompt, mappings)
         else:
             raise TypeError(
-                "Cannot execute JSON prompt configures, expect prompt configures as a dictionary data but got:"
+                f"Cannot execute JSON prompt configures, expect prompt configures{ ' from [' + prompt_key_path + '] ' if prompt_key_path is not None else '' }as a dictionary data but got:"
                 f"{ prompt }"
             )
         return self
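In 4.0.7 `load_yaml_prompt` and `load_json_prompt` take keyword-only `prompt_key_path` and `encoding` arguments, validate that the loaded document is a dictionary, and can select a nested sub-dictionary before applying the prompt configuration. A hedged usage sketch; the YAML layout, the key path, and the mapping key are illustrative only, and the dot-separated key path is assumed to follow `DataLocator.locate_path_in_dict`'s default style.

```python
# Sketch: `agent` is assumed to be an Agently agent exposing these extension methods.
yaml_content = """
prompts:                       # hypothetical document layout, not from the package
  summarize:
    input: ${ text }
    instruct: Summarize the input in one sentence.
"""

agent.load_yaml_prompt(
    yaml_content,
    {"text": "..."},                       # mappings applied to ${ ... } placeholders
    prompt_key_path="prompts.summarize",   # new in 4.0.7: pick a sub-dictionary out of the document
    encoding="utf-8",                      # new in 4.0.7: used when a file path is passed instead of content
)
```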
{agently-4.0.6.10 → agently-4.0.7}/agently/builtins/plugins/ModelRequester/OpenAICompatible.py
RENAMED
@@ -45,6 +45,7 @@ if TYPE_CHECKING:
 class ContentMapping(TypedDict):
     id: str | None
     role: str | None
+    reasoning: str | None
     delta: str | None
     tool_calls: str | None
     done: str | None
@@ -114,6 +115,7 @@ class OpenAICompatible(ModelRequester):
             "content_mapping": {
                 "id": "id",
                 "role": "choices[0].delta.role",
+                "reasoning": "choices[0].delta.reasoning_content",
                 "delta": "choices[0].delta.content",
                 "tool_calls": "choices[0].delta.tool_calls",
                 "done": None,
@@ -124,6 +126,7 @@ class OpenAICompatible(ModelRequester):
                 },
                 "extra_done": None,
             },
+            "yield_extra_content_separately": True,
             "content_mapping_style": "dot",
             "timeout": {
                 "connect": 30.0,
@@ -505,6 +508,7 @@ class OpenAICompatible(ModelRequester):
     async def broadcast_response(self, response_generator: AsyncGenerator) -> "AgentlyResponseGenerator":
         meta = {}
         message_record = {}
+        reasoning_buffer = ""
         content_buffer = ""

         content_mapping = cast(
@@ -516,6 +520,7 @@ class OpenAICompatible(ModelRequester):
         )
         id_mapping = content_mapping["id"]
         role_mapping = content_mapping["role"]
+        reasoning_mapping = content_mapping["reasoning"]
         delta_mapping = content_mapping["delta"]
         tool_calls_mapping = content_mapping["tool_calls"]
         done_mapping = content_mapping["done"]
@@ -523,6 +528,7 @@ class OpenAICompatible(ModelRequester):
         finish_reason_mapping = content_mapping["finish_reason"]
         extra_delta_mapping = content_mapping["extra_delta"]
         extra_done_mapping = content_mapping["extra_done"]
+        yield_extra_content_separately = self.plugin_settings.get("yield_extra_content_separately", True)

         content_mapping_style = str(self.plugin_settings.get("content_mapping_style"))
         if content_mapping_style not in ("dot", "slash"):
@@ -552,6 +558,15 @@ class OpenAICompatible(ModelRequester):
                     )
                     if role:
                         meta.update({"role": role})
+                if reasoning_mapping:
+                    reasoning = DataLocator.locate_path_in_dict(
+                        loaded_message,
+                        reasoning_mapping,
+                        style=content_mapping_style,
+                    )
+                    if reasoning:
+                        reasoning_buffer += str(reasoning)
+                        yield "reasoning_delta", reasoning
                 if delta_mapping:
                     delta = DataLocator.locate_path_in_dict(
                         loaded_message,
@@ -578,6 +593,8 @@ class OpenAICompatible(ModelRequester):
                         )
                         if extra_value:
                             yield "extra", {extra_key: extra_value}
+                            if yield_extra_content_separately:
+                                yield extra_key, extra_value  # type: ignore
             else:
                 done_content = None
                 if self.model_type == "embeddings" and done_mapping is None:
@@ -593,6 +610,17 @@ class OpenAICompatible(ModelRequester):
                     yield "done", done_content
                 else:
                     yield "done", content_buffer
+                reasoning_content = None
+                if reasoning_mapping:
+                    reasoning_content = DataLocator.locate_path_in_dict(
+                        message_record,
+                        reasoning_mapping,
+                        style=content_mapping_style,
+                    )
+                if reasoning_content:
+                    yield "reasoning_done", reasoning_content
+                else:
+                    yield "reasoning_done", reasoning_buffer
             match self.model_type:
                 case "embeddings":
                     yield "original_done", message_record
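The requester now reads a `reasoning` field (default path `choices[0].delta.reasoning_content`), yields `reasoning_delta` / `reasoning_done` events next to `delta` / `done`, and the new `yield_extra_content_separately` flag re-yields each configured extra field under its own event name. A sketch of the corresponding plugin-settings shape; the surrounding keys mirror the defaults shown above, and only the two commented entries are new in this release.

```python
# Assumed plugin-settings shape for the OpenAICompatible requester, mirroring the defaults above.
plugin_settings = {
    "content_mapping": {
        "id": "id",
        "role": "choices[0].delta.role",
        "reasoning": "choices[0].delta.reasoning_content",  # new: where reasoning tokens are read from
        "delta": "choices[0].delta.content",
        "tool_calls": "choices[0].delta.tool_calls",
        "done": None,
    },
    "yield_extra_content_separately": True,  # new: also emit ("<extra_key>", value) events
    "content_mapping_style": "dot",
}
```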
{agently-4.0.6.10 → agently-4.0.7}/agently/builtins/plugins/ResponseParser/AgentlyResponseParser.py
RENAMED
@@ -282,8 +282,10 @@ class AgentlyResponseParser(ResponseParser):

     async def get_async_generator(
         self,
-        type: Literal['all', 'delta', '
-        content: Literal['all', 'delta', '
+        type: Literal['all', 'delta', 'specific', 'original', 'instant', 'streaming_parse'] | None = "delta",
+        content: Literal['all', 'delta', 'specific', 'original', 'instant', 'streaming_parse'] | None = "delta",
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> AsyncGenerator:
         await self._ensure_consumer()
         parsed_generator = cast(GeneratorConsumer, self._response_consumer).get_async_generator()
@@ -300,11 +302,13 @@ class AgentlyResponseParser(ResponseParser):
                 case "delta":
                     if event == "delta":
                         yield data
-                case "
-                    if
-
-                    elif
-
+                case "specific":
+                    if specific is None:
+                        specific = ["delta"]
+                    elif isinstance(specific, str):
+                        specific = [specific]
+                    if event in specific:
+                        yield event, data
                 case "instant" | "streaming_parse":
                     if self._streaming_json_parser is not None:
                         streaming_parsed = None
@@ -325,8 +329,10 @@ class AgentlyResponseParser(ResponseParser):

     def get_generator(
         self,
-        type: Literal['all', 'delta', '
-        content: Literal['all', 'delta', '
+        type: Literal['all', 'delta', 'specific', 'original', 'instant', 'streaming_parse'] | None = "delta",
+        content: Literal['all', 'delta', 'specific', 'original', 'instant', 'streaming_parse'] | None = "delta",
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> Generator:
         asyncio.run(self._ensure_consumer())
         parsed_generator = cast(GeneratorConsumer, self._response_consumer).get_generator()
@@ -343,11 +349,13 @@ class AgentlyResponseParser(ResponseParser):
                 case "delta":
                     if event == "delta":
                         yield data
-                case "
-                    if
-
-                    elif
-
+                case "specific":
+                    if specific is None:
+                        specific = ["delta"]
+                    elif isinstance(specific, str):
+                        specific = [specific]
+                    if event in specific:
+                        yield event, data
                 case "instant" | "streaming_parse":
                     if self._streaming_json_parser is not None:
                         streaming_parsed = None
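`get_generator` and `get_async_generator` gain a `type="specific"` mode with a keyword-only `specific` filter, defaulting to the reasoning, delta, done, and tool-call events. A minimal consumption sketch, assuming `response` is the parser-backed response object these methods live on; per the branch above, each yielded item is an `(event, data)` tuple limited to the names in `specific`.

```python
# Sketch: `response` is assumed to be an AgentlyResponseParser-backed response object.
for event, data in response.get_generator(
    type="specific",
    specific=["reasoning_delta", "delta", "done"],
):
    if event == "reasoning_delta":
        print("[thinking]", data, end="", flush=True)   # reasoning tokens as they stream
    elif event == "delta":
        print(data, end="", flush=True)                 # answer tokens as they stream
    elif event == "done":
        print("\nfull text:", data)                     # buffered final text
```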
{agently-4.0.6.10 → agently-4.0.7}/agently/core/Agent.py
RENAMED

@@ -66,6 +66,8 @@ class BaseAgent:
         self.request_prompt = self.request.prompt
         self.prompt = self.request_prompt

+        self.set_settings = self.settings.set_settings
+
         self.get_response = self.request.get_response
         self.get_result = self.request.get_result
         self.get_meta = self.request.get_meta
@@ -83,10 +85,6 @@ class BaseAgent:
         self.async_start = self.async_get_data

     # Basic Methods
-    def set_settings(self, key: str, value: "SerializableValue"):
-        self.settings.set_settings(key, value)
-        return self
-
     def set_agent_prompt(
         self,
         key: "PromptStandardSlot | str",
{agently-4.0.6.10 → agently-4.0.7}/agently/core/ModelRequest.py
RENAMED

@@ -425,13 +425,15 @@ class ModelRequest:
             parent=parent_extension_handlers,
         )

+        self.set_settings = self.settings.set_settings
+
         self.get_meta = FunctionShifter.syncify(self.async_get_meta)
         self.get_text = FunctionShifter.syncify(self.async_get_text)
         self.get_data = FunctionShifter.syncify(self.async_get_data)
         self.get_data_object = FunctionShifter.syncify(self.async_get_data_object)

-
-        self.
+        self.start = self.get_data
+        self.async_start = self.async_get_data

     def set_prompt(
         self,
@@ -590,58 +592,84 @@ class ModelRequest:
     def get_generator(
         self,
         type: Literal["instant", "streaming_parse"],
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> Generator["StreamingData", None, None]: ...

     @overload
     def get_generator(
         self,
         type: Literal["all"],
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> Generator[tuple[str, Any], None, None]: ...

     @overload
     def get_generator(
         self,
-        type: Literal["delta", "
+        type: Literal["delta", "specific", "original"],
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> Generator[str, None, None]: ...

     @overload
     def get_generator(
         self,
-        type: Literal["all", "original", "delta", "
+        type: Literal["all", "original", "delta", "specific", "instant", "streaming_parse"] | None = "delta",
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> Generator: ...

     def get_generator(
         self,
-        type: Literal["all", "original", "delta", "
+        type: Literal["all", "original", "delta", "specific", "instant", "streaming_parse"] | None = "delta",
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> Generator:
-        return self.get_response().get_generator(
+        return self.get_response().get_generator(
+            type=type,
+            specific=specific,
+        )

     @overload
     def get_async_generator(
         self,
         type: Literal["instant", "streaming_parse"],
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> AsyncGenerator["StreamingData", None]: ...

     @overload
     def get_async_generator(
         self,
         type: Literal["all"],
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> AsyncGenerator[tuple[str, Any], None]: ...

     @overload
     def get_async_generator(
         self,
-        type: Literal["delta", "
+        type: Literal["delta", "specific", "original"],
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> AsyncGenerator[str, None]: ...

     @overload
     def get_async_generator(
         self,
-        type: Literal["all", "original", "delta", "
+        type: Literal["all", "original", "delta", "specific", "instant", "streaming_parse"] | None = "delta",
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> AsyncGenerator: ...

     def get_async_generator(
         self,
-        type: Literal["all", "original", "delta", "
+        type: Literal["all", "original", "delta", "specific", "instant", "streaming_parse"] | None = "delta",
+        *,
+        specific: list[str] | str | None = ["reasoning_delta", "delta", "reasoning_done", "done", "tool_calls"],
     ) -> AsyncGenerator:
-        return self.get_response().get_async_generator(
+        return self.get_response().get_async_generator(
+            type=type,
+            specific=specific,
+        )
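`ModelRequest.get_generator` and `get_async_generator` now accept the keyword-only `specific` filter and forward it, together with `type`, to the response object's generators. A minimal consumption sketch, assuming `request` is a `ModelRequest` whose prompt has already been set elsewhere; the `(event, data)` tuple shape follows the `specific` branch of the response parser shown above.

```python
# Sketch: `request` stands for a prepared ModelRequest instance.
async def stream_events(request):
    # Filtered event stream; the same keyword works on the sync get_generator variant.
    async for event, data in request.get_async_generator(
        type="specific",
        specific=["reasoning_delta", "delta", "done"],
    ):
        print(event, data)
```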
{agently-4.0.6.10 → agently-4.0.7}/agently/core/Prompt.py
RENAMED

@@ -14,9 +14,9 @@

 import re
 from textwrap import dedent
-from typing import Any, Literal,
+from typing import Any, Literal, TYPE_CHECKING, cast, overload, TypeVar

-from agently.utils import RuntimeData, Settings
+from agently.utils import RuntimeData, Settings, DataFormatter

 if TYPE_CHECKING:
     from agently.types.data.prompt import ChatMessage, PromptStandardSlot
@@ -80,8 +80,6 @@ class Prompt(RuntimeData):
     ):
         super().__init__(prompt_dict, parent=parent_prompt, name=name)

-        self._placeholder_pattern = re.compile(r"\$\{\s*([^}]+?)\s*\}")
-
         self.settings = Settings(
             name="Prompt-Settings",
             parent=parent_settings,
@@ -103,42 +101,6 @@ class Prompt(RuntimeData):
         self.to_json_prompt = self.prompt_generator.to_json_prompt
         self.to_yaml_prompt = self.prompt_generator.to_yaml_prompt

-    def _substitute_placeholder(self, obj: T, variable_mappings: dict[str, Any]) -> T | Any:
-        if not isinstance(variable_mappings, dict):
-            raise TypeError(f"Variable mappings require a dictionary but got: { variable_mappings }")
-
-        if isinstance(obj, str):
-            full_match = self._placeholder_pattern.fullmatch(obj)
-            if full_match:
-                key = full_match.group(1).strip()
-                return variable_mappings.get(key, obj)
-            else:
-
-                def replacer(match):
-                    key = match.group(1).strip()
-                    return str(variable_mappings.get(key, match.group(0)))
-
-                return self._placeholder_pattern.sub(replacer, obj)
-
-        if isinstance(obj, Mapping):
-            return {
-                self._substitute_placeholder(key, variable_mappings): self._substitute_placeholder(
-                    value, variable_mappings
-                )
-                for key, value in obj.items()
-            }
-
-        if isinstance(obj, Sequence) and not isinstance(obj, (str, bytes, bytearray)):
-            if isinstance(obj, tuple):
-                return tuple(self._substitute_placeholder(value, variable_mappings) for value in obj)
-            else:
-                return [self._substitute_placeholder(value, variable_mappings) for value in obj]
-
-        if isinstance(obj, set):
-            return {self._substitute_placeholder(value, variable_mappings) for value in obj}
-
-        return obj
-
     @overload
     def set(
         self,
@@ -165,8 +127,8 @@ class Prompt(RuntimeData):
             value = dedent(value.strip())
         if mappings is not None:
             super().set(
-
-
+                DataFormatter.substitute_placeholder(key, mappings),
+                DataFormatter.substitute_placeholder(value, mappings),
             )
         else:
             super().set(key, value)
@@ -178,7 +140,7 @@ class Prompt(RuntimeData):
     ):
         if mappings is not None:
             super().update(
-
+                DataFormatter.substitute_placeholder(new, mappings),
             )
         else:
             super().update(new)
@@ -193,8 +155,8 @@ class Prompt(RuntimeData):
             value = dedent(value.strip())
         if mappings is not None:
             super().append(
-
-
+                DataFormatter.substitute_placeholder(key, mappings),
+                DataFormatter.substitute_placeholder(value, mappings),
             )
         else:
             super().append(key, value)
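Placeholder substitution moves out of `Prompt._substitute_placeholder` into the shared `DataFormatter.substitute_placeholder` utility (added to `agently/utils/DataFormatter.py` per the file list above), which `set`, `update`, and `append` now call whenever `mappings` is given. A small behavioral sketch based on the removed helper's `${ ... }` regex; the exact semantics of `DataFormatter.substitute_placeholder` are not shown in this diff and are assumed to match the old helper.

```python
from agently.utils import DataFormatter

# Assumption: DataFormatter.substitute_placeholder keeps the old ${ ... } placeholder contract,
# since Prompt.set/update/append now delegate to it with the same (obj, mappings) arguments.
template = {"input": "${ question }", "instruct": "Answer in ${ language }."}
filled = DataFormatter.substitute_placeholder(
    template,
    {"question": "What changed in 4.0.7?", "language": "English"},
)
# Expected under that assumption:
# {"input": "What changed in 4.0.7?", "instruct": "Answer in English."}
```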
{agently-4.0.6.10 → agently-4.0.7}/agently/core/TriggerFlow/BluePrint.py
RENAMED

@@ -132,6 +132,7 @@ class TriggerFlowBluePrint:
         *,
         execution_id: str | None = None,
         skip_exceptions: bool = False,
+        concurrency: int | None = None,
     ):
         handlers_snapshot: TriggerFlowAllHandlers = {
             "event": {k: v.copy() for k, v in self._handlers["event"].items()},
@@ -143,6 +144,7 @@ class TriggerFlowBluePrint:
             trigger_flow=trigger_flow,
             id=execution_id,
             skip_exceptions=skip_exceptions,
+            concurrency=concurrency,
         )

     def copy(self, *, name: str | None = None):
{agently-4.0.6.10 → agently-4.0.7}/agently/core/TriggerFlow/Chunk.py
RENAMED

@@ -30,9 +30,10 @@ class TriggerFlowChunk:
         *,
         name: str | None = None,
     ):
-        self.
+        self.id = uuid.uuid4().hex
+        self.name = name if name is not None else self.id
         self._handler = handler
-        self.trigger = f"Chunk[{ handler.__name__ }]-{ self.
+        self.trigger = f"Chunk[{ handler.__name__ }]-{ self.id }"

     async def async_call(self, data: "TriggerFlowEventData"):
         result = await FunctionShifter.asyncify(self._handler)(data)
{agently-4.0.6.10 → agently-4.0.7}/agently/core/TriggerFlow/Execution.py
RENAMED

@@ -16,6 +16,7 @@
 import uuid
 import asyncio
 import warnings
+from contextvars import ContextVar

 from typing import Any, Literal, TYPE_CHECKING

@@ -37,6 +38,7 @@ class TriggerFlowExecution:
         trigger_flow: "TriggerFlow",
         id: str | None = None,
         skip_exceptions: bool = False,
+        concurrency: int | None = None,
     ):
         # Basic Attributions
         self.id = id if id is not None else uuid.uuid4().hex
@@ -45,6 +47,11 @@ class TriggerFlowExecution:
         self._runtime_data = RuntimeData()
         self._system_runtime_data = RuntimeData()
         self._skip_exceptions = skip_exceptions
+        self._concurrency_semaphore = asyncio.Semaphore(concurrency) if concurrency and concurrency > 0 else None
+        self._concurrency_depth = ContextVar(
+            f"trigger_flow_execution_concurrency_depth_{ self.id }",
+            default=0,
+        )

         # Settings
         self.settings = Settings(
@@ -126,19 +133,29 @@ class TriggerFlowExecution:
                 },
                 self.settings,
             )
-
-
-
-
-
-
-
-
-
-
-
+            async def run_handler(handler_func):
+                if self._concurrency_semaphore is None:
+                    return await handler_func
+                depth = self._concurrency_depth.get()
+                token = self._concurrency_depth.set(depth + 1)
+                try:
+                    if depth > 0:
+                        return await handler_func
+                    async with self._concurrency_semaphore:
+                        return await handler_func
+                finally:
+                    self._concurrency_depth.reset(token)
+
+            handler_task = FunctionShifter.asyncify(handler)(
+                TriggerFlowEventData(
+                    trigger_event=trigger_event,
+                    trigger_type=trigger_type,
+                    value=value,
+                    execution=self,
+                    _layer_marks=_layer_marks,
                 )
             )
+            tasks.append(asyncio.ensure_future(run_handler(handler_task)))

         if tasks:
             await asyncio.gather(*tasks, return_exceptions=self._skip_exceptions)
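`TriggerFlowBluePrint` forwards a new `concurrency` argument to `TriggerFlowExecution`: when set, outermost handler invocations are gated by an `asyncio.Semaphore`, while a `ContextVar` depth counter lets re-entrant (nested) handler calls bypass the gate so they cannot deadlock on a slot they already hold. A standalone, runnable illustration of that semaphore-plus-depth pattern (not Agently code), to show why the depth check matters.

```python
import asyncio
from contextvars import ContextVar

# Standalone illustration of the gating pattern used by TriggerFlowExecution:
# only outermost calls compete for semaphore slots; nested calls run without re-acquiring.
semaphore = asyncio.Semaphore(2)                  # like concurrency=2
depth = ContextVar("handler_depth", default=0)    # per-task call depth

async def run_gated(coro):
    d = depth.get()
    token = depth.set(d + 1)
    try:
        if d > 0:                                 # already inside a gated handler: run directly
            return await coro
        async with semaphore:                     # outermost call: take a concurrency slot
            return await coro
    finally:
        depth.reset(token)

async def handler(i):
    await asyncio.sleep(0.1)                      # stand-in for real handler work
    return i

async def main():
    # At most two handlers are in flight at any moment.
    results = await asyncio.gather(*(run_gated(handler(i)) for i in range(5)))
    print(results)

asyncio.run(main())
```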