agently-4.0.6.5-py3-none-any.whl → agently-4.0.6.10-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -188,6 +188,7 @@ class ConfigurePromptExtension(BaseAgent):
                 "Cannot execute YAML prompt configures, expect prompt configures as a dictionary data but got:"
                 f"{ prompt }"
             )
+        return self

     def load_json_prompt(self, path_or_content: str, mappings: dict[str, Any] | None = None):
         path = Path(path_or_content)
@@ -209,3 +210,4 @@ class ConfigurePromptExtension(BaseAgent):
                 "Cannot execute JSON prompt configures, expect prompt configures as a dictionary data but got:"
                 f"{ prompt }"
             )
+        return self
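
Both prompt loaders now end with return self, so they can be chained with the rest of the fluent BaseAgent API instead of returning None. A minimal stand-in sketch (not the real Agently classes, just the pattern the added lines enable):

# Stand-in illustrating why the added `return self` matters: each loader
# hands the same object back, so calls can be chained fluently.
class PromptLoader:
    def __init__(self):
        self.prompt = {}

    def load_yaml_prompt(self, content: str):
        self.prompt["yaml"] = content
        return self  # same pattern the diff adds to ConfigurePromptExtension

    def load_json_prompt(self, content: str):
        self.prompt["json"] = content
        return self


loader = PromptLoader().load_yaml_prompt("input: Hi").load_json_prompt('{"input": "Hi"}')
print(loader.prompt)  # {'yaml': 'input: Hi', 'json': '{"input": "Hi"}'}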
@@ -17,6 +17,32 @@ from typing import TYPE_CHECKING

 from agently.types.plugins import EventHooker

+COLORS = {
+    "black": 30,
+    "red": 31,
+    "green": 32,
+    "yellow": 33,
+    "blue": 34,
+    "magenta": 35,
+    "cyan": 36,
+    "white": 37,
+    "gray": 90,
+}
+
+
+def color_text(text: str, color: str | None = None, bold: bool = False, underline: bool = False) -> str:
+    codes = []
+    if bold:
+        codes.append("1")
+    if underline:
+        codes.append("4")
+    if color and color in COLORS:
+        codes.append(str(COLORS[color]))
+    if not codes:
+        return text
+    return f"\x1b[{';'.join(codes)}m{text}\x1b[0m"
+
+
 if TYPE_CHECKING:
     from agently.types.data import EventMessage, AgentlySystemEvent

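
color_text simply wraps text in SGR escape sequences: style codes (1 for bold, 4 for underline) are appended before the color code, and text without any codes is returned untouched. A quick demonstration, copying the definitions from the hunk above and trimming the color table:

# Trimmed copies of the new helpers, to show the exact escape sequences produced.
COLORS = {"cyan": 36, "yellow": 33, "gray": 90}

def color_text(text, color=None, bold=False, underline=False):
    codes = []
    if bold:
        codes.append("1")
    if underline:
        codes.append("4")
    if color and color in COLORS:
        codes.append(str(COLORS[color]))
    if not codes:
        return text
    return f"\x1b[{';'.join(codes)}m{text}\x1b[0m"

print(repr(color_text("Stage:", color="cyan", bold=True)))       # '\x1b[1;36mStage:\x1b[0m'
print(repr(color_text("done", color="yellow", underline=True)))  # '\x1b[4;33mdone\x1b[0m'
print(repr(color_text("plain")))                                 # 'plain' (no codes, unchanged)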
@@ -72,12 +98,18 @@ class SystemMessageHooker(EventHooker):
                     and SystemMessageHooker._current_meta["row_id"] == message_data["response_id"]
                     and SystemMessageHooker._current_meta["stage"] == content["stage"]
                 ):
-                    print(content["detail"], end="")
+                    print(color_text(content["detail"], color="gray"), end="", flush=True)
                 else:
-                    print(
-                        f"[Agent-{ message_data['agent_name'] }] - [Request-{ message_data['response_id'] }]\nStage: { content['stage'] }\nDetail:\n{ content['detail'] }",
-                        end="",
+                    header = color_text(
+                        f"[Agent-{ message_data['agent_name'] }] - [Request-{ message_data['response_id'] }]",
+                        color="blue",
+                        bold=True,
                     )
+                    stage_label = color_text("Stage:", color="cyan", bold=True)
+                    stage_val = color_text(content["stage"], color="yellow", underline=True)
+                    detail_label = color_text("Detail:\n", color="cyan", bold=True)
+                    detail = color_text(content["detail"], color="green")
+                    print(f"{header}\n{stage_label} {stage_val}\n{detail_label}{detail}", end="")
                     SystemMessageHooker._current_meta["table_name"] = message_data["agent_name"]
                     SystemMessageHooker._current_meta["row_id"] = message_data["response_id"]
                     SystemMessageHooker._current_meta["stage"] = content["stage"]
@@ -99,28 +131,40 @@ class SystemMessageHooker(EventHooker):
                         },
                     )
                 if settings["runtime.show_model_logs"]:
+                    header = color_text(
+                        f"[Agent-{ message_data['agent_name'] }] - [Response-{ message_data['response_id'] }]",
+                        color="blue",
+                        bold=True,
+                    )
+                    stage_label = color_text("Stage:", color="cyan", bold=True)
+                    stage_val = color_text(content["stage"], color="yellow", underline=True)
+                    detail_label = color_text("Detail:\n", color="cyan", bold=True)
+                    detail = color_text(f"{content['detail']}", color="gray")
                     await event_center.async_emit(
                         "log",
                         {
                             "level": "INFO",
-                            "content": f"[Agent-{ message_data['agent_name'] }] - [Response-{ message_data['response_id'] }]\nStage: { content['stage'] }\nDetail:\n{ content['detail'] }",
+                            "content": f"{header}\n{stage_label} {stage_val}\n{detail_label}{detail}",
                        },
                     )
             case "TOOL":
                 if settings["runtime.show_tool_logs"]:
+                    tool_title = color_text("[Tool Using Result]:", color="blue", bold=True)
+                    tool_body = color_text(str(message.content["data"]), color="gray")
                     await event_center.async_emit(
                         "log",
                         {
                             "level": "INFO",
-                            "content": f"[Tool Using Result]:\n{ message.content['data'] }",
+                            "content": f"{tool_title}\n{tool_body}",
                         },
                     )
             case "TRIGGER_FLOW":
                 if settings["runtime.show_trigger_flow_logs"]:
+                    trigger = color_text(f"[TriggerFlow] { message.content['data'] }", color="yellow", bold=True)
                     await event_center.async_emit(
                         "log",
                         {
                             "level": "INFO",
-                            "content": f"[TriggerFlow] { message.content['data'] }",
+                            "content": trigger,
                         },
                     )
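
Because these colored strings are now emitted through the log event, any sink that writes to a file or a non-TTY stream will receive raw ANSI escape codes in the message content. A hedged sketch for stripping them on the consumer side (the handler wiring is assumed; the regex only targets the SGR sequences color_text produces):

import re

# Matches SGR sequences such as "\x1b[1;34m" and "\x1b[0m".
ANSI_SGR = re.compile(r"\x1b\[[0-9;]*m")

def strip_ansi(text: str) -> str:
    # Remove color/style codes before writing log content to plain files.
    return ANSI_SGR.sub("", text)

colored = "\x1b[1;34m[Agent-demo] - [Response-42]\x1b[0m\n\x1b[90mdetail text\x1b[0m"
print(strip_ansi(colored))  # "[Agent-demo] - [Response-42]\ndetail text"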
@@ -146,6 +146,10 @@ class OpenAICompatible(ModelRequester):
         self.model_type = cast(str, self.plugin_settings.get("model_type"))
         self._messenger = event_center.create_messenger(self.name)

+        # check if has attachment prompt
+        if self.prompt["attachment"]:
+            self.plugin_settings["rich_content"] = True
+
     @staticmethod
     def _on_register():
         pass
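
The new constructor check turns on the requester's rich_content setting whenever the prompt already carries an attachment slot, so attachment parts can be sent as multi-part message content. A stand-in sketch of that flag flip (the attachment entry shape shown here, an OpenAI-style image_url part, is an assumption for illustration and is not taken from this diff):

# Stand-in for the added __init__ check: a truthy "attachment" slot switches
# the plugin to rich (multi-part) message content.
prompt = {
    "input": "What is in this picture?",
    "attachment": [
        {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},  # assumed shape
    ],
}
plugin_settings = {"rich_content": False}

if prompt["attachment"]:  # same truthiness check as the diff
    plugin_settings["rich_content"] = True

print(plugin_settings)  # {'rich_content': True}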
@@ -351,13 +355,13 @@ class OpenAICompatible(ModelRequester):
         api_key = self.plugin_settings.get("api_key", None)
         if api_key is not None and auth["api_key"] == "None":
             auth["api_key"] = str(api_key)
-        if "api_key" in auth:
-            headers_with_auth = {**request_data.headers, "Authorization": f"Bearer { auth['api_key'] }"}
-        elif "headers" in auth and isinstance(auth["headers"], dict):
+        if "headers" in auth and isinstance(auth["headers"], dict):
            headers_with_auth = {**request_data.headers, **auth["headers"]}
         elif "body" in auth and isinstance(auth["body"], dict):
             headers_with_auth = request_data.headers.copy()
             request_data.data.update(**auth["body"])
+        if "api_key" in auth and auth["api_key"] != "None":
+            headers_with_auth = {**request_data.headers, "Authorization": f"Bearer { auth['api_key'] }"}
         else:
             headers_with_auth = request_data.headers.copy()

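
The auth handling is reordered: custom headers are consulted first, then body auth, and the Bearer api_key header is applied when a usable key is present instead of taking precedence over everything else. The exact nesting of the two added lines cannot be recovered from the flattened diff, so the following stand-in is one plausible reading, not the actual method:

# Hedged stand-in for the reordered auth precedence (headers, then body,
# then a Bearer header when api_key is set and not the literal "None").
def build_auth_headers(base_headers: dict, data: dict, auth: dict) -> dict:
    headers = base_headers.copy()
    if "headers" in auth and isinstance(auth["headers"], dict):
        headers.update(auth["headers"])
    elif "body" in auth and isinstance(auth["body"], dict):
        data.update(auth["body"])
    if "api_key" in auth and auth["api_key"] != "None":
        headers["Authorization"] = f"Bearer { auth['api_key'] }"
    return headers

print(build_auth_headers({}, {}, {"api_key": "sk-test"}))
# {'Authorization': 'Bearer sk-test'}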
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import json
 import yaml

 from typing import (
@@ -38,6 +39,7 @@ from agently.utils import SettingsNamespace, DataFormatter

 if TYPE_CHECKING:
     from pydantic import BaseModel
+    from agently.types.data import SerializableData
     from agently.core import Prompt
     from agently.utils import Settings

@@ -307,9 +309,7 @@ class AgentlyPromptGenerator(PromptGenerator):
         if isinstance(role_mapping, dict):
             merged_role_mapping.update(role_mapping)

-        prompt_text_list.append(
-            f"{ (merged_role_mapping['user'] if 'user' in merged_role_mapping else 'user').upper() }:"
-        )
+        prompt_text_list.append(f"{ (merged_role_mapping['user'] if 'user' in merged_role_mapping else 'user') }:")

         # system & developer
         if prompt_object.system:
@@ -368,7 +368,7 @@ class AgentlyPromptGenerator(PromptGenerator):

         prompt_text_list.extend(self._generate_main_prompt(prompt_object))
         prompt_text_list.append(
-            f"{ (merged_role_mapping['assistant'] if 'assistant' in merged_role_mapping else 'assistant').upper() }:"
+            f"{ (merged_role_mapping['assistant'] if 'assistant' in merged_role_mapping else 'assistant') }:"
         )

         return "\n".join(prompt_text_list)
@@ -398,12 +398,7 @@ class AgentlyPromptGenerator(PromptGenerator):
         if prompt_object.system:
             prompt_messages.append(
                 self._generate_yaml_prompt_message(
-                    str(
-                        prompt_title_mapping.get(
-                            'system',
-                            'SYSTEM',
-                        )
-                    ),
+                    "system",
                     prompt_object.system,
                     role_mapping=merged_role_mapping,
                 )
@@ -412,12 +407,7 @@ class AgentlyPromptGenerator(PromptGenerator):
         if prompt_object.developer:
             prompt_messages.append(
                 self._generate_yaml_prompt_message(
-                    str(
-                        prompt_title_mapping.get(
-                            'developer',
-                            'DEVELOPER DIRECTIONS',
-                        )
-                    ),
+                    "developer",
                     prompt_object.developer,
                     role_mapping=merged_role_mapping,
                 )
@@ -661,7 +651,12 @@ class AgentlyPromptGenerator(PromptGenerator):
                 }
             )

-            return create_model(name, **fields, **validators)
+            return create_model(
+                name,
+                __config__={'extra': 'allow'},
+                **fields,
+                **validators,
+            )
         else:
             item_type = Any
             if len(schema) > 0:
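
Passing __config__={'extra': 'allow'} means the dynamically created models now keep keys that are not declared in the output schema instead of dropping them. A minimal illustration, assuming pydantic v2 (which this create_model keyword targets):

from pydantic import create_model

# Default config ignores undeclared fields; extra="allow" keeps them.
Strict = create_model("Strict", name=(str, ...))
Loose = create_model("Loose", __config__={"extra": "allow"}, name=(str, ...))

print(Strict.model_validate({"name": "a", "note": "x"}).model_dump())  # {'name': 'a'}
print(Loose.model_validate({"name": "a", "note": "x"}).model_dump())   # {'name': 'a', 'note': 'x'}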
@@ -710,3 +705,59 @@ class AgentlyPromptGenerator(PromptGenerator):
             "AgentlyOutput",
             {"list": DataFormatter.sanitize(output_prompt, remain_type=True)},
         )
+
+    def _to_serializable_output_prompt(self, output_prompt_part: Any):
+        if not isinstance(output_prompt_part, (Mapping, Sequence)) or isinstance(output_prompt_part, str):
+            return output_prompt_part
+
+        if isinstance(output_prompt_part, Mapping):
+            result = {}
+            for key, value in output_prompt_part.items():
+                result[key] = self._to_serializable_output_prompt(value)
+            return result
+        else:
+            if isinstance(output_prompt_part, tuple):
+                match len(output_prompt_part):
+                    case 0:
+                        return []
+                    case 1:
+                        return {
+                            "$type": output_prompt_part[0],
+                        }
+                    case _:
+                        desc_text = ";".join([item for item in output_prompt_part[1:] if item])
+                        if desc_text:
+                            return {
+                                "$type": output_prompt_part[0],
+                                "$desc": desc_text,
+                            }
+                        else:
+                            return {
+                                "$type": output_prompt_part[0],
+                            }
+            else:
+                return list(output_prompt_part)
+
+    def to_serializable_prompt_data(self, inherit: bool = False) -> "SerializableData":
+        prompt_data = self.prompt.get(
+            default={},
+            inherit=inherit,
+        )
+        if "output" in prompt_data:
+            prompt_data["output"] = self._to_serializable_output_prompt(prompt_data["output"])
+        return DataFormatter.sanitize(prompt_data)
+
+    def to_json_prompt(self, inherit: bool = False):
+        return json.dumps(
+            self.to_serializable_prompt_data(inherit),
+            indent=2,
+            ensure_ascii=False,
+        )
+
+    def to_yaml_prompt(self, inherit: bool = False):
+        return yaml.safe_dump(
+            self.to_serializable_prompt_data(inherit),
+            indent=2,
+            allow_unicode=True,
+            sort_keys=False,
+        )
agently/core/Agent.py CHANGED
@@ -13,6 +13,8 @@
 # limitations under the License.

 import uuid
+import yaml
+import json

 from typing import Any, TYPE_CHECKING

@@ -91,7 +93,7 @@ class BaseAgent:
         value: Any,
         mappings: dict[str, Any] | None = None,
     ):
-        self.prompt.set(key, value, mappings)
+        self.agent_prompt.set(key, value, mappings)
         return self

     def set_request_prompt(
@@ -104,7 +106,7 @@
         return self

     def remove_agent_prompt(self, key: "PromptStandardSlot | str"):
-        self.prompt.set(key, None)
+        self.agent_prompt.set(key, None)
         return self

     def remove_request_prompt(self, key: "PromptStandardSlot | str"):
@@ -112,34 +114,34 @@
         return self

     def reset_chat_history(self):
-        if "chat_history" in self.prompt:
-            self.prompt.set("chat_history", [])
+        if "chat_history" in self.agent_prompt:
+            self.agent_prompt.set("chat_history", [])
         return self

     def set_chat_history(self, chat_history: "list[dict[str, Any] | ChatMessage]"):
         self.reset_chat_history()
         if not isinstance(chat_history, list):
             chat_history = [chat_history]
-        self.prompt.set("chat_history", chat_history)
+        self.agent_prompt.set("chat_history", chat_history)
         return self

     def add_chat_history(self, chat_history: "list[dict[str, Any] | ChatMessage] | dict[str, Any] | ChatMessage"):
         if not isinstance(chat_history, list):
             chat_history = [chat_history]
-        self.prompt.set("chat_history", chat_history)
+        self.agent_prompt.set("chat_history", chat_history)
         return self

     def reset_action_results(self):
-        if "action_results" in self.prompt:
-            del self.prompt["action_results"]
+        if "action_results" in self.agent_prompt:
+            del self.agent_prompt["action_results"]
         return self

     def set_action_results(self, action_results: list[dict[str, Any]]):
-        self.prompt.set("action_results", action_results)
+        self.agent_prompt.set("action_results", action_results)
         return self

     def add_action_results(self, action: str, result: Any):
-        self.prompt.append("action_results", {action: result})
+        self.agent_prompt.append("action_results", {action: result})
         return self

     # Quick Prompt
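
Every always=True branch in BaseAgent now writes to self.agent_prompt instead of the old self.prompt, making the two prompt scopes explicit: agent-level prompt state is reused on every request, while the default branches only touch the prompt of the current request (the persistence semantics are inferred from the always flag, not stated in the diff). A stand-in sketch of the split:

# Stand-in for the two prompt scopes the rename makes explicit.
agent_prompt = {}    # written by the always=True branches
request_prompt = {}  # written by the default (per-request) branches

def set_prompt(key, value, *, always=False):
    (agent_prompt if always else request_prompt)[key] = value

set_prompt("system.rule", "Reply in English", always=True)  # sticks to the agent
set_prompt("input", "Hello")                                # only this request
print(agent_prompt)    # {'system.rule': 'Reply in English'}
print(request_prompt)  # {'input': 'Hello'}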
@@ -151,7 +153,7 @@
         always: bool = False,
     ):
         if always:
-            self.prompt.set("input", prompt, mappings)
+            self.agent_prompt.set("input", prompt, mappings)
         else:
             self.request.prompt.set("input", prompt, mappings)
         return self
@@ -164,8 +166,8 @@
         always: bool = False,
     ):
         if always:
-            self.prompt.set("instruct", ["{system.rule} ARE IMPORTANT RULES YOU SHALL FOLLOW!"])
-            self.prompt.set("system.rule", prompt, mappings)
+            self.agent_prompt.set("instruct", ["{system.rule} ARE IMPORTANT RULES YOU SHALL FOLLOW!"])
+            self.agent_prompt.set("system.rule", prompt, mappings)
         else:
             self.request.prompt.set("instruct", ["{system.rule} ARE IMPORTANT RULES YOU SHALL FOLLOW!"])
             self.request.prompt.set("system.rule", prompt, mappings)
@@ -179,8 +181,8 @@
         always: bool = False,
     ):
         if always:
-            self.prompt.set("instruct", ["YOU MUST REACT AND RESPOND AS {system.role}!"])
-            self.prompt.set("system.your_role", prompt, mappings)
+            self.agent_prompt.set("instruct", ["YOU MUST REACT AND RESPOND AS {system.role}!"])
+            self.agent_prompt.set("system.your_role", prompt, mappings)
         else:
             self.request.prompt.set("instruct", ["YOU MUST REACT AND RESPOND AS {system.role}!"])
             self.request.prompt.set("system.your_role", prompt, mappings)
@@ -194,8 +196,8 @@
         always: bool = False,
     ):
         if always:
-            self.prompt.set("instruct", ["{system.user_info} IS IMPORTANT INFORMATION ABOUT USER!"])
-            self.prompt.set("system.user_info", prompt, mappings)
+            self.agent_prompt.set("instruct", ["{system.user_info} IS IMPORTANT INFORMATION ABOUT USER!"])
+            self.agent_prompt.set("system.user_info", prompt, mappings)
         else:
             self.request.prompt.set("instruct", ["{system.user_info} IS IMPORTANT INFORMATION ABOUT USER!"])
             self.request.prompt.set("system.user_info", prompt, mappings)
@@ -209,7 +211,7 @@
         always: bool = False,
     ):
         if always:
-            self.prompt.set("input", prompt, mappings)
+            self.agent_prompt.set("input", prompt, mappings)
         else:
             self.request.prompt.set("input", prompt, mappings)
         return self
@@ -222,7 +224,7 @@
         always: bool = False,
     ):
         if always:
-            self.prompt.set("info", prompt, mappings)
+            self.agent_prompt.set("info", prompt, mappings)
         else:
             self.request.prompt.set("info", prompt, mappings)
         return self
@@ -235,7 +237,7 @@
         always: bool = False,
     ):
         if always:
-            self.prompt.set("instruct", prompt, mappings)
+            self.agent_prompt.set("instruct", prompt, mappings)
         else:
             self.request.prompt.set("instruct", prompt, mappings)
         return self
@@ -248,7 +250,7 @@
         always: bool = False,
     ):
         if always:
-            self.prompt.set("examples", prompt, mappings)
+            self.agent_prompt.set("examples", prompt, mappings)
         else:
             self.request.prompt.set("examples", prompt, mappings)
         return self
@@ -266,11 +268,24 @@
         always: bool = False,
     ):
         if always:
-            self.prompt.set("output", prompt, mappings)
+            self.agent_prompt.set("output", prompt, mappings)
         else:
             self.request.prompt.set("output", prompt, mappings)
         return self

+    def attachment(
+        self,
+        prompt: list[dict[str, Any]],
+        mappings: dict[str, Any] | None = None,
+        *,
+        always: bool = False,
+    ):
+        if always:
+            self.agent_prompt.set("attachment", prompt, mappings)
+        else:
+            self.request_prompt.set("attachment", prompt, mappings)
+        return self
+
     def options(
         self,
         options: dict[str, Any],
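
The new attachment() helper takes a list of attachment parts and stores it under the "attachment" slot, either on the agent prompt (always=True) or on the request prompt. A hedged usage sketch; the part layout (an OpenAI-style image_url entry) is an assumption, since the diff only fixes the argument type as list[dict[str, Any]]:

# Hypothetical attachment payload; `agent` is an existing Agently agent instance.
attachment_parts = [
    {"type": "image_url", "image_url": {"url": "https://example.com/chart.png"}},  # assumed shape
]
# agent.attachment(attachment_parts)               # attach for the next request only
# agent.attachment(attachment_parts, always=True)  # keep on the agent prompt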
@@ -278,7 +293,34 @@
         always: bool = False,
     ):
         if always:
-            self.prompt.set("options", options)
+            self.agent_prompt.set("options", options)
         else:
             self.request.prompt.set("options", options)
         return self
+
+    # Prompt
+    def get_prompt_text(self):
+        return self.request_prompt.to_text()[6:][:-11]
+
+    def get_json_prompt(self):
+        prompt_data = {
+            ".agent": self.agent_prompt.to_serializable_prompt_data(),
+            ".request": self.request_prompt.to_serializable_prompt_data(),
+        }
+        return json.dumps(
+            prompt_data,
+            indent=2,
+            ensure_ascii=False,
+        )
+
+    def get_yaml_prompt(self):
+        prompt_data = {
+            ".agent": self.agent_prompt.to_serializable_prompt_data(),
+            ".request": self.request_prompt.to_serializable_prompt_data(),
+        }
+        return yaml.safe_dump(
+            prompt_data,
+            indent=2,
+            allow_unicode=True,
+            sort_keys=False,
+        )