waldiez 0.3.6__py3-none-any.whl → 0.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of waldiez might be problematic.
- waldiez/__init__.py +15 -66
- waldiez/_version.py +1 -1
- waldiez/cli.py +11 -8
- waldiez/exporting/__init__.py +2 -0
- waldiez/exporting/agent/agent_exporter.py +11 -2
- waldiez/exporting/agent/utils/__init__.py +2 -0
- waldiez/exporting/agent/utils/agent_class_name.py +2 -0
- waldiez/exporting/agent/utils/agent_imports.py +5 -0
- waldiez/exporting/agent/utils/reasoning.py +36 -0
- waldiez/exporting/flow/flow_exporter.py +21 -8
- waldiez/exporting/flow/utils/__init__.py +10 -5
- waldiez/exporting/flow/utils/def_main.py +25 -20
- waldiez/exporting/flow/utils/flow_content.py +42 -1
- waldiez/exporting/flow/utils/importing_utils.py +7 -1
- waldiez/exporting/flow/utils/logging_utils.py +176 -42
- waldiez/models/__init__.py +8 -0
- waldiez/models/agents/__init__.py +10 -0
- waldiez/models/agents/agent/agent.py +10 -4
- waldiez/models/agents/agent/termination_message.py +2 -0
- waldiez/models/agents/agents.py +10 -0
- waldiez/models/agents/rag_user/retrieve_config.py +46 -17
- waldiez/models/agents/reasoning/__init__.py +13 -0
- waldiez/models/agents/reasoning/reasoning_agent.py +43 -0
- waldiez/models/agents/reasoning/reasoning_agent_data.py +116 -0
- waldiez/models/agents/reasoning/reasoning_agent_reason_config.py +101 -0
- waldiez/models/agents/swarm_agent/__init__.py +2 -1
- waldiez/models/agents/swarm_agent/swarm_agent_data.py +2 -3
- waldiez/models/chat/chat_data.py +30 -63
- waldiez/models/chat/chat_message.py +2 -26
- waldiez/models/chat/chat_nested.py +7 -8
- waldiez/models/common/__init__.py +3 -18
- waldiez/models/common/date_utils.py +18 -0
- waldiez/models/common/dict_utils.py +37 -0
- waldiez/models/common/method_utils.py +2 -5
- waldiez/models/flow/flow_data.py +1 -1
- waldiez/models/waldiez.py +4 -1
- waldiez/runner.py +3 -3
- waldiez/running/environment.py +22 -16
- waldiez/running/gen_seq_diagram.py +7 -4
- waldiez/running/running.py +67 -19
- waldiez/utils/__init__.py +15 -0
- waldiez/utils/cli_extras/__init__.py +30 -0
- waldiez/{cli_extras.py → utils/cli_extras/jupyter.py} +9 -20
- waldiez/utils/cli_extras/studio.py +36 -0
- waldiez/{conflict_checker.py → utils/conflict_checker.py} +14 -3
- waldiez/utils/flaml_warnings.py +17 -0
- waldiez/utils/pysqlite3_checker.py +249 -0
- {waldiez-0.3.6.dist-info → waldiez-0.3.7.dist-info}/METADATA +27 -19
- {waldiez-0.3.6.dist-info → waldiez-0.3.7.dist-info}/RECORD +53 -40
- waldiez-0.3.7.dist-info/licenses/NOTICE.md +5 -0
- {waldiez-0.3.6.dist-info → waldiez-0.3.7.dist-info}/WHEEL +0 -0
- {waldiez-0.3.6.dist-info → waldiez-0.3.7.dist-info}/entry_points.txt +0 -0
- {waldiez-0.3.6.dist-info → waldiez-0.3.7.dist-info}/licenses/LICENSE +0 -0
waldiez/models/agents/reasoning/reasoning_agent_data.py
ADDED
@@ -0,0 +1,116 @@
+# SPDX-License-Identifier: Apache-2.0.
+# Copyright (c) 2024 - 2025 Waldiez and contributors.
+"""Reasoning agent data model."""
+
+from typing import Any, Dict
+
+from pydantic import Field
+from typing_extensions import Annotated, Literal
+
+from ..assistant import WaldiezAssistantData
+from .reasoning_agent_reason_config import WaldiezReasoningAgentReasonConfig
+
+
+class WaldiezReasoningAgentData(WaldiezAssistantData):
+    """Reasoning agent data model."""
+
+    max_depth: Annotated[
+        int,
+        Field(
+            4,
+            title="Maximum depth",
+            description="Maximum depth of the reasoning tree",
+            alias="maxDepth",
+            deprecated=True,
+        ),
+    ] = 4
+    beam_size: Annotated[
+        int,
+        Field(
+            3,
+            title="Beam size",
+            description="Number of parallel reasoning paths to maintain",
+            alias="beamSize",
+            deprecated=True,
+        ),
+    ] = 3
+    answer_approach: Annotated[
+        Literal["pool", "best"],
+        Field(
+            "pool",
+            title="Answer approach",
+            description="How to generate final answer",
+            alias="answerApproach",
+            deprecated=True,
+        ),
+    ] = "pool"
+    verbose: Annotated[
+        bool,
+        Field(
+            True,
+            title="Verbose",
+            description="Whether to show intermediate steps",
+        ),
+    ]
+    reason_config: Annotated[
+        WaldiezReasoningAgentReasonConfig,
+        Field(
+            title="Reason config",
+            description="The reasoning agent's reason configuration",
+            default_factory=WaldiezReasoningAgentReasonConfig,
+            alias="reasonConfig",
+        ),
+    ]
+
+    def get_reasoning_config(self) -> Dict[str, Any]:
+        """Get the reasoning configuration based on the reason_config method.
+
+        Returns
+        -------
+        Dict[str, Any]
+            The reasoning configuration.
+        """
+        reason_dict: Dict[str, Any] = {
+            "method": self.reason_config.method,
+            "max_depth": self.reason_config.max_depth,
+            "forest_size": self.reason_config.forest_size,
+            "rating_scale": self.reason_config.rating_scale,
+        }
+        if self.reason_config.method == "beam_search":
+            reason_dict["beam_size"] = self.reason_config.beam_size
+            reason_dict["answer_approach"] = self.reason_config.answer_approach
+        if self.reason_config.method in ("mcts", "lats"):
+            reason_dict["nsim"] = self.reason_config.nsim
+            reason_dict["exploration_constant"] = (
+                self.reason_config.exploration_constant
+            )
+        return reason_dict
+
+
+# reason_config (dict): Configuration for the reasoning method.
+#     Supported parameters:
+#         method (str): The search strategy to use. Options:
+#             - "beam_search" (default): Uses beam search with parallel paths
+#             - "mcts": Uses Monte Carlo Tree Search for exploration
+#             - "lats": Uses Language Agent Tree Search with per-step rewards
+#             - "dfs": Uses depth-first search
+#               (equivalent to beam_search with beam_size=1)
+#     Common parameters:
+#         max_depth (int): Maximum depth of reasoning tree (default: 3)
+#         forest_size (int):
+#             Number of independent trees to maintain (default: 1)
+#         rating_scale (int):
+#             Scale for grading responses, e.g. 1-10 (default: 10)
+#     Beam Search specific:
+#         beam_size (int): Number of parallel paths to maintain (default: 3)
+#         answer_approach (str):
+#             How to select final answer, "pool" or "best" (default: "pool")
+#     MCTS/LATS specific:
+#         nsim (int): Number of simulations to run (default: 3)
+#         exploration_constant (float):
+#             UCT exploration parameter (default: 1.41)
+#     Example configs:
+#         `{"method": "beam_search", "beam_size": 5, "max_depth": 4}`
+#         `{"method": "mcts", "nsim": 10, "exploration_constant": 2.0}`
+#         `{"method": "lats", "nsim": 5, "forest_size": 3}`
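The strategy-specific keys in get_reasoning_config() are only emitted for the matching method. An illustrative sketch of the resulting dictionaries, using the defaults declared in the new reason-config model (the variable names here are for illustration only, not part of the release):

beam_search_reasoning = {
    "method": "beam_search",
    "max_depth": 3,
    "forest_size": 1,
    "rating_scale": 10,
    "beam_size": 3,             # added only for beam_search
    "answer_approach": "pool",  # added only for beam_search
}
mcts_reasoning = {
    "method": "mcts",
    "max_depth": 3,
    "forest_size": 1,
    "rating_scale": 10,
    "nsim": 3,                     # added only for mcts/lats
    "exploration_constant": 1.41,  # added only for mcts/lats
}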
waldiez/models/agents/reasoning/reasoning_agent_reason_config.py
ADDED
@@ -0,0 +1,101 @@
+# SPDX-License-Identifier: Apache-2.0.
+# Copyright (c) 2024 - 2025 Waldiez and contributors.
+"""Reasoning agent's reason configuration model."""
+
+from pydantic import Field
+from typing_extensions import Annotated, Literal
+
+from ...common import WaldiezBase
+
+ReasoningConfigMethod = Literal["beam_search", "mcts", "lats", "dfs"]
+
+
+class WaldiezReasoningAgentReasonConfig(WaldiezBase):
+    """Reasoning agent's reason configuration model.
+
+    Configuration for the reasoning method.
+
+    Attributes
+    ----------
+    method : Literal["beam_search", "mcts", "lats", "dfs"]
+        The search strategy to use, default is "beam_search".
+    max_depth : int
+        Maximum depth of reasoning tree, default is 3.
+    forest_size : int
+        Number of independent trees to maintain, default is 1.
+    rating_scale : int
+        Scale for grading responses, e.g. 1-10, default is 10.
+    beam_size : int
+        Number of parallel paths to maintain, default is 3 (for beam_search).
+    answer_approach : Literal["pool", "best"]
+        How to select final answer, default is "pool" (only for beam_search).
+    nsim : int
+        Number of simulations to run, default is 3 (only for mcts and lats).
+    exploration_constant : float
+        UCT exploration parameter, default is 1.41 (only for mcts and lats).
+    """
+
+    method: Annotated[
+        ReasoningConfigMethod,
+        Field(
+            "beam_search",
+            title="Method",
+            description="The search strategy to use.",
+        ),
+    ]
+    max_depth: Annotated[
+        int,
+        Field(
+            3,
+            title="Maximum depth",
+            description="Maximum depth of reasoning tree.",
+        ),
+    ]
+    forest_size: Annotated[
+        int,
+        Field(
+            1,
+            title="Forest size",
+            description="Number of independent trees to maintain.",
+        ),
+    ]
+    rating_scale: Annotated[
+        int,
+        Field(
+            10,
+            title="Rating scale",
+            description="Scale for grading responses, e.g. 1-10.",
+        ),
+    ]
+    beam_size: Annotated[
+        int,
+        Field(
+            3,
+            title="Beam size",
+            description="Number of parallel paths to maintain.",
+        ),
+    ]
+    answer_approach: Annotated[
+        Literal["pool", "best"],
+        Field(
+            "pool",
+            title="Answer approach",
+            description="How to select final answer.",
+        ),
+    ]
+    nsim: Annotated[
+        int,
+        Field(
+            3,
+            title="Number of simulations",
+            description="Number of simulations to run.",
+        ),
+    ]
+    exploration_constant: Annotated[
+        float,
+        Field(
+            1.41,
+            title="Exploration constant",
+            description="UCT exploration parameter.",
+        ),
+    ]
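A minimal usage sketch of the config model above, importing it from the module path added in this release and assuming the pydantic defaults declared in Field(...) apply as usual; the overrides are illustrative:

from waldiez.models.agents.reasoning.reasoning_agent_reason_config import (
    WaldiezReasoningAgentReasonConfig,
)

# Every field carries a default, so an empty call gives the beam_search setup.
config = WaldiezReasoningAgentReasonConfig()
assert config.method == "beam_search" and config.beam_size == 3

# Overriding the method switches which keys get_reasoning_config() will emit.
mcts = WaldiezReasoningAgentReasonConfig(method="mcts", nsim=10)
assert mcts.exploration_constant == 1.41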
waldiez/models/agents/swarm_agent/__init__.py
CHANGED
@@ -19,7 +19,7 @@ from .on_condition_available import (
 )
 from .on_condition_target import WaldiezSwarmOnConditionTarget
 from .swarm_agent import WaldiezSwarmAgent
-from .swarm_agent_data import WaldiezSwarmAgentData
+from .swarm_agent_data import WaldiezSwarmAgentData, WaldiezSwarmHandoff
 from .update_system_message import (
     CUSTOM_UPDATE_SYSTEM_MESSAGE,
     CUSTOM_UPDATE_SYSTEM_MESSAGE_ARGS,
@@ -42,6 +42,7 @@ __all__ = [
     "WaldiezSwarmAgent",
     "WaldiezSwarmAgentData",
     "WaldiezSwarmAfterWorkRecipientType",
+    "WaldiezSwarmHandoff",
     "WaldiezSwarmOnCondition",
     "WaldiezSwarmOnConditionTarget",
     "WaldiezSwarmOnConditionAvailable",
waldiez/models/agents/swarm_agent/swarm_agent_data.py
CHANGED
@@ -13,6 +13,8 @@ from .after_work import WaldiezSwarmAfterWork
 from .on_condition import WaldiezSwarmOnCondition
 from .update_system_message import WaldiezSwarmUpdateSystemMessage
 
+WaldiezSwarmHandoff = Union[WaldiezSwarmOnCondition, WaldiezSwarmAfterWork]
+
 
 # flake8: noqa: E501
 # pylint: disable=line-too-long
@@ -118,8 +120,5 @@ class WaldiezSwarmAgentData(WaldiezAgentData):
         ]
         on_conditions = sorted(on_conditions, key=lambda x: x.target.order)
         handoffs = on_conditions + after_works
-        if after_works and after_works[0] != handoffs[-1]:
-            handoffs.remove(after_works[0])
-            handoffs.append(after_works[0])
         self.handoffs = handoffs
         return self
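A small sketch of the handoff ordering produced by the validator shown in the hunk above; the lightweight stand-in classes are illustrative, not the real Waldiez models:

from dataclasses import dataclass

@dataclass
class _Target:          # stand-in for the on-condition target
    order: int

@dataclass
class _OnCondition:     # stand-in for WaldiezSwarmOnCondition
    target: _Target

class _AfterWork:       # stand-in for WaldiezSwarmAfterWork
    pass

on_conditions = [_OnCondition(_Target(2)), _OnCondition(_Target(1))]
after_works = [_AfterWork()]

# Same ordering as the validator: on-conditions sorted by target.order,
# with any after-work handoffs appended at the end of the list.
on_conditions = sorted(on_conditions, key=lambda x: x.target.order)
handoffs = on_conditions + after_works
assert isinstance(handoffs[-1], _AfterWork)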
waldiez/models/chat/chat_data.py
CHANGED
@@ -11,7 +11,7 @@ from ..agents.swarm_agent import (
     WaldiezSwarmAfterWork,
     WaldiezSwarmOnConditionAvailable,
 )
-from ..common import WaldiezBase, check_function
+from ..common import WaldiezBase, check_function, update_dict
 from .chat_message import (
     CALLABLE_MESSAGE,
     CALLABLE_MESSAGE_ARGS,
@@ -291,22 +291,22 @@ class WaldiezChatData(WaldiezBase):
         ValueError
             If the validation fails.
         """
-        if isinstance(self.message, WaldiezChatMessage):
+        if not isinstance(self.message, WaldiezChatMessage):  # pragma: no cover
+            return self
+        self._message_content = self.message.content
+        if self.message.type == "none":
+            self._message_content = None
+        if self.message.type == "string":
+            self._message_content = self.message.content
+        if self.message.type == "method":
+            valid, error_or_body = check_function(
+                self.message.content or "",
+                CALLABLE_MESSAGE,
+                CALLABLE_MESSAGE_ARGS,
+            )
+            if not valid:
+                raise ValueError(error_or_body)
+            self._message_content = error_or_body
         return self
 
     @field_validator("message", mode="before")
@@ -342,20 +342,22 @@ class WaldiezChatData(WaldiezBase):
         )
         if isinstance(value, dict):
             return WaldiezChatMessage.model_validate(value)
-        if isinstance(value, WaldiezChatMessage):
-            return
+        if not isinstance(value, WaldiezChatMessage):
+            return WaldiezChatMessage(
+                type="none", use_carryover=False, content=None, context={}
+            )
+        return value
 
     @field_validator("context_variables", mode="after")
     @classmethod
-    def validate_context_variables(
+    def validate_context_variables(
+        cls, value: Optional[Dict[str, Any]]
+    ) -> Optional[Dict[str, Any]]:
         """Validate the context variables.
 
         Parameters
         ----------
-        value : Any
+        value : Optional[Dict[str, Any]]
             The context variables value.
 
         Returns
@@ -370,9 +372,7 @@ class WaldiezChatData(WaldiezBase):
         """
         if value is None:
             return None
-            raise ValueError("Context variables must be a dictionary.")
-        return get_context_dict(value)
+        return update_dict(value)
 
     @property
     def summary_args(self) -> Optional[Dict[str, Any]]:
@@ -398,8 +398,9 @@ class WaldiezChatData(WaldiezBase):
             The dictionary to use for generating the kwargs.
         """
         extra_args: Dict[str, Any] = {}
-        if isinstance(self.message, WaldiezChatMessage):
-            extra_args
+        if not isinstance(self.message, WaldiezChatMessage):  # pragma: no cover
+            return extra_args
+        extra_args.update(update_dict(self.message.context))
         return extra_args
 
     def get_chat_args(self, for_queue: bool) -> Dict[str, Any]:
@@ -434,37 +435,3 @@ class WaldiezChatData(WaldiezBase):
         if self._prerequisites:
             args["prerequisites"] = self._prerequisites
         return args
-
-
-def get_context_dict(context: Dict[str, Any]) -> Dict[str, Any]:
-    """Get the context dictionary.
-
-    Try to determine the type of the context variables.
-
-    Parameters
-    ----------
-    context : Dict[str, Any]
-        The context variables.
-
-    Returns
-    -------
-    Dict[str, Any]
-        The context variables with the detected types.
-    """
-    new_dict: Dict[str, Any] = {}
-    for key, value in context.items():
-        value_lower = str(value).lower()
-        if value_lower in ("none", "null"):
-            new_dict[key] = None
-        elif value_lower in ("true", "false"):
-            new_dict[key] = value.lower() == "true"
-        elif str(value).isdigit():
-            new_dict[key] = int(value)
-        elif str(value).replace(".", "").isdigit():
-            try:
-                new_dict[key] = float(value)
-            except ValueError:  # pragma: no cover
-                new_dict[key] = value
-        else:
-            new_dict[key] = value
-    return new_dict
waldiez/models/chat/chat_message.py
CHANGED
@@ -7,7 +7,7 @@ from typing import Any, Dict, List, Optional
 from pydantic import Field, model_validator
 from typing_extensions import Annotated, Literal, Self
 
-from ..common import WaldiezBase, check_function
+from ..common import WaldiezBase, check_function, update_dict
 
 WaldiezChatMessageType = Literal[
     "string", "method", "rag_message_generator", "none"
@@ -113,33 +113,9 @@ class WaldiezChatMessage(WaldiezBase):
         WaldiezChatMessage
             The validated instance.
         """
-        if isinstance(value, str):
-            if value.lower() == "true":
-                self.context[key] = True
-            elif value.lower() == "false":
-                self.context[key] = False
-            elif value.lower() in ["null", "none"]:
-                self.context[key] = None
-            else:
-                self.context[key] = self._number_or_string(value)
+        self.context = update_dict(self.context)
         return self
 
-    @staticmethod
-    def _number_or_string(value: Any) -> Any:
-        try:
-            int_value = int(value)
-            if str(int_value) == value:
-                return int_value
-        except ValueError:
-            try:
-                float_value = float(value)
-                if str(float_value) == value:
-                    return float_value
-            except ValueError:
-                pass
-        return value
-
     @model_validator(mode="after")
     def validate_content(self) -> Self:
         """Validate the content (if not a method).
waldiez/models/chat/chat_nested.py
CHANGED
@@ -114,27 +114,26 @@ class WaldiezChatNested(WaldiezBase):
             If the validation fails.
         """
         if self.message is not None:
+            self._message_content = self.message.content_body
             if self.message.type == "none":
                 self._message_content = ""
+            if self.message.type == "string":
                 self._message_content = self.message.content
+            if self.message.type == "method":
                 self._message_content = self.message.validate_method(
                     function_name=NESTED_CHAT_MESSAGE,
                     function_args=NESTED_CHAT_ARGS,
                 )
-            self._message_content = self.message.content_body
+
         if self.reply is not None:
+            self._reply_content = self.reply.content_body
             if self.reply.type == "none":
                 self._reply_content = ""
+            if self.reply.type == "string":
                 self._reply_content = self.reply.content
+            if self.reply.type == "method":
                 self._reply_content = self.reply.validate_method(
                     function_name=NESTED_CHAT_REPLY,
                     function_args=NESTED_CHAT_ARGS,
                 )
-            else:
-                self._reply_content = self.reply.content_body
         return self
waldiez/models/common/__init__.py
CHANGED
@@ -2,9 +2,9 @@
 # Copyright (c) 2024 - 2025 Waldiez and contributors.
 """Common utils for all models."""
 
-from datetime import datetime, timezone
-
 from .base import WaldiezBase
+from .date_utils import now
+from .dict_utils import update_dict
 from .method_utils import (
     check_function,
     generate_function,
@@ -12,22 +12,6 @@ from .method_utils import (
     parse_code_string,
 )
 
-
-def now() -> str:
-    """Get the current date and time in UTC.
-
-    Returns
-    -------
-    str
-        The current date and time in UTC.
-    """
-    return (
-        datetime.now(tz=timezone.utc)
-        .isoformat(timespec="milliseconds")
-        .replace("+00:00", "Z")
-    )
-
-
 __all__ = [
     "WaldiezBase",
     "now",
@@ -35,4 +19,5 @@ __all__ = [
     "get_function",
     "generate_function",
     "parse_code_string",
+    "update_dict",
 ]
waldiez/models/common/date_utils.py
ADDED
@@ -0,0 +1,18 @@
+"""Date utilities."""
+
+from datetime import datetime, timezone
+
+
+def now() -> str:
+    """Get the current date and time in UTC.
+
+    Returns
+    -------
+    str
+        The current date and time in UTC.
+    """
+    return (
+        datetime.now(tz=timezone.utc)
+        .isoformat(timespec="milliseconds")
+        .replace("+00:00", "Z")
+    )
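A quick sketch of the timestamp shape now() produces, importing it from the module path added in this release (the literal in the comment is an illustrative value, not real output):

from waldiez.models.common.date_utils import now

stamp = now()
# ISO 8601 UTC with millisecond precision and a trailing "Z",
# e.g. "2025-01-02T03:04:05.678Z" (illustrative value).
assert stamp.endswith("Z") and "T" in stamp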
waldiez/models/common/dict_utils.py
ADDED
@@ -0,0 +1,37 @@
+"""Dictionary related utilities."""
+
+from typing import Any, Dict
+
+
+def update_dict(original: Dict[str, Any]) -> Dict[str, Any]:
+    """
+
+    Try to determine the type of the dictionary values.
+
+    Parameters
+    ----------
+    original : Dict[str, Any]
+        The original dictionary.
+
+    Returns
+    -------
+    Dict[str, Any]
+        The updated dictionary with values converted to the detected types.
+    """
+    new_dict: Dict[str, Any] = {}
+    for key, value in original.items():
+        value_lower = str(value).lower()
+        if value_lower in ("none", "null"):
+            new_dict[key] = None
+        elif value_lower in ("true", "false"):
+            new_dict[key] = value_lower == "true"
+        elif str(value).isdigit():
+            new_dict[key] = int(value)
+        elif str(value).replace(".", "").isdigit():
+            try:
+                new_dict[key] = float(value)
+            except ValueError:  # pragma: no cover
+                new_dict[key] = value
+        else:
+            new_dict[key] = value
+    return new_dict
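A short sketch of the coercions update_dict applies, following the branches above; the sample keys and values are illustrative:

from waldiez.models.common.dict_utils import update_dict

raw = {"a": "42", "b": "3.14", "c": "true", "d": "null", "e": "hello"}
print(update_dict(raw))
# {'a': 42, 'b': 3.14, 'c': True, 'd': None, 'e': 'hello'}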
waldiez/models/common/method_utils.py
CHANGED
@@ -114,7 +114,7 @@ def _validate_function_body(
                 f" in function {node.name}"
             ),
         )
-    if not node.body:
+    if not node.body:  # pragma: no cover
         return False, "No body found in the function"
     function_body = _get_function_body(code_string, node)
     return True, function_body
@@ -143,10 +143,7 @@ def get_function(
     str
         The function signature and body.
     """
-        tree = parso.parse(code_string)  # type: ignore
-    except BaseException:  # pylint: disable=broad-except
-        return ""
+    tree = parso.parse(code_string)  # type: ignore
     for node in tree.iter_funcdefs():
         if node.name.value == function_name:
             return node.get_code()
waldiez/models/flow/flow_data.py
CHANGED
@@ -151,7 +151,7 @@ class WaldiezFlowData(WaldiezBase):
         for chat in self.chats:
             chat_prerequisites = []
             for chat_id in chat.data.prerequisites:
-                if chat_id not in id_to_chat_id:
+                if chat_id not in id_to_chat_id:  # pragma: no cover
                     raise ValueError(
                         f"Chat with id {chat_id} not found in the flow."
                     )
waldiez/models/waldiez.py
CHANGED
@@ -230,7 +230,7 @@ class Waldiez:
             requirements.add(f"pyautogen=={autogen_version}")
         if self.has_multimodal_agents:
             requirements.add(f"pyautogen[lmm]=={autogen_version}")
-        # ref: https://github.com/ag2ai/ag2/blob/main/
+        # ref: https://github.com/ag2ai/ag2/blob/main/pyproject.toml
         models_with_additional_requirements = [
            "together",
            "gemini",
@@ -241,6 +241,9 @@ class Waldiez:
            "bedrock",
        ]
        for model in self.models:
+            if model.data.api_type == "google":
+                requirements.add(f"pyautogen[gemini]=={autogen_version}")
+                continue
            if model.data.api_type in models_with_additional_requirements:
                requirements.add(
                    f"pyautogen[{model.data.api_type}]=={autogen_version}"
waldiez/runner.py
CHANGED
@@ -14,7 +14,7 @@ import sys
 import tempfile
 from pathlib import Path
 from types import TracebackType
-from typing import TYPE_CHECKING, List, Optional, Type, Union
+from typing import TYPE_CHECKING, Dict, List, Optional, Type, Union
 
 from asyncer import syncify
 
@@ -164,7 +164,7 @@ class WaldiezRunner:
         output_path: Optional[Union[str, Path]],
         uploads_root: Optional[Union[str, Path]],
         skip_mmd: bool = False,
-    ) -> Union["ChatResult", List["ChatResult"]]:
+    ) -> Union["ChatResult", List["ChatResult"], Dict[int, "ChatResult"]]:
         """Run the Waldiez workflow.
 
         Parameters
@@ -223,7 +223,7 @@ class WaldiezRunner:
         output_path: Optional[Union[str, Path]],
         uploads_root: Optional[Union[str, Path]],
         skip_mmd: bool = False,
-    ) -> Union["ChatResult", List["ChatResult"]]:
+    ) -> Union["ChatResult", List["ChatResult"], Dict[int, "ChatResult"]]:
         """Run the Waldiez workflow asynchronously."""
         temp_dir = Path(tempfile.mkdtemp())
         file_name = before_run(output_path, uploads_root)