lionagi-0.3.4-py3-none-any.whl → lionagi-0.3.6-py3-none-any.whl

lionagi/core/director/models/__init__.py ADDED
@@ -0,0 +1,13 @@
+ from .action_model import ActionModel
+ from .brainstorm_model import BrainstormModel
+ from .plan_model import PlanModel
+ from .reason_model import ReasonModel
+ from .step_model import StepModel
+
+ __all__ = [
+     "ReasonModel",
+     "StepModel",
+     "BrainstormModel",
+     "ActionModel",
+     "PlanModel",
+ ]
lionagi/core/director/models/action_model.py ADDED
@@ -0,0 +1,61 @@
+ from typing import Any
+
+ from lionfuncs import to_dict, validate_str
+ from pydantic import BaseModel, Field, field_validator
+
+
+ class ActionModel(BaseModel):
+
+     title: str = Field(
+         ...,
+         title="Title",
+         description="Provide a concise title summarizing the action.",
+     )
+     content: str = Field(
+         ...,
+         title="Content",
+         description="Provide a brief description of the action to be performed.",
+     )
+     function: str = Field(
+         ...,
+         title="Function",
+         description=(
+             "Specify the name of the function to execute. **Choose from the provided "
+             "`tool_schema`; do not invent function names.**"
+         ),
+         examples=["print", "add", "len"],
+     )
+     arguments: dict[str, Any] = Field(
+         {},
+         title="Arguments",
+         description=(
+             "Provide the arguments to pass to the function as a dictionary. **Use "
+             "argument names and types as specified in the `tool_schema`; do not "
+             "invent argument names.**"
+         ),
+         examples=[{"num1": 1, "num2": 2}, {"x": "hello", "y": "world"}],
+     )
+
+     @field_validator("title", mode="before")
+     def validate_title(cls, value: Any) -> str:
+         return validate_str(value, "title")
+
+     @field_validator("content", mode="before")
+     def validate_content(cls, value: Any) -> str:
+         return validate_str(value, "content")
+
+     @field_validator("function", mode="before")
+     def validate_function(cls, value: Any) -> str:
+         return validate_str(value, "function")
+
+     @field_validator("arguments", mode="before")
+     def validate_arguments(cls, value: Any) -> dict[str, Any]:
+         return to_dict(
+             value,
+             fuzzy_parse=True,
+             suppress=True,
+             recursive=True,
+         )
+
+
+ __all__ = ["ActionModel"]
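The `before`-mode validators above exist to absorb loosely typed LLM output before Pydantic's own checks run. A hedged usage sketch (field values invented; assumes `lionfuncs.to_dict(..., fuzzy_parse=True)` parses a JSON-like string into a dict, as its keywords suggest):

# Illustrative sketch only; assumes the lionagi 0.3.6 wheel and its
# lionfuncs dependency are installed.
from lionagi.core.director.models import ActionModel

action = ActionModel(
    title="Add two numbers",
    content="Call the add tool on the two operands from the user request.",
    function="add",
    # Passed as a string, as an LLM often returns it; the "before"-mode
    # validator routes it through to_dict(..., fuzzy_parse=True).
    arguments='{"num1": 1, "num2": 2}',
)
print(action.arguments)  # expected: {'num1': 1, 'num2': 2}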
lionagi/core/director/models/brainstorm_model.py ADDED
@@ -0,0 +1,42 @@
+ from typing import Any
+
+ from lionfuncs import validate_str
+ from pydantic import BaseModel, Field, field_validator
+
+ from .reason_model import ReasonModel
+ from .step_model import StepModel
+
+
+ class BrainstormModel(BaseModel):
+
+     title: str = Field(
+         ...,
+         title="Title",
+         description="Provide a concise title summarizing the brainstorming session.",
+     )
+     content: str = Field(
+         ...,
+         title="Content",
+         description="Describe the context or focus of the brainstorming session.",
+     )
+     ideas: list[StepModel] = Field(
+         ...,
+         title="Ideas",
+         description="A list of ideas for the next step, generated during brainstorming.",
+     )
+     reason: ReasonModel = Field(
+         ...,
+         title="Reason",
+         description="Provide the high level reasoning behind the brainstorming session.",
+     )
+
+     @field_validator("title", mode="before")
+     def validate_title(cls, value: Any) -> str:
+         return validate_str(value, "title")
+
+     @field_validator("content", mode="before")
+     def validate_content(cls, value: Any) -> str:
+         return validate_str(value, "content")
+
+
+ __all__ = ["BrainstormModel"]
lionagi/core/director/models/plan_model.py ADDED
@@ -0,0 +1,51 @@
+ from typing import Any, List
+
+ from lionfuncs import validate_str
+ from pydantic import BaseModel, Field, field_validator
+
+ from .reason_model import ReasonModel
+ from .step_model import StepModel
+
+
+ class PlanModel(BaseModel):
+     """
+     Represents a plan consisting of multiple steps, with an overall reason.
+
+     Attributes:
+         title (str): A concise title summarizing the plan.
+         content (str): A detailed description of the plan.
+         reason (ReasonModel): The overall reasoning behind the plan.
+         steps (List[StepModel]): A list of steps to execute the plan.
+     """
+
+     title: str = Field(
+         ...,
+         title="Title",
+         description="Provide a concise title summarizing the plan.",
+     )
+     content: str = Field(
+         ...,
+         title="Content",
+         description="Provide a detailed description of the plan.",
+     )
+     reason: ReasonModel = Field(
+         ...,
+         title="Reason",
+         description="Provide the reasoning behind the entire plan.",
+     )
+     steps: list[StepModel] = Field(
+         ...,
+         title="Steps",
+         description="A list of steps to execute the plan.",
+     )
+
+     @field_validator("title", mode="before")
+     def validate_title(cls, value: Any) -> str:
+         return validate_str(value, "title")
+
+     @field_validator("content", mode="before")
+     def validate_content(cls, value: Any) -> str:
+         return validate_str(value, "content")
+
+
+ __all__ = ["PlanModel"]
lionagi/core/director/models/reason_model.py ADDED
@@ -0,0 +1,63 @@
+ import logging
+ from typing import Any
+
+ from lionfuncs import to_num, validate_str
+ from pydantic import BaseModel, Field, field_validator
+
+
+ class ReasonModel(BaseModel):
+     title: str = Field(
+         ...,
+         title="Title",
+         description="Provide a concise title summarizing the reason.",
+     )
+     content: str = Field(
+         ...,
+         title="Content",
+         description=(
+             "Provide a detailed explanation supporting the reason, including relevant "
+             "information or context."
+         ),
+     )
+     confidence_score: float | None = Field(
+         None,
+         description=(
+             "Provide an objective numeric confidence score between 0 and 1 (with 3 "
+             "decimal places) indicating how likely you successfully achieved the task "
+             "according to user expectation. Interpret the score as:\n"
+             "- **1**: Very confident in a good job.\n"
+             "- **0**: Not confident at all.\n"
+             "- **[0.8, 1]**: You can continue the path of reasoning if needed.\n"
+             "- **[0.5, 0.8)**: Recheck your reasoning and consider reverting to a "
+             "previous, more confident reasoning path.\n"
+             "- **[0, 0.5)**: Stop because the reasoning is starting to be off track."
+         ),
+         examples=[0.821, 0.257, 0.923, 0.439],
+         ge=0,
+         le=1,
+     )
+
+     @field_validator("title", mode="before")
+     def validate_title(cls, value: Any) -> str:
+         return validate_str(value, "title")
+
+     @field_validator("content", mode="before")
+     def validate_content(cls, value: Any) -> str:
+         return validate_str(value, "content")
+
+     @field_validator("confidence_score", mode="before")
+     def validate_confidence_score(cls, value: Any) -> float:
+         try:
+             return to_num(
+                 value,
+                 upper_bound=1,
+                 lower_bound=0,
+                 num_type=float,
+                 precision=3,
+             )
+         except Exception as e:
+             logging.error(f"Failed to convert {value} to a number. Error: {e}")
+             return 0.0
+
+
+ __all__ = ["ReasonModel"]
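The `before`-mode validator on `confidence_score` routes raw model output through `lionfuncs.to_num`, so a numeric string is accepted, bounded to [0, 1], and rounded to three decimals, while anything unparseable is logged and replaced with 0.0. A minimal sketch, assuming `to_num` honors the bound and precision keywords as they are named:

# Illustrative values only.
from lionagi.core.director.models import ReasonModel

reason = ReasonModel(
    title="Tool choice",
    content="The add tool directly matches the user's request.",
    confidence_score="0.87519",  # numeric string from the LLM
)
print(reason.confidence_score)  # expected: 0.875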
lionagi/core/director/models/step_model.py ADDED
@@ -0,0 +1,65 @@
+ import logging
+ from typing import Any
+
+ from lionfuncs import validate_boolean, validate_str
+ from pydantic import BaseModel, Field, field_validator
+
+ from .action_model import ActionModel
+ from .reason_model import ReasonModel
+
+
+ class StepModel(BaseModel):
+     title: str = Field(
+         ...,
+         title="Title",
+         description="Provide a concise title summarizing the step.",
+     )
+     content: str = Field(
+         ...,
+         title="Content",
+         description="Describe the content of the step in detail.",
+     )
+     reason: ReasonModel = Field(
+         ...,
+         title="Reason",
+         description="Provide the reasoning behind this step, including supporting details.",
+     )
+     action_required: bool = Field(
+         False,
+         title="Action Required",
+         description=(
+             "Indicate whether this step requires an action. Set to **True** if an "
+             "action is required; otherwise, set to **False**."
+         ),
+     )
+     actions: list[ActionModel] = Field(
+         [],
+         title="Actions",
+         description=(
+             "List of actions to be performed if `action_required` is **True**. Leave "
+             "empty if no action is required. **When providing actions, you must "
+             "choose from the provided `tool_schema`. Do not invent function or "
+             "argument names.**"
+         ),
+     )
+
+     @field_validator("title", mode="before")
+     def validate_title(cls, value: Any) -> str:
+         return validate_str(value, "title")
+
+     @field_validator("content", mode="before")
+     def validate_content(cls, value: Any) -> str:
+         return validate_str(value, "content")
+
+     @field_validator("action_required", mode="before")
+     def validate_action_required(cls, value: Any) -> bool:
+         try:
+             return validate_boolean(value)
+         except Exception as e:
+             logging.error(
+                 f"Failed to convert {value} to a boolean. Error: {e}"
+             )
+             return False
+
+
+ __all__ = ["StepModel"]
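Because `reason` and `actions` are themselves models, a whole step can be validated from plain nested dicts. The sketch below is illustrative (all field values invented) and assumes Pydantic's usual nested coercion plus `validate_boolean` accepting a string like "true":

from lionagi.core.director.models import StepModel

step = StepModel(
    title="Compute the sum",
    content="Invoke the add tool with the two operands.",
    reason={
        "title": "Direct tool match",
        "content": "The request maps one-to-one onto the add tool.",
        "confidence_score": 0.92,
    },
    action_required="true",  # assumed coerced to True by validate_boolean
    actions=[
        {
            "title": "Call add",
            "content": "Add num1 and num2.",
            "function": "add",
            "arguments": {"num1": 1, "num2": 2},
        }
    ],
)
print(step.action_required, step.actions[0].function)  # expected: True add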
lionagi/core/director/operations/__init__.py ADDED
File without changes
lionagi/core/director/operations/select.py ADDED
@@ -0,0 +1,93 @@
+ from __future__ import annotations
+
+ from collections.abc import Callable
+ from enum import Enum
+
+ from lionfuncs import choose_most_similar
+ from pydantic import BaseModel
+
+ from lionagi.core.director.models import ReasonModel
+ from lionagi.core.session.branch import Branch
+
+ from .utils import is_enum
+
+ PROMPT = "Please select up to {max_num_selections} items from the following list {choices}. Provide the selection(s), and no comments from you"
+
+
+ class SelectionModel(BaseModel):
+     selected: list[str | Enum]
+
+
+ class ReasonSelectionModel(BaseModel):
+     selected: list[str | Enum]
+     reason: ReasonModel
+
+
+ async def select(
+     choices: list[str] | type[Enum],
+     max_num_selections: int = 1,
+     instruction=None,
+     context=None,
+     system=None,
+     sender=None,
+     recipient=None,
+     reason: bool = False,
+     return_enum: bool = False,
+     enum_parser: Callable = None,  # parse the model string response to appropriate type
+     branch: Branch = None,
+     return_pydantic_model=False,
+     **kwargs,  # additional chat arguments
+ ):
+     selections = []
+     if return_enum and not is_enum(choices):
+         raise ValueError("return_enum can only be True if choices is an Enum")
+
+     if is_enum(choices):
+         selections = [selection.value for selection in choices]
+     else:
+         selections = choices
+
+     prompt = PROMPT.format(
+         max_num_selections=max_num_selections, choices=selections
+     )
+
+     if instruction:
+         prompt = f"{instruction}\n\n{prompt} \n\n "
+
+     branch = branch or Branch()
+     response: SelectionModel | ReasonSelectionModel | str = await branch.chat(
+         instruction=prompt,
+         context=context,
+         system=system,
+         sender=sender,
+         recipient=recipient,
+         pydantic_model=SelectionModel if not reason else ReasonSelectionModel,
+         return_pydantic_model=True,
+         **kwargs,
+     )
+
+     selected = response
+     if isinstance(response, SelectionModel | ReasonSelectionModel):
+         selected = response.selected
+     selected = [selected] if not isinstance(selected, list) else selected
+     corrected_selections = [
+         choose_most_similar(selection, selections) for selection in selected
+     ]
+
+     if return_enum:
+         out = []
+         if not enum_parser:
+             enum_parser = lambda x: x
+         for selection in corrected_selections:
+             selection = enum_parser(selection)
+             for member in choices.__members__.values():
+                 if member.value == selection:
+                     out.append(member)
+         corrected_selections = out
+
+     if return_pydantic_model:
+         if not isinstance(response, SelectionModel | ReasonSelectionModel):
+             return SelectionModel(selected=corrected_selections)
+         response.selected = corrected_selections
+         return response
+     return corrected_selections
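For orientation, a hedged sketch of how the new operation might be driven. The enum and instruction are invented, and `Branch.chat` needs a configured LLM provider that this sketch does not show:

import asyncio
from enum import Enum

from lionagi.core.director.operations.select import select


class Tool(Enum):
    SEARCH = "search"
    CALCULATE = "calculate"
    SUMMARIZE = "summarize"


async def main():
    picked = await select(
        choices=Tool,
        max_num_selections=1,
        instruction="Pick the tool best suited to answering '2 + 2 = ?'.",
        return_enum=True,
    )
    # e.g. [<Tool.CALCULATE: 'calculate'>], depending on the model's answer
    print(picked)


# asyncio.run(main())  # requires an LLM provider to be configured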
lionagi/core/director/operations/utils.py ADDED
@@ -0,0 +1,6 @@
+ from enum import Enum
+ from inspect import isclass
+
+
+ def is_enum(choices):
+     return isclass(choices) and issubclass(choices, Enum)
lionagi/core/unit/unit.py CHANGED
@@ -117,9 +117,7 @@ class Unit(Directive, DirectiveMixin):
          requested_fields = break_down_annotation(pydantic_model)
          context = {
              "info": context,
-             "return_guidance": pydantic_model.model_json_schema()[
-                 "properties"
-             ],
+             "return_guidance": pydantic_model.model_json_schema(),
          }

          output, branch = await rcall(
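The change widens what the model sees as guidance: `return_guidance` now carries the full JSON Schema (including `title`, `type`, and `required`) rather than only its `properties` block. A small illustration using one of the new models, relying only on standard Pydantic v2 behavior:

from lionagi.core.director.models import ReasonModel

schema = ReasonModel.model_json_schema()
print(sorted(schema))      # e.g. ['properties', 'required', 'title', 'type']
print(schema["required"])  # ['title', 'content'] -- information that was
                           # dropped when only schema["properties"] was sent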
lionagi/version.py CHANGED
@@ -1 +1 @@
- __version__ = "0.3.4"
+ __version__ = "0.3.6"
lionagi-0.3.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lionagi
- Version: 0.3.4
+ Version: 0.3.6
  Summary: Towards automated general intelligence.
  Author: HaiyangLi
  Author-email: quantocean.li@gmail.com
@@ -11,7 +11,7 @@ Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Requires-Dist: aiocache (>=0.12.0,<0.13.0)
  Requires-Dist: ipython (>=8.0.0,<9.0.0)
- Requires-Dist: lion-core (>=0.3.17,<0.4.0)
+ Requires-Dist: lion-core (>=0.4.0,<0.5.0)
  Requires-Dist: lion-openai (>=0.1.5,<0.2.0)
  Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
  Description-Content-Type: text/markdown
lionagi-0.3.6.dist-info/RECORD CHANGED
@@ -40,6 +40,15 @@ lionagi/core/director/README.md,sha256=HoIDnEmWmWXVeDfUvkyf4nXQOYqzy2jhToZcJz0zm
  lionagi/core/director/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lionagi/core/director/direct.py,sha256=vKcirI2vp9CvU6Y7SoznaLwppJpI9j9YBo0cvfWwxiM,9552
  lionagi/core/director/director.py,sha256=E-zgbAj5gbUgDrfE0YzFoipZnr0WWGZwIreEGGY2KJc,103
+ lionagi/core/director/models/__init__.py,sha256=2SSysv4nrZ998HRt9a1BeoWcgVu91Nx3dy2NL4kkHDc,300
+ lionagi/core/director/models/action_model.py,sha256=Wj04sHRlgPYiQJWEbJlzQsxKPsW4f0KkZsSWYqCcfME,1843
+ lionagi/core/director/models/brainstorm_model.py,sha256=1P2qX58qJDb1r4U14iFdl5aHtKCg7oYBUXavYc1xzIc,1177
+ lionagi/core/director/models/plan_model.py,sha256=SqaJlUXT7un-VTHk_LS3jjd00drMJEY4mJfNRwtalXk,1453
+ lionagi/core/director/models/reason_model.py,sha256=anY5fvszbABci74YvQnTVtvSh59I1cj9ZNw6A8GHfB8,2124
+ lionagi/core/director/models/step_model.py,sha256=oB_mSwlZjp5csX4YDgdX9BIu0iIefLxAhqoZU0qetXs,2008
+ lionagi/core/director/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lionagi/core/director/operations/select.py,sha256=6oqZVQMzkgizzHXuhuUmdySfDTLGRTzFEr1yTyRw9No,2834
+ lionagi/core/director/operations/utils.py,sha256=nuCvA6hXAi0oPFPdV_5kyHLGLlk1FbqhGfH2N0kPF9I,132
  lionagi/core/engine/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lionagi/core/engine/branch_engine.py,sha256=l59CTLk-ecsc_NgNgT3DkgP4OuuQ1lzdcOdDqie0H-k,12700
  lionagi/core/engine/instruction_map_engine.py,sha256=xjvqHLc7T2Hk7MoFA8y9DgMAmFKk56vTSTL-90SiAWc,8600
@@ -105,7 +114,7 @@ lionagi/core/unit/template/plan.py,sha256=i4FmKEB8eRsRCsTanvfoX-2RZ8SaM1qvLBluuY
  lionagi/core/unit/template/predict.py,sha256=-EIZQo0ZjGKy3MiM0AtqmbnJpbXcrwtSCON5n3jcyVo,3160
  lionagi/core/unit/template/score.py,sha256=ReUaIIr-NLjunSy4NNXQpIsH28NNceGBAUuPCRptzMc,3809
  lionagi/core/unit/template/select.py,sha256=VSpkphJl9bHSE8i0X6MMJD8LB5QwOj1UORHm8VDIRKE,3047
- lionagi/core/unit/unit.py,sha256=5XJ8k401cqVNLOJpNcHul3IG6WflC--AlSg2IprlSkk,15600
+ lionagi/core/unit/unit.py,sha256=a3rauBXe50SBUgndv7Q9bqF4h7pJCYdsoTfPf1e8GCs,15548
  lionagi/core/unit/unit_form.py,sha256=zK_ij3Tod5FwMVdIIhdVoEFvD3br-YM9RPe7WsOIW2s,10980
  lionagi/core/unit/unit_mixin.py,sha256=c8GvHzgc65iJKQBKv71ET3afLPsIz5-Ce-4Eo6_bZiw,38823
  lionagi/core/unit/util.py,sha256=WN2Jop-LUwQNYJNubFPhOZrisQ6SQq-XMhD_KhzLkgE,2707
@@ -229,8 +238,8 @@ lionagi/lions/researcher/data_source/finhub_.py,sha256=W63daXgIwHJQ6TDMR2ALQIDk1
  lionagi/lions/researcher/data_source/google_.py,sha256=401SKHQaSpxiOUoXl7stadl4qeF7SIX72lUNK7bKesg,6797
  lionagi/lions/researcher/data_source/wiki_.py,sha256=UPoa2dk_y5sELu7_rkdme2auDpUmc_Dn0Avgjwr2X2g,3145
  lionagi/lions/researcher/data_source/yfinance_.py,sha256=snAf897J69MyAc6fcFjF0irrMjbAh81EZ3RvaFT3hxE,977
- lionagi/version.py,sha256=oYLGMpySamd16KLiaBTfRyrAS7_oyp-TOEHmzmeumwg,22
- lionagi-0.3.4.dist-info/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
- lionagi-0.3.4.dist-info/METADATA,sha256=VM0JO4wP1qKYkLZ7Al0lLiOi0ZaWreAjxbJmkyhdaOo,3149
- lionagi-0.3.4.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- lionagi-0.3.4.dist-info/RECORD,,
+ lionagi/version.py,sha256=W_9dCm49nLvZulVAvvsafxLJjVBSKDBHz9K7szFZllo,22
+ lionagi-0.3.6.dist-info/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
+ lionagi-0.3.6.dist-info/METADATA,sha256=YBvJNxfI2QZFGB4DW2WcHwG-WpcVaGSFUgq54xY0-KI,3148
+ lionagi-0.3.6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ lionagi-0.3.6.dist-info/RECORD,,