lionagi 0.3.7__py3-none-any.whl → 0.3.8__py3-none-any.whl
- lionagi/core/director/operations/select.py +2 -92
- lionagi/core/rule/choice.py +2 -2
- lionagi/operations/__init__.py +6 -0
- lionagi/operations/brainstorm.py +87 -0
- lionagi/operations/config.py +6 -0
- lionagi/operations/rank.py +102 -0
- lionagi/operations/score.py +144 -0
- lionagi/operations/select.py +141 -0
- lionagi/version.py +1 -1
- lionagi-0.3.8.dist-info/METADATA +241 -0
- {lionagi-0.3.7.dist-info → lionagi-0.3.8.dist-info}/RECORD +13 -30
- lionagi/core/director/models/__init__.py +0 -13
- lionagi/core/director/models/action_model.py +0 -61
- lionagi/core/director/models/brainstorm_model.py +0 -42
- lionagi/core/director/models/plan_model.py +0 -51
- lionagi/core/director/models/reason_model.py +0 -63
- lionagi/core/director/models/step_model.py +0 -65
- lionagi/core/operations/__init__.py +0 -0
- lionagi/core/operations/chat/__init__.py +0 -0
- lionagi/core/operations/direct/__init__.py +0 -0
- lionagi/core/operative/__init__.py +0 -0
- lionagi/operations/brainstorm/__init__.py +0 -0
- lionagi/operations/chat/__init__.py +0 -0
- lionagi/operations/models/__init__.py +0 -0
- lionagi/operations/plan/__init__.py +0 -0
- lionagi/operations/plan/base.py +0 -0
- lionagi/operations/query/__init__.py +0 -0
- lionagi/operations/rank/__init__.py +0 -0
- lionagi/operations/react/__init__.py +0 -0
- lionagi/operations/route/__init__.py +0 -0
- lionagi/operations/score/__init__.py +0 -0
- lionagi/operations/select/__init__.py +0 -0
- lionagi/operations/strategize/__init__.py +0 -0
- lionagi-0.3.7.dist-info/METADATA +0 -70
- {lionagi-0.3.7.dist-info → lionagi-0.3.8.dist-info}/LICENSE +0 -0
- {lionagi-0.3.7.dist-info → lionagi-0.3.8.dist-info}/WHEEL +0 -0
lionagi/core/director/operations/select.py
CHANGED
@@ -1,93 +1,3 @@
-from
+from lionagi.operations.select import select
 
-
-from enum import Enum
-
-from lionfuncs import choose_most_similar
-from pydantic import BaseModel
-
-from lionagi.core.director.models import ReasonModel
-from lionagi.core.session.branch import Branch
-
-from .utils import is_enum
-
-PROMPT = "Please select up to {max_num_selections} items from the following list {choices}. Provide the selection(s), and no comments from you"
-
-
-class SelectionModel(BaseModel):
-    selected: list[str | Enum]
-
-
-class ReasonSelectionModel(BaseModel):
-    selected: list[str | Enum]
-    reason: ReasonModel
-
-
-async def select(
-    choices: list[str] | type[Enum],
-    max_num_selections: int = 1,
-    instruction=None,
-    context=None,
-    system=None,
-    sender=None,
-    recipient=None,
-    reason: bool = False,
-    return_enum: bool = False,
-    enum_parser: Callable = None,  # parse the model string response to appropriate type
-    branch: Branch = None,
-    return_pydantic_model=False,
-    **kwargs,  # additional chat arguments
-):
-    selections = []
-    if return_enum and not is_enum(choices):
-        raise ValueError("return_enum can only be True if choices is an Enum")
-
-    if is_enum(choices):
-        selections = [selection.value for selection in choices]
-    else:
-        selections = choices
-
-    prompt = PROMPT.format(
-        max_num_selections=max_num_selections, choices=selections
-    )
-
-    if instruction:
-        prompt = f"{instruction}\n\n{prompt} \n\n "
-
-    branch = branch or Branch()
-    response: SelectionModel | ReasonSelectionModel | str = await branch.chat(
-        instruction=prompt,
-        context=context,
-        system=system,
-        sender=sender,
-        recipient=recipient,
-        pydantic_model=SelectionModel if not reason else ReasonSelectionModel,
-        return_pydantic_model=True,
-        **kwargs,
-    )
-
-    selected = response
-    if isinstance(response, SelectionModel | ReasonSelectionModel):
-        selected = response.selected
-    selected = [selected] if not isinstance(selected, list) else selected
-    corrected_selections = [
-        choose_most_similar(selection, selections) for selection in selected
-    ]
-
-    if return_enum:
-        out = []
-        if not enum_parser:
-            enum_parser = lambda x: x
-        for selection in corrected_selections:
-            selection = enum_parser(selection)
-            for member in choices.__members__.values():
-                if member.value == selection:
-                    out.append(member)
-        corrected_selections = out
-
-    if return_pydantic_model:
-        if not isinstance(response, SelectionModel | ReasonSelectionModel):
-            return SelectionModel(selected=corrected_selections)
-        response.selected = corrected_selections
-        return response
-    return corrected_selections
+__all__ = ["select"]
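Since the implementation moved, the old import path now resolves to the new function; a minimal sketch of the equivalence (assuming the re-export above is the module's only remaining content):

from lionagi.core.director.operations.select import select as old_path_select
from lionagi.operations.select import select as new_path_select

# Both names bind the same coroutine function after this release.
assert old_path_select is new_path_select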
lionagi/core/rule/choice.py
CHANGED
@@ -1,4 +1,4 @@
-from lionfuncs import
+from lionfuncs import string_similarity
 
 from lionagi.core.rule.base import Rule
 
@@ -45,4 +45,4 @@ class ChoiceRule(Rule):
         Returns:
             str: The most similar value from the set of predefined choices.
         """
-        return
+        return string_similarity(value, self.keys, choose_most_similar=True)
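For context, a minimal sketch of what the rewritten rule now does, mirroring the call above (the keys list is hypothetical, and the exact return format is an assumption about lionfuncs' string_similarity when choose_most_similar=True):

from lionfuncs import string_similarity

keys = ["red", "green", "blue"]  # hypothetical choice set for a ChoiceRule
# A near-miss value should be coerced to the closest predefined choice.
print(string_similarity("gren", keys, choose_most_similar=True))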
lionagi/operations/brainstorm.py
CHANGED
@@ -0,0 +1,87 @@
+from typing import Any
+
+from lion_core.operative.step_model import StepModel
+from lion_core.session.branch import Branch
+from lion_service import iModel
+from pydantic import BaseModel, Field
+
+from .config import DEFAULT_CHAT_CONFIG
+
+
+class BrainstormModel(BaseModel):
+
+    topic: str = Field(
+        default_factory=str,
+        description="**Specify the topic or theme for the brainstorming session.**",
+    )
+    ideas: list[StepModel] = Field(
+        default_factory=list,
+        description="**Provide a list of ideas needed to accomplish the objective. Each step should be as described in a `PlanStepModel`.**",
+    )
+
+
+PROMPT = "Please follow prompt and provide {num_steps} different ideas for the next step"
+
+
+async def brainstorm(
+    num_steps: int = 3,
+    instruction=None,
+    guidance=None,
+    context=None,
+    system=None,
+    reason: bool = False,
+    actions: bool = False,
+    tools: Any = None,
+    imodel: iModel = None,
+    branch: Branch = None,
+    sender=None,
+    recipient=None,
+    clear_messages: bool = False,
+    system_sender=None,
+    system_datetime=None,
+    return_branch=False,
+    num_parse_retries: int = 3,
+    retry_imodel: iModel = None,
+    branch_user=None,
+    **kwargs,  # additional operate arguments
+):
+    if branch and branch.imodel:
+        imodel = imodel or branch.imodel
+    else:
+        imodel = imodel or iModel(**DEFAULT_CHAT_CONFIG)
+
+    prompt = PROMPT.format(num_steps=num_steps)
+
+    branch = branch or Branch(imodel=imodel)
+    if branch_user:
+        branch.user = branch_user
+
+    if system:
+        branch.add_message(
+            system=system,
+            system_datetime=system_datetime,
+            sender=system_sender,
+        )
+    _context = [{"operation": prompt}]
+    if context:
+        _context.append(context)
+
+    response = await branch.operate(
+        instruction=instruction,
+        guidance=guidance,
+        context=_context,
+        sender=sender,
+        recipient=recipient,
+        reason=reason,
+        actions=actions,
+        tools=tools,
+        clear_messages=clear_messages,
+        operative_model=BrainstormModel,
+        retry_imodel=retry_imodel,
+        num_parse_retries=num_parse_retries,
+        imodel=imodel,
+        **kwargs,
+    )
+    if return_branch:
+        return response, branch
+    return response
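A minimal usage sketch for the new operation (the instruction text is hypothetical; assumes a default chat model is available via DEFAULT_CHAT_CONFIG):

import asyncio

from lionagi.operations.brainstorm import brainstorm

async def main():
    # Returns a BrainstormModel with `topic` and `ideas: list[StepModel]`.
    result = await brainstorm(
        num_steps=5,
        instruction="Brainstorm ways to speed up CI",  # hypothetical prompt
    )
    print(result.topic)
    for idea in result.ideas:
        print(idea)

asyncio.run(main())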
lionagi/operations/rank.py
CHANGED
@@ -0,0 +1,102 @@
+import asyncio
+from typing import Any
+
+import numpy as np
+from lion_core.session.branch import Branch
+from lion_core.session.session import Session
+from lion_service import iModel
+from lionfuncs import alcall, to_list
+
+from .config import DEFAULT_CHAT_CONFIG
+from .score import score
+
+PROMPT = (
+    "Given all items: \n {choices} \n\n Please follow prompt and give score "
+    "to the item of interest: \n {item} \n\n"
+)
+
+
+async def rank(
+    choices: list[Any],
+    num_scorers: int = 5,
+    instruction=None,
+    guidance=None,
+    context=None,
+    system=None,
+    reason: bool = False,
+    actions: bool = False,
+    tools: Any = None,
+    imodel: iModel = None,
+    branch: Branch = None,  # branch won't be used for the vote, it is for configuration
+    clear_messages: bool = False,
+    system_sender=None,
+    system_datetime=None,
+    num_parse_retries: int = 0,
+    retry_imodel: iModel = None,
+    return_session: bool = False,
+    **kwargs,  # additional kwargs for score function
+) -> dict:
+
+    if branch and branch.imodel:
+        imodel = imodel or branch.imodel
+    else:
+        imodel = imodel or iModel(**DEFAULT_CHAT_CONFIG)
+
+    branch = branch or Branch(imodel=imodel)
+    session = Session(default_branch=branch)
+
+    async def _score(item):
+        async with session.branches.async_lock:
+            b_ = session.new_branch(messages=session.default_branch.messages)
+
+        prompt = PROMPT.format(choices=choices, item=item)
+        if instruction:
+            prompt = f"{instruction}\n\n{prompt} \n\n "
+
+        kwargs["branch"] = b_
+        kwargs["score_range"] = kwargs.get("score_range", (1, 10))
+        kwargs["num_scores"] = kwargs.get("num_scores", 1)
+        kwargs["precision"] = kwargs.get("precision", 1)
+
+        response = await score(
+            instruction=prompt,
+            guidance=guidance,
+            context=context,
+            system=system,
+            system_datetime=system_datetime,
+            system_sender=system_sender,
+            sender=session.ln_id,
+            recipient=b_.ln_id,
+            default_score=-1,
+            reason=reason,
+            actions=actions,
+            tools=tools,
+            clear_messages=clear_messages,
+            num_parse_retries=num_parse_retries,
+            retry_imodel=retry_imodel,
+            **kwargs,
+        )
+
+        if response.score == -1:
+            return None
+
+        return response
+
+    async def _group_score(item):
+        tasks = [asyncio.create_task(_score(item)) for _ in range(num_scorers)]
+        responses = await asyncio.gather(*tasks)
+        responses = [i for i in responses if i is not None]
+        scores = to_list(
+            [i.score for i in responses], dropna=True, flatten=True
+        )
+        return {
+            "item": item,
+            "scores": scores,
+            "average": np.mean(scores) if scores else -1,
+        }
+
+    results = await alcall(choices, _group_score)
+    results = sorted(results, key=lambda x: x["average"], reverse=True)
+    if return_session:
+        return results, session
+    return results
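A minimal usage sketch (choices and instruction are hypothetical): each item is scored by num_scorers parallel branches via score(), failed parses (score == -1) are dropped, and results come back sorted by mean score:

import asyncio

from lionagi.operations.rank import rank

async def main():
    results = await rank(
        choices=["option A", "option B", "option C"],  # hypothetical items
        num_scorers=3,
        instruction="Score each option by feasibility",  # hypothetical prompt
    )
    best = results[0]  # dicts sorted by "average", descending
    print(best["item"], best["scores"], best["average"])

asyncio.run(main())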
lionagi/operations/score.py
CHANGED
@@ -0,0 +1,144 @@
+import logging
+from typing import Any
+
+import numpy as np
+from lion_core.session.branch import Branch
+from lion_service import iModel
+from lionfuncs import to_num
+from pydantic import BaseModel, ConfigDict, Field, field_validator
+
+from .config import DEFAULT_CHAT_CONFIG
+
+PROMPT = "Please follow prompt and provide {num_scores} numeric score(s) in {score_range} for the given context. Return as {return_precision} format"
+
+
+class ScoreModel(BaseModel):
+
+    score: list | float = Field(
+        default_factory=list,
+        description="** A numeric score or a list of numeric scores.**",
+    )
+
+    model_config = ConfigDict(
+        population_by_field_name=True,
+        arbitrary_types_allowed=True,
+    )
+
+    @field_validator("score", mode="before")
+    def validate_score(cls, value) -> list:
+        return [value] if not isinstance(value, list) else value
+
+
+async def score(
+    score_range=(1, 10),
+    instruction=None,
+    guidance=None,
+    context=None,
+    system=None,
+    reason: bool = False,
+    actions: bool = False,
+    tools: Any = None,
+    imodel: iModel = None,
+    branch: Branch = None,
+    sender=None,
+    recipient=None,
+    clear_messages: bool = False,
+    system_sender=None,
+    system_datetime=None,
+    return_branch=False,
+    num_parse_retries: int = 0,
+    retry_imodel: iModel = None,
+    num_scores: int = 1,
+    use_average: bool = False,
+    precision: int = 0,
+    default_score=np.nan,
+    **kwargs,
+) -> ScoreModel:
+
+    if branch and branch.imodel:
+        imodel = imodel or branch.imodel
+    else:
+        imodel = imodel or iModel(**DEFAULT_CHAT_CONFIG)
+
+    branch = branch or Branch(imodel=imodel)
+
+    return_precision = "integer" if precision == 0 else f"num:{precision}f"
+    prompt = PROMPT.format(
+        num_scores=num_scores,
+        score_range=score_range,
+        return_precision=return_precision,
+    )
+    if instruction:
+        prompt = f"{instruction}\n\n{prompt} \n\n "
+
+    if system:
+        branch.add_message(
+            system=system,
+            system_datetime=system_datetime,
+            sender=system_sender,
+        )
+
+    _context = [{"operation": prompt}]
+    if context:
+        _context.append(context)
+
+    kwargs["frozen"] = False
+    response = await branch.operate(
+        instruction=instruction,
+        guidance=guidance,
+        context=_context,
+        sender=sender,
+        recipient=recipient,
+        reason=reason,
+        actions=actions,
+        tools=tools,
+        clear_messages=clear_messages,
+        operative_model=ScoreModel,
+        imodel=imodel,
+        retry_imodel=retry_imodel,
+        num_parse_retries=num_parse_retries,
+        **kwargs,
+    )
+
+    return_kind = int if precision == 0 else float
+    err = None
+    try:
+        if isinstance(response, dict):
+            response = ScoreModel(**response)
+
+        response.score = [
+            to_num(
+                i,
+                upper_bound=score_range[1],
+                lower_bound=score_range[0],
+                num_type=return_kind,
+                precision=precision,
+                num_count=num_scores,
+            )
+            for i in response.score
+        ]
+        if use_average:
+            scores = response.score
+            scores = [scores] if not isinstance(scores, list) else scores
+            response.score = np.mean(scores)
+
+        if response.score and num_scores == 1:
+            if isinstance(response.score, list):
+                response.score = response.score[0]
+
+        if return_branch:
+            return response, branch
+        return response
+
+    except Exception as e:
+        err = e
+        pass
+
+    logging.error(
+        f"Error converting score to {return_kind}: {err}, "
+        f"value is set to default: {default_score}"
+    )
+    response.score = default_score
+    if return_branch:
+        return response, branch
+    return response
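A minimal usage sketch (the context string is hypothetical): with precision=0 the prompt asks for integers, and use_average=True collapses multiple scores to their mean:

import asyncio

from lionagi.operations.score import score

async def main():
    result = await score(
        context="Evaluate the clarity of this changelog",  # hypothetical context
        score_range=(1, 10),
        num_scores=3,
        use_average=True,
    )
    print(result.score)  # a single averaged value

asyncio.run(main())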
lionagi/operations/select.py
CHANGED
@@ -0,0 +1,141 @@
+from __future__ import annotations
+
+import inspect
+from collections.abc import Callable
+from enum import Enum
+from typing import Any
+
+from lion_core.session.branch import Branch
+from lion_service import iModel
+from lionfuncs import string_similarity
+from pydantic import BaseModel, Field
+
+from lionagi.libs.sys_util import SysUtil
+
+from .config import DEFAULT_CHAT_CONFIG
+
+PROMPT = "Please select up to {max_num_selections} items from the following list {choices}. Provide the selection(s) into appropriate field in format required, and no comments from you"
+
+
+class SelectionModel(BaseModel):
+    selected: list[str | Enum] = Field(default_factory=list)
+
+
+async def select(
+    choices: list[str] | type[Enum],
+    max_num_selections: int = 1,
+    instruction=None,
+    guidance=None,
+    context=None,
+    system=None,
+    reason: bool = False,
+    actions: bool = False,
+    tools: Any = None,
+    imodel: iModel = None,
+    branch: Branch = None,
+    sender=None,
+    recipient=None,
+    return_enum: bool = False,
+    enum_parser: Callable = None,  # parse the model string response to appropriate type
+    clear_messages: bool = False,
+    system_sender=None,
+    system_datetime=None,
+    return_branch=False,
+    num_parse_retries: int = 3,
+    retry_imodel: iModel = None,
+    branch_user=None,
+    **kwargs,
+) -> SelectionModel | tuple[SelectionModel, Branch]:
+
+    if branch and branch.imodel:
+        imodel = imodel or branch.imodel
+    else:
+        imodel = imodel or iModel(**DEFAULT_CHAT_CONFIG)
+
+    selections = []
+    if return_enum and not _is_enum(choices):
+        raise ValueError("return_enum can only be True if choices is an Enum")
+
+    if _is_enum(choices):
+        selections = [selection.value for selection in choices]
+    else:
+        selections = choices
+
+    prompt = PROMPT.format(
+        max_num_selections=max_num_selections, choices=selections
+    )
+
+    if instruction:
+        prompt = f"{instruction}\n\n{prompt} \n\n "
+
+    branch = branch or Branch(imodel=imodel)
+    if branch_user:
+        try:
+            a = SysUtil.get_id(branch_user)
+            branch.user = a
+        except:
+            branch.user = branch_user
+    if system:
+        branch.add_message(
+            system=system,
+            system_datetime=system_datetime,
+            system_sender=system_sender,
+        )
+
+    kwargs["frozen"] = False
+    response_model: SelectionModel = await branch.operate(
+        instruction=prompt,
+        guidance=guidance,
+        context=context,
+        sender=sender,
+        recipient=recipient,
+        reason=reason,
+        actions=actions,
+        operative_model=SelectionModel,
+        clear_messages=clear_messages,
+        imodel=imodel,
+        num_parse_retries=num_parse_retries,
+        retry_imodel=retry_imodel,
+        tools=tools,
+        **kwargs,
+    )
+
+    selected = response_model
+    if isinstance(response_model, BaseModel) and hasattr(
+        response_model, "selected"
+    ):
+        selected = response_model.selected
+    selected = [selected] if not isinstance(selected, list) else selected
+    corrected_selections = [
+        string_similarity(
+            word=selection,
+            correct_words=selections,
+            return_most_similar=True,
+        )
+        for selection in selected
+    ]
+
+    if return_enum:
+        out = []
+        if not enum_parser:
+            enum_parser = lambda x: x
+        for selection in corrected_selections:
+            selection = enum_parser(selection)
+            for member in choices.__members__.values():
+                if member.value == selection:
+                    out.append(member)
+        corrected_selections = out
+
+    if isinstance(response_model, BaseModel):
+        response_model.selected = corrected_selections
+
+    elif isinstance(response_model, dict):
+        response_model["selected"] = corrected_selections
+
+    if return_branch:
+        return response_model, branch
+    return response_model
+
+
+def _is_enum(choices):
+    return inspect.isclass(choices) and issubclass(choices, Enum)
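A minimal usage sketch with an Enum (the Color enum and instruction are hypothetical): return_enum=True maps the fuzzy-matched strings back to enum members:

import asyncio
from enum import Enum

from lionagi.operations.select import select

class Color(Enum):  # hypothetical choices
    RED = "red"
    GREEN = "green"
    BLUE = "blue"

async def main():
    result = await select(
        choices=Color,
        max_num_selections=1,
        instruction="Pick the color of grass",  # hypothetical prompt
        return_enum=True,
    )
    print(result.selected)  # e.g. [Color.GREEN]

asyncio.run(main())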
lionagi/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.3.7"
+__version__ = "0.3.8"