lionagi 0.6.1__py3-none-any.whl → 0.7.1__py3-none-any.whl

Files changed (78)
  1. lionagi/libs/token_transform/__init__.py +0 -0
  2. lionagi/libs/token_transform/llmlingua.py +1 -0
  3. lionagi/libs/token_transform/perplexity.py +439 -0
  4. lionagi/libs/token_transform/synthlang.py +409 -0
  5. lionagi/operations/ReAct/ReAct.py +126 -0
  6. lionagi/operations/ReAct/utils.py +28 -0
  7. lionagi/operations/__init__.py +1 -9
  8. lionagi/operations/_act/act.py +73 -0
  9. lionagi/operations/chat/__init__.py +3 -0
  10. lionagi/operations/chat/chat.py +173 -0
  11. lionagi/operations/communicate/__init__.py +0 -0
  12. lionagi/operations/communicate/communicate.py +108 -0
  13. lionagi/operations/instruct/__init__.py +3 -0
  14. lionagi/operations/instruct/instruct.py +29 -0
  15. lionagi/operations/interpret/__init__.py +3 -0
  16. lionagi/operations/interpret/interpret.py +39 -0
  17. lionagi/operations/operate/__init__.py +3 -0
  18. lionagi/operations/operate/operate.py +194 -0
  19. lionagi/operations/parse/__init__.py +3 -0
  20. lionagi/operations/parse/parse.py +89 -0
  21. lionagi/operations/plan/plan.py +3 -3
  22. lionagi/operations/select/__init__.py +0 -4
  23. lionagi/operations/select/select.py +11 -30
  24. lionagi/operations/select/utils.py +13 -2
  25. lionagi/operations/translate/__init__.py +0 -0
  26. lionagi/operations/translate/translate.py +47 -0
  27. lionagi/operations/types.py +16 -0
  28. lionagi/operatives/action/manager.py +115 -93
  29. lionagi/operatives/action/request_response_model.py +31 -0
  30. lionagi/operatives/action/tool.py +50 -20
  31. lionagi/operatives/strategies/__init__.py +3 -0
  32. lionagi/protocols/_concepts.py +1 -1
  33. lionagi/protocols/adapters/adapter.py +25 -0
  34. lionagi/protocols/adapters/json_adapter.py +107 -27
  35. lionagi/protocols/adapters/pandas_/csv_adapter.py +55 -11
  36. lionagi/protocols/adapters/pandas_/excel_adapter.py +52 -10
  37. lionagi/protocols/adapters/pandas_/pd_dataframe_adapter.py +54 -4
  38. lionagi/protocols/adapters/pandas_/pd_series_adapter.py +40 -0
  39. lionagi/protocols/generic/element.py +1 -1
  40. lionagi/protocols/generic/pile.py +5 -8
  41. lionagi/protocols/graph/edge.py +1 -1
  42. lionagi/protocols/graph/graph.py +16 -8
  43. lionagi/protocols/graph/node.py +1 -1
  44. lionagi/protocols/mail/exchange.py +126 -15
  45. lionagi/protocols/mail/mail.py +33 -0
  46. lionagi/protocols/mail/mailbox.py +62 -0
  47. lionagi/protocols/mail/manager.py +97 -41
  48. lionagi/protocols/mail/package.py +57 -3
  49. lionagi/protocols/messages/action_request.py +77 -26
  50. lionagi/protocols/messages/action_response.py +55 -26
  51. lionagi/protocols/messages/assistant_response.py +50 -15
  52. lionagi/protocols/messages/base.py +36 -0
  53. lionagi/protocols/messages/instruction.py +175 -145
  54. lionagi/protocols/messages/manager.py +152 -56
  55. lionagi/protocols/messages/message.py +61 -25
  56. lionagi/protocols/messages/system.py +54 -19
  57. lionagi/service/imodel.py +24 -0
  58. lionagi/session/branch.py +1116 -939
  59. lionagi/utils.py +1 -0
  60. lionagi/version.py +1 -1
  61. {lionagi-0.6.1.dist-info → lionagi-0.7.1.dist-info}/METADATA +1 -1
  62. {lionagi-0.6.1.dist-info → lionagi-0.7.1.dist-info}/RECORD +75 -56
  63. lionagi/libs/compress/models.py +0 -66
  64. lionagi/libs/compress/utils.py +0 -69
  65. lionagi/operations/select/prompt.py +0 -5
  66. /lionagi/{libs/compress → operations/ReAct}/__init__.py +0 -0
  67. /lionagi/operations/{strategies → _act}/__init__.py +0 -0
  68. /lionagi/{operations → operatives}/strategies/base.py +0 -0
  69. /lionagi/{operations → operatives}/strategies/concurrent.py +0 -0
  70. /lionagi/{operations → operatives}/strategies/concurrent_chunk.py +0 -0
  71. /lionagi/{operations → operatives}/strategies/concurrent_sequential_chunk.py +0 -0
  72. /lionagi/{operations → operatives}/strategies/params.py +0 -0
  73. /lionagi/{operations → operatives}/strategies/sequential.py +0 -0
  74. /lionagi/{operations → operatives}/strategies/sequential_chunk.py +0 -0
  75. /lionagi/{operations → operatives}/strategies/sequential_concurrent_chunk.py +0 -0
  76. /lionagi/{operations → operatives}/strategies/utils.py +0 -0
  77. {lionagi-0.6.1.dist-info → lionagi-0.7.1.dist-info}/WHEEL +0 -0
  78. {lionagi-0.6.1.dist-info → lionagi-0.7.1.dist-info}/licenses/LICENSE +0 -0
lionagi/operations/chat/chat.py
@@ -0,0 +1,173 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from typing import TYPE_CHECKING, Literal
+
+ from pydantic import BaseModel
+
+ from lionagi.protocols.types import (
+     ActionResponse,
+     AssistantResponse,
+     Instruction,
+     Log,
+     RoledMessage,
+ )
+ from lionagi.service.imodel import iModel
+ from lionagi.utils import copy
+
+ if TYPE_CHECKING:
+     from lionagi.session.branch import Branch
+
+
+ async def chat(
+     branch: "Branch",
+     instruction=None,
+     guidance=None,
+     context=None,
+     sender=None,
+     recipient=None,
+     request_fields=None,
+     response_format: type[BaseModel] = None,
+     progression=None,
+     imodel: iModel = None,
+     tool_schemas=None,
+     images: list = None,
+     image_detail: Literal["low", "high", "auto"] = None,
+     plain_content: str = None,
+     return_ins_res_message: bool = False,
+     **kwargs,
+ ) -> tuple[Instruction, AssistantResponse]:
+     ins: Instruction = branch.msgs.create_instruction(
+         instruction=instruction,
+         guidance=guidance,
+         context=context,
+         sender=sender or branch.user or "user",
+         recipient=recipient or branch.id,
+         response_format=response_format,
+         request_fields=request_fields,
+         images=images,
+         image_detail=image_detail,
+         tool_schemas=tool_schemas,
+         plain_content=plain_content,
+     )
+
+     progression = progression or branch.msgs.progression
+     messages: list[RoledMessage] = [
+         branch.msgs.messages[i] for i in progression
+     ]
+
+     use_ins = None
+     _to_use = []
+     _action_responses: set[ActionResponse] = set()
+
+     for i in messages:
+         if isinstance(i, ActionResponse):
+             _action_responses.add(i)
+         if isinstance(i, AssistantResponse):
+             j = AssistantResponse(
+                 role=i.role,
+                 content=copy(i.content),
+                 sender=i.sender,
+                 recipient=i.recipient,
+                 template=i.template,
+             )
+             _to_use.append(j)
+         if isinstance(i, Instruction):
+             j = Instruction(
+                 role=i.role,
+                 content=copy(i.content),
+                 sender=i.sender,
+                 recipient=i.recipient,
+                 template=i.template,
+             )
+             j.tool_schemas = None
+             j.respond_schema_info = None
+             j.request_response_format = None
+
+             if _action_responses:
+                 d_ = [k.content for k in _action_responses]
+                 for z in d_:
+                     if z not in j.context:
+                         j.context.append(z)
+
+                 _to_use.append(j)
+                 _action_responses = set()
+             else:
+                 _to_use.append(j)
+
+     messages = _to_use
+     if _action_responses:
+         j = ins.model_copy()
+         d_ = [k.content for k in _action_responses]
+         for z in d_:
+             if z not in j.context:
+                 j.context.append(z)
+         use_ins = j
+
+     if messages and len(messages) > 1:
+         _msgs = [messages[0]]
+
+         for i in messages[1:]:
+             if isinstance(i, AssistantResponse):
+                 if isinstance(_msgs[-1], AssistantResponse):
+                     _msgs[-1].response = (
+                         f"{_msgs[-1].response}\n\n{i.response}"
+                     )
+                 else:
+                     _msgs.append(i)
+             else:
+                 if isinstance(_msgs[-1], AssistantResponse):
+                     _msgs.append(i)
+         messages = _msgs
+
+     imodel = imodel or branch.chat_model
+     if branch.msgs.system and imodel.sequential_exchange:
+         messages = [msg for msg in messages if msg.role != "system"]
+         first_instruction = None
+
+         if len(messages) == 0:
+             first_instruction = ins.model_copy()
+             first_instruction.guidance = branch.msgs.system.rendered + (
+                 first_instruction.guidance or ""
+             )
+             messages.append(first_instruction)
+         elif len(messages) >= 1:
+             first_instruction = messages[0]
+             if not isinstance(first_instruction, Instruction):
+                 raise ValueError(
+                     "First message in progression must be an Instruction or System"
+                 )
+             first_instruction = first_instruction.model_copy()
+             first_instruction.guidance = branch.msgs.system.rendered + (
+                 first_instruction.guidance or ""
+             )
+             messages[0] = first_instruction
+             messages.append(use_ins or ins)
+
+     else:
+         messages.append(use_ins or ins)
+
+     kwargs["messages"] = [i.chat_msg for i in messages]
+     imodel = imodel or branch.chat_model
+
+     api_call = None
+     if kwargs.get("stream", None) is True:
+         api_call = await imodel.stream(**kwargs)
+     else:
+         api_call = await imodel.invoke(**kwargs)
+
+     branch._log_manager.log(Log.create(api_call))
+
+     if return_ins_res_message:
+         # Wrap result in `AssistantResponse` and return
+         return ins, AssistantResponse.create(
+             assistant_response=api_call.response,
+             sender=branch.id,
+             recipient=branch.user,
+         )
+     return AssistantResponse.create(
+         assistant_response=api_call.response,
+         sender=branch.id,
+         recipient=branch.user,
+     ).response
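
For orientation, a minimal usage sketch of the new chat() operation. It assumes Branch and iModel are importable from the top-level lionagi package and that provider credentials are configured; the constructor arguments and the provider/model names are placeholders, not part of this diff.

import asyncio

from lionagi import Branch, iModel  # assumed top-level exports

async def main():
    # assumed constructor arguments; adjust to your provider setup
    branch = Branch(chat_model=iModel(provider="openai", model="gpt-4o-mini"))
    # with return_ins_res_message=True, chat() returns the Instruction it built
    # together with the raw AssistantResponse (see the signature above)
    ins, res = await branch.chat(
        instruction="Summarize the benefits of unit tests in two sentences.",
        return_ins_res_message=True,
    )
    print(res.response)

asyncio.run(main())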
lionagi/operations/communicate/__init__.py (file without changes)
lionagi/operations/communicate/communicate.py
@@ -0,0 +1,108 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ import logging
+ from typing import TYPE_CHECKING
+
+ from lionagi.libs.validate.fuzzy_validate_mapping import fuzzy_validate_mapping
+ from lionagi.utils import UNDEFINED
+
+ if TYPE_CHECKING:
+     from lionagi.session.branch import Branch
+
+
+ async def communicate(
+     branch: "Branch",
+     instruction=None,
+     *,
+     guidance=None,
+     context=None,
+     plain_content=None,
+     sender=None,
+     recipient=None,
+     progression=None,
+     request_model=None,
+     response_format=None,
+     request_fields=None,
+     imodel=None,
+     chat_model=None,
+     parse_model=None,
+     skip_validation=False,
+     images=None,
+     image_detail="auto",
+     num_parse_retries=3,
+     fuzzy_match_kwargs=None,
+     clear_messages=False,
+     operative_model=None,
+     **kwargs,
+ ):
+     if operative_model:
+         logging.warning(
+             "operative_model is deprecated. Use response_format instead."
+         )
+     if (
+         (operative_model and response_format)
+         or (operative_model and request_model)
+         or (response_format and request_model)
+     ):
+         raise ValueError(
+             "Cannot specify both operative_model and response_format"
+             "or operative_model and request_model as they are aliases"
+             "for the same parameter."
+         )
+
+     response_format = response_format or operative_model or request_model
+
+     imodel = imodel or chat_model or branch.chat_model
+     parse_model = parse_model or branch.parse_model
+
+     if clear_messages:
+         branch.msgs.clear_messages()
+
+     if num_parse_retries > 5:
+         logging.warning(
+             f"Are you sure you want to retry {num_parse_retries} "
+             "times? lowering retry attempts to 5. Suggestion is under 3"
+         )
+         num_parse_retries = 5
+
+     ins, res = await branch.chat(
+         instruction=instruction,
+         guidance=guidance,
+         context=context,
+         sender=sender,
+         recipient=recipient,
+         response_format=response_format,
+         progression=progression,
+         imodel=imodel,
+         images=images,
+         image_detail=image_detail,
+         plain_content=plain_content,
+         return_ins_res_message=True,
+         **kwargs,
+     )
+     branch.msgs.add_message(instruction=ins)
+     branch.msgs.add_message(assistant_response=res)
+
+     if skip_validation:
+         return res.response
+
+     if response_format is not None:
+         return await branch.parse(
+             text=res.response,
+             request_type=response_format,
+             max_retries=num_parse_retries,
+             **(fuzzy_match_kwargs or {}),
+         )
+
+     if request_fields is not None:
+         _d = fuzzy_validate_mapping(
+             res.response,
+             request_fields,
+             handle_unmatched="force",
+             fill_value=UNDEFINED,
+         )
+         return {k: v for k, v in _d.items() if v != UNDEFINED}
+
+     return res.response
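
A sketch of how communicate() layers parsing on top of chat(): because response_format is a Pydantic model, the reply is routed through branch.parse(). CityWeather and the branch argument are illustrative; any configured Branch (for example the one from the previous sketch) is assumed.

from pydantic import BaseModel

class CityWeather(BaseModel):
    city: str
    summary: str

async def structured_reply(branch) -> CityWeather:
    # communicate() sends the instruction via chat(), records both messages on
    # the branch, then validates the reply against CityWeather via parse()
    return await branch.communicate(
        "Describe typical spring weather in Kyoto.",
        response_format=CityWeather,
    )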
lionagi/operations/instruct/__init__.py
@@ -0,0 +1,3 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
lionagi/operations/instruct/instruct.py
@@ -0,0 +1,29 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from typing import TYPE_CHECKING, Any
+
+ from lionagi.operatives.types import Instruct
+
+ if TYPE_CHECKING:
+     from lionagi.session.branch import Branch
+
+
+ async def instruct(
+     branch: "Branch",
+     instruct: Instruct,
+     /,
+     **kwargs,
+ ) -> Any:
+     config = {
+         **(instruct.to_dict() if isinstance(instruct, Instruct) else instruct),
+         **kwargs,
+     }
+     if any(i in config and config[i] for i in Instruct.reserved_kwargs):
+         if "response_format" in config or "request_model" in config:
+             return await branch.operate(**config)
+         for i in Instruct.reserved_kwargs:
+             config.pop(i, None)
+
+     return await branch.communicate(**config)
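
A sketch of the dispatch behavior of instruct(): payloads carrying a response_format or request_model go to operate(), everything else falls through to communicate(). The module-level function is called directly here, since the corresponding Branch method is not shown in this diff; the payload text is illustrative.

from lionagi.operatives.types import Instruct
from lionagi.operations.instruct.instruct import instruct

async def run_plain_instruct(branch):
    # no response_format/request_model here, so this ends up in branch.communicate()
    payload = Instruct(
        instruction="List three uses of SQLite.",
        guidance="Keep each item under ten words.",
    )
    return await instruct(branch, payload)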
lionagi/operations/interpret/__init__.py
@@ -0,0 +1,3 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
lionagi/operations/interpret/interpret.py
@@ -0,0 +1,39 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from lionagi.session.branch import Branch
+
+
+ async def interpret(
+     branch: "Branch",
+     text: str,
+     domain: str | None = None,
+     style: str | None = None,
+     **kwargs,
+ ) -> str:
+     instruction = (
+         "Rewrite the following user input into a clear, structured prompt or "
+         "query for an LLM, ensuring any implicit details are made explicit. "
+         "Return only the improved user prompt."
+     )
+     guidance = (
+         f"Domain hint: {domain or 'general'}. "
+         f"Desired style: {style or 'concise'}. "
+         "You can add or clarify context if needed."
+     )
+     context = [f"User input: {text}"]
+
+     # Default temperature if none provided
+     kwargs["temperature"] = kwargs.get("temperature", 0.1)
+
+     refined_prompt = await branch.chat(
+         instruction=instruction,
+         guidance=guidance,
+         context=context,
+         **kwargs,
+     )
+     return str(refined_prompt)
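
A sketch of interpret(), which rewrites raw user input into a cleaner prompt before it is passed to another operation. The module function is called directly for the same reason as above; the input text and the domain/style hints are illustrative free-form strings.

from lionagi.operations.interpret.interpret import interpret

async def refine_prompt(branch) -> str:
    # returns the rewritten prompt as a string; temperature defaults to 0.1
    # inside interpret() unless overridden via **kwargs
    return await interpret(
        branch,
        "plz write me somethin about tokyo weather for my blog",
        domain="travel writing",
        style="concise",
    )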
lionagi/operations/operate/__init__.py
@@ -0,0 +1,3 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
lionagi/operations/operate/operate.py
@@ -0,0 +1,194 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ import logging
+ from typing import TYPE_CHECKING, Literal
+
+ from pydantic import BaseModel, JsonValue
+
+ from lionagi.operatives.types import (
+     FieldModel,
+     Instruct,
+     ModelParams,
+     Operative,
+     Step,
+     ToolRef,
+ )
+ from lionagi.protocols.types import Instruction, Progression, SenderRecipient
+ from lionagi.service.imodel import iModel
+
+ if TYPE_CHECKING:
+     from lionagi.session.branch import Branch
+
+
+ async def operate(
+     branch: "Branch",
+     *,
+     instruct: Instruct = None,
+     instruction: Instruction | JsonValue = None,
+     guidance: JsonValue = None,
+     context: JsonValue = None,
+     sender: SenderRecipient = None,
+     recipient: SenderRecipient = None,
+     progression: Progression = None,
+     imodel: iModel = None,  # deprecated, alias of chat_model
+     chat_model: iModel = None,
+     invoke_actions: bool = True,
+     tool_schemas: list[dict] = None,
+     images: list = None,
+     image_detail: Literal["low", "high", "auto"] = None,
+     parse_model: iModel = None,
+     skip_validation: bool = False,
+     tools: ToolRef = None,
+     operative: Operative = None,
+     response_format: type[BaseModel] = None,  # alias of operative.request_type
+     return_operative: bool = False,
+     actions: bool = False,
+     reason: bool = False,
+     action_kwargs: dict = None,
+     field_models: list[FieldModel] = None,
+     exclude_fields: list | dict | None = None,
+     request_params: ModelParams = None,
+     request_param_kwargs: dict = None,
+     response_params: ModelParams = None,
+     response_param_kwargs: dict = None,
+     handle_validation: Literal[
+         "raise", "return_value", "return_none"
+     ] = "return_value",
+     operative_model: type[BaseModel] = None,
+     request_model: type[BaseModel] = None,
+     **kwargs,
+ ) -> list | BaseModel | None | dict | str:
+     if operative_model:
+         logging.warning(
+             "`operative_model` is deprecated. Use `response_format` instead."
+         )
+     if (
+         (operative_model and response_format)
+         or (operative_model and request_model)
+         or (response_format and request_model)
+     ):
+         raise ValueError(
+             "Cannot specify both `operative_model` and `response_format` (or `request_model`) "
+             "as they are aliases of each other."
+         )
+
+     # Use the final chosen format
+     response_format = response_format or operative_model or request_model
+
+     # Decide which chat model to use
+     chat_model = chat_model or imodel or branch.chat_model
+     parse_model = parse_model or chat_model
+
+     # Convert dict-based instructions to Instruct if needed
+     if isinstance(instruct, dict):
+         instruct = Instruct(**instruct)
+
+     # Or create a new Instruct if not provided
+     instruct = instruct or Instruct(
+         instruction=instruction,
+         guidance=guidance,
+         context=context,
+     )
+
+     # If reason or actions are requested, apply them to instruct
+     if reason:
+         instruct.reason = True
+     if actions:
+         instruct.actions = True
+
+     # 1) Create or update the Operative
+     operative = Step.request_operative(
+         request_params=request_params,
+         reason=instruct.reason,
+         actions=instruct.actions,
+         exclude_fields=exclude_fields,
+         base_type=response_format,
+         field_models=field_models,
+         **(request_param_kwargs or {}),
+     )
+
+     # If the instruction signals actions, ensure tools are provided
+     if instruct.actions:
+         tools = tools or True
+
+     # If we want to auto-invoke tools, fetch or generate the schemas
+     if invoke_actions and tools:
+         tool_schemas = tool_schemas or branch.acts.get_tool_schema(tools=tools)
+
+     # 2) Send the instruction to the chat model
+     ins, res = await branch.chat(
+         instruction=instruct.instruction,
+         guidance=instruct.guidance,
+         context=instruct.context,
+         sender=sender,
+         recipient=recipient,
+         response_format=operative.request_type,
+         progression=progression,
+         imodel=chat_model,  # or the override
+         images=images,
+         image_detail=image_detail,
+         tool_schemas=tool_schemas,
+         return_ins_res_message=True,
+         **kwargs,
+     )
+     branch.msgs.add_message(instruction=ins)
+     branch.msgs.add_message(assistant_response=res)
+
+     # 3) Populate the operative with the raw response
+     operative.response_str_dict = res.response
+
+     # 4) Possibly skip validation
+     if skip_validation:
+         return operative if return_operative else operative.response_str_dict
+
+     # 5) Parse or validate the response into the operative's model
+     response_model = operative.update_response_model(res.response)
+     if not isinstance(response_model, BaseModel):
+         # If the response isn't directly a model, attempt a parse
+         response_model = await branch.parse(
+             text=res.response,
+             request_type=operative.request_type,
+             max_retries=operative.max_retries,
+             handle_validation="return_value",
+         )
+         operative.response_model = operative.update_response_model(
+             text=response_model
+         )
+
+     # If we still fail to parse, handle according to user preference
+     if not isinstance(response_model, BaseModel):
+         match handle_validation:
+             case "return_value":
+                 return response_model
+             case "return_none":
+                 return None
+             case "raise":
+                 raise ValueError(
+                     "Failed to parse the LLM response into the requested format."
+                 )
+
+     # 6) If no tool invocation is needed, return result or operative
+     if not invoke_actions:
+         return operative if return_operative else operative.response_model
+
+     # 7) If the model indicates an action is required, call the tools
+     if (
+         getattr(response_model, "action_required", None) is True
+         and getattr(response_model, "action_requests", None) is not None
+     ):
+         action_response_models = await branch.act(
+             response_model.action_requests,
+             **(action_kwargs or {}),
+         )
+         # Possibly refine the operative with the tool outputs
+         operative = Step.respond_operative(
+             response_params=response_params,
+             operative=operative,
+             additional_data={"action_responses": action_response_models},
+             **(response_param_kwargs or {}),
+         )
+
+     # Return final result or the full operative
+     return operative if return_operative else operative.response_model
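
A sketch of operate() for structured output with an added reasoning field; branch.operate is invoked the same way instruct.py does above. TaskPlan is illustrative, and with reason=True the validated result is a model derived from TaskPlan rather than TaskPlan itself.

from pydantic import BaseModel

class TaskPlan(BaseModel):
    title: str
    steps: list[str]

async def plan_post(branch) -> BaseModel:
    # reason=True makes Step.request_operative extend the response schema with a
    # reason field; return_operative=False (the default) returns the validated model
    return await branch.operate(
        instruction="Plan a blog post about asyncio pitfalls.",
        response_format=TaskPlan,
        reason=True,
    )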
lionagi/operations/parse/__init__.py
@@ -0,0 +1,3 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
lionagi/operations/parse/parse.py
@@ -0,0 +1,89 @@
+ # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from typing import TYPE_CHECKING, Any, Literal
+
+ from pydantic import BaseModel
+
+ from lionagi.libs.validate.fuzzy_validate_mapping import fuzzy_validate_mapping
+ from lionagi.operatives.types import Operative
+ from lionagi.utils import breakdown_pydantic_annotation
+
+ if TYPE_CHECKING:
+     from lionagi.session.branch import Branch
+
+
+ async def parse(
+     branch: "Branch",
+     text: str,
+     handle_validation: Literal[
+         "raise", "return_value", "return_none"
+     ] = "return_value",
+     max_retries: int = 3,
+     request_type: type[BaseModel] = None,
+     operative: Operative = None,
+     similarity_algo="jaro_winkler",
+     similarity_threshold: float = 0.85,
+     fuzzy_match: bool = True,
+     handle_unmatched: Literal[
+         "ignore", "raise", "remove", "fill", "force"
+     ] = "force",
+     fill_value: Any = None,
+     fill_mapping: dict[str, Any] | None = None,
+     strict: bool = False,
+     suppress_conversion_errors: bool = False,
+     response_format=None,
+ ):
+     _should_try = True
+     num_try = 0
+     response_model = text
+     if operative is not None:
+         max_retries = operative.max_retries
+         response_format = operative.request_type
+
+     while (
+         _should_try
+         and num_try < max_retries
+         and not isinstance(response_model, BaseModel)
+     ):
+         num_try += 1
+         _, res = await branch.chat(
+             instruction="reformat text into specified model",
+             guidane="follow the required response format, using the model schema as a guide",
+             context=[{"text_to_format": text}],
+             response_format=response_format or request_type,
+             sender=branch.user,
+             recipient=branch.id,
+             imodel=branch.parse_model,
+             return_ins_res_message=True,
+         )
+         if operative is not None:
+             response_model = operative.update_response_model(res.response)
+         else:
+             response_model = fuzzy_validate_mapping(
+                 res.response,
+                 breakdown_pydantic_annotation(request_type),
+                 similarity_algo=similarity_algo,
+                 similarity_threshold=similarity_threshold,
+                 fuzzy_match=fuzzy_match,
+                 handle_unmatched=handle_unmatched,
+                 fill_value=fill_value,
+                 fill_mapping=fill_mapping,
+                 strict=strict,
+                 suppress_conversion_errors=suppress_conversion_errors,
+             )
+             response_model = request_type.model_validate(response_model)
+
+     if not isinstance(response_model, BaseModel):
+         match handle_validation:
+             case "return_value":
+                 return response_model
+             case "return_none":
+                 return None
+             case "raise":
+                 raise ValueError(
+                     "Failed to parse response into request format"
+                 )
+
+     return response_model
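
A sketch of parse(), which re-prompts the parse model until the text validates against the requested Pydantic type; branch.parse is used here in the same form communicate() uses it above. Contact and the sample text are illustrative.

from pydantic import BaseModel

class Contact(BaseModel):
    name: str
    email: str

async def extract_contact(branch) -> Contact:
    # fuzzy_validate_mapping keys the raw reply to Contact's fields, then
    # model_validate builds the instance; failures follow handle_validation
    return await branch.parse(
        text="Reach Ada Lovelace at ada@example.com for details.",
        request_type=Contact,
    )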
lionagi/operations/plan/plan.py
@@ -6,7 +6,7 @@ from typing import Any, Literal
 
  from pydantic import BaseModel
 
- from lionagi.operatives.instruct.instruct import (
+ from lionagi.operatives.types import (
      LIST_INSTRUCT_FIELD_MODEL,
      Instruct,
      InstructResponse,
@@ -293,7 +293,7 @@ async def plan(
          # ---------------------------------------------------------
          # Strategy C: SEQUENTIAL_CONCURRENT_CHUNK
          # - process plan steps in chunks (one chunk after another),
-         # - each chunks steps run in parallel.
+         # - each chunk's steps run in parallel.
          # ---------------------------------------------------------
          case "sequential_concurrent_chunk":
              chunk_size = (execution_kwargs or {}).get("chunk_size", 5)
@@ -334,7 +334,7 @@ async def plan(
          # Strategy D: CONCURRENT_SEQUENTIAL_CHUNK
          # - split plan steps into chunks,
          # - run all chunks in parallel,
-         # - but each chunks steps run sequentially.
+         # - but each chunk's steps run sequentially.
          # ---------------------------------------------------------
          case "concurrent_sequential_chunk":
              chunk_size = (execution_kwargs or {}).get("chunk_size", 5)
lionagi/operations/select/__init__.py
@@ -1,7 +1,3 @@
  # Copyright (c) 2023 - 2024, HaiyangLi <quantocean.li at gmail dot com>
  #
  # SPDX-License-Identifier: Apache-2.0
-
- from .select import select
-
- __all__ = ["select"]