lionagi 0.17.11__py3-none-any.whl → 0.18.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
Files changed (52)
  1. lionagi/libs/schema/minimal_yaml.py +98 -0
  2. lionagi/ln/types.py +32 -5
  3. lionagi/models/field_model.py +9 -0
  4. lionagi/operations/ReAct/ReAct.py +474 -237
  5. lionagi/operations/ReAct/utils.py +3 -0
  6. lionagi/operations/act/act.py +206 -0
  7. lionagi/operations/chat/chat.py +130 -114
  8. lionagi/operations/communicate/communicate.py +101 -42
  9. lionagi/operations/flow.py +4 -4
  10. lionagi/operations/interpret/interpret.py +65 -20
  11. lionagi/operations/operate/operate.py +212 -106
  12. lionagi/operations/parse/parse.py +170 -142
  13. lionagi/operations/select/select.py +78 -17
  14. lionagi/operations/select/utils.py +1 -1
  15. lionagi/operations/types.py +119 -23
  16. lionagi/protocols/generic/log.py +3 -2
  17. lionagi/protocols/messages/__init__.py +27 -0
  18. lionagi/protocols/messages/action_request.py +86 -184
  19. lionagi/protocols/messages/action_response.py +73 -131
  20. lionagi/protocols/messages/assistant_response.py +130 -159
  21. lionagi/protocols/messages/base.py +26 -18
  22. lionagi/protocols/messages/instruction.py +281 -625
  23. lionagi/protocols/messages/manager.py +112 -62
  24. lionagi/protocols/messages/message.py +87 -197
  25. lionagi/protocols/messages/system.py +52 -123
  26. lionagi/protocols/types.py +0 -2
  27. lionagi/service/connections/endpoint.py +0 -8
  28. lionagi/service/connections/providers/oai_.py +29 -94
  29. lionagi/service/connections/providers/ollama_.py +3 -2
  30. lionagi/service/hooks/hooked_event.py +2 -2
  31. lionagi/service/third_party/claude_code.py +3 -2
  32. lionagi/service/third_party/openai_models.py +433 -0
  33. lionagi/session/branch.py +170 -178
  34. lionagi/session/session.py +3 -9
  35. lionagi/tools/file/reader.py +2 -2
  36. lionagi/version.py +1 -1
  37. {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/METADATA +1 -2
  38. {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/RECORD +41 -49
  39. lionagi/operations/_act/act.py +0 -86
  40. lionagi/protocols/messages/templates/README.md +0 -28
  41. lionagi/protocols/messages/templates/action_request.jinja2 +0 -5
  42. lionagi/protocols/messages/templates/action_response.jinja2 +0 -9
  43. lionagi/protocols/messages/templates/assistant_response.jinja2 +0 -6
  44. lionagi/protocols/messages/templates/instruction_message.jinja2 +0 -61
  45. lionagi/protocols/messages/templates/system_message.jinja2 +0 -11
  46. lionagi/protocols/messages/templates/tool_schemas.jinja2 +0 -7
  47. lionagi/service/connections/providers/types.py +0 -28
  48. lionagi/service/third_party/openai_model_names.py +0 -198
  49. lionagi/service/types.py +0 -58
  50. /lionagi/operations/{_act → act}/__init__.py +0 -0
  51. {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/WHEEL +0 -0
  52. {lionagi-0.17.11.dist-info → lionagi-0.18.0.dist-info}/licenses/LICENSE +0 -0
lionagi/operations/communicate/communicate.py
@@ -1,17 +1,23 @@
 # Copyright (c) 2023-2025, HaiyangLi <quantocean.li at gmail dot com>
 # SPDX-License-Identifier: Apache-2.0
 
-import logging
-from typing import TYPE_CHECKING
+import warnings
+from typing import TYPE_CHECKING, Any, Literal
 
+from pydantic import JsonValue
+
+from lionagi.ln.fuzzy import FuzzyMatchKeysParams
 from lionagi.ln.fuzzy._fuzzy_validate import fuzzy_validate_mapping
-from lionagi.utils import UNDEFINED
+from lionagi.ln.types import Undefined
+
+from ..types import ChatParam, ParseParam
 
 if TYPE_CHECKING:
+    from lionagi.protocols.messages.instruction import Instruction
     from lionagi.session.branch import Branch
 
 
-async def communicate(
+def prepare_communicate_kw(
     branch: "Branch",
     instruction=None,
     *,
@@ -29,7 +35,7 @@ async def communicate(
     parse_model=None,
     skip_validation=False,
     images=None,
-    image_detail="auto",
+    image_detail: Literal["low", "high", "auto"] = "auto",
     num_parse_retries=3,
     fuzzy_match_kwargs=None,
     clear_messages=False,
@@ -37,88 +43,141 @@ async def communicate(
     include_token_usage_to_model: bool = False,
     **kwargs,
 ):
+    # Handle deprecated parameters
     if operative_model:
-        logging.warning(
-            "operative_model is deprecated. Use response_format instead."
+        warnings.warn(
+            "Parameter 'operative_model' is deprecated. Use 'response_format' instead.",
+            DeprecationWarning,
+            stacklevel=2,
         )
+
     if (
         (operative_model and response_format)
         or (operative_model and request_model)
         or (response_format and request_model)
     ):
         raise ValueError(
-            "Cannot specify both operative_model and response_format"
-            "or operative_model and request_model as they are aliases"
+            "Cannot specify both operative_model and response_format "
+            "or operative_model and request_model as they are aliases "
            "for the same parameter."
         )
 
     response_format = response_format or operative_model or request_model
-
     imodel = imodel or chat_model or branch.chat_model
     parse_model = parse_model or branch.parse_model
 
-    if clear_messages:
-        branch.msgs.clear_messages()
-
     if num_parse_retries > 5:
-        logging.warning(
-            f"Are you sure you want to retry {num_parse_retries} "
-            "times? lowering retry attempts to 5. Suggestion is under 3"
+        warnings.warn(
+            f"num_parse_retries={num_parse_retries} is high. Lowering to 5. Suggestion: <3",
+            UserWarning,
+            stacklevel=2,
         )
         num_parse_retries = 5
 
-    ins, res = await branch.chat(
-        instruction=instruction,
+    # Build contexts
+    chat_param = ChatParam(
         guidance=guidance,
         context=context,
-        sender=sender,
-        recipient=recipient,
+        sender=sender or branch.user or "user",
+        recipient=recipient or branch.id,
         response_format=response_format,
         progression=progression,
-        imodel=imodel,
-        images=images,
+        tool_schemas=[],
+        images=images or [],
         image_detail=image_detail,
-        plain_content=plain_content,
-        return_ins_res_message=True,
+        plain_content=plain_content or "",
         include_token_usage_to_model=include_token_usage_to_model,
-        **kwargs,
+        imodel=imodel,
+        imodel_kw=kwargs,
+    )
+
+    parse_param = None
+    if response_format and not skip_validation:
+        from ..parse.parse import get_default_call
+
+        fuzzy_kw = fuzzy_match_kwargs or {}
+        handle_validation = fuzzy_kw.pop("handle_validation", "raise")
+
+        parse_param = ParseParam(
+            response_format=response_format,
+            fuzzy_match_params=(
+                FuzzyMatchKeysParams(**fuzzy_kw)
+                if fuzzy_kw
+                else FuzzyMatchKeysParams()
+            ),
+            handle_validation=handle_validation,
+            alcall_params=get_default_call().with_updates(
+                retry_attempts=num_parse_retries
+            ),
+            imodel=parse_model,
+            imodel_kw={},
+        )
+
+    return {
+        "instruction": instruction or "",
+        "chat_param": chat_param,
+        "parse_param": parse_param,
+        "clear_messages": clear_messages,
+        "skip_validation": skip_validation,
+        "request_fields": request_fields,
+    }
+
+
+async def communicate(
+    branch: "Branch",
+    instruction: "JsonValue | Instruction",
+    chat_param: ChatParam,
+    parse_param: ParseParam | None = None,
+    clear_messages: bool = False,
+    skip_validation: bool = False,
+    request_fields: dict | None = None,
+) -> Any:
+    if clear_messages:
+        branch.msgs.clear_messages()
+
+    from ..chat.chat import chat
+
+    ins, res = await chat(
+        branch, instruction, chat_param, return_ins_res_message=True
     )
+
     branch.msgs.add_message(instruction=ins)
     branch.msgs.add_message(assistant_response=res)
 
     if skip_validation:
         return res.response
 
-    if response_format is not None:
-        # Default to raising errors unless explicitly set in fuzzy_match_kwargs
-        parse_kwargs = {
-            "handle_validation": "raise",  # Default to raising errors
-            **(fuzzy_match_kwargs or {}),
-        }
+    # Handle response_format with parse
+    if parse_param and chat_param.response_format:
+        from lionagi.protocols.messages.assistant_response import (
+            AssistantResponse,
+        )
+
+        from ..parse.parse import parse
 
         try:
-            return await branch.parse(
-                text=res.response,
-                request_type=response_format,
-                max_retries=num_parse_retries,
-                **parse_kwargs,
+            out, res2 = await parse(
+                branch, res.response, parse_param, return_res_message=True
             )
+            if res2 and isinstance(res2, AssistantResponse):
+                res.metadata["original_model_response"] = res.model_response
+                # model_response is read-only property - update metadata instead
+                res.metadata["model_response"] = res2.model_response
+            return out
         except ValueError as e:
             # Re-raise with more context
-            logging.error(
-                f"Failed to parse response '{res.response}' into {response_format}: {e}"
-            )
             raise ValueError(
-                f"Failed to parse model response into {response_format.__name__}: {e}"
+                f"Failed to parse model response into {chat_param.response_format}: {e}"
            ) from e
 
+    # Handle request_fields with fuzzy validation
     if request_fields is not None:
         _d = fuzzy_validate_mapping(
             res.response,
             request_fields,
             handle_unmatched="force",
-            fill_value=UNDEFINED,
+            fill_value=Undefined,
         )
-        return {k: v for k, v in _d.items() if v != UNDEFINED}
+        return {k: v for k, v in _d.items() if v != Undefined}
 
     return res.response
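
The net effect is that the old single communicate() coroutine is split into a synchronous prepare step (prepare_communicate_kw, which normalizes the user-facing kwargs into ChatParam/ParseParam contexts) and a lean async execute step. A minimal usage sketch of the new 0.18.0 call pattern, assuming a default-configured Branch; the Reply model and the instruction text are illustrative, not taken from the diff:

import asyncio

from pydantic import BaseModel

from lionagi.operations.communicate.communicate import (
    communicate,
    prepare_communicate_kw,
)
from lionagi.session.branch import Branch


class Reply(BaseModel):  # hypothetical response schema
    answer: str


async def main() -> None:
    branch = Branch()  # assumes default chat/parse models are configured
    # Step 1 (sync): normalize kwargs into ChatParam/ParseParam contexts.
    kw = prepare_communicate_kw(
        branch,
        instruction="Summarize this release in one sentence.",
        response_format=Reply,
    )
    # Step 2 (async): run chat + parse; returns a validated Reply instance.
    reply = await communicate(branch, **kw)
    print(reply)


# asyncio.run(main())  # requires API credentials for the configured model

Because prepare_communicate_kw returns a plain dict whose keys match communicate()'s parameters, the split also makes the kwarg normalization unit-testable without any model call.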
lionagi/operations/flow.py
@@ -429,14 +429,14 @@ class DependencyAwareExecutor:
         if hasattr(branch, "_message_manager") and hasattr(
             primary_branch, "_message_manager"
         ):
-            branch._message_manager.pile.clear()
-            for msg in primary_branch._message_manager.pile:
+            branch._message_manager.messages.clear()
+            for msg in primary_branch._message_manager.messages:
                 if hasattr(msg, "clone"):
-                    branch._message_manager.pile.append(
+                    branch._message_manager.messages.append(
                         msg.clone()
                     )
                 else:
-                    branch._message_manager.pile.append(msg)
+                    branch._message_manager.messages.append(msg)
 
         # Clear the pending flag
         branch.metadata["pending_context_inheritance"] = False
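
Functionally this hunk is only an accessor rename: the message manager's Pile is now reached via .messages instead of .pile. A hedged migration sketch for code that copies messages across branches the way this executor does (the branch arguments are assumed; the private-attribute access mirrors the diff and is not a public API):

def inherit_messages(dst_branch, src_branch) -> None:
    """Copy messages from src_branch into dst_branch (mirrors the hunk above)."""
    dst_branch._message_manager.messages.clear()  # was ._message_manager.pile
    for msg in src_branch._message_manager.messages:
        # Clone when supported so the branches don't share mutable message state.
        dst_branch._message_manager.messages.append(
            msg.clone() if hasattr(msg, "clone") else msg
        )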
lionagi/operations/interpret/interpret.py
@@ -3,40 +3,85 @@
 
 from typing import TYPE_CHECKING
 
+from ..types import ChatParam, InterpretParam
+
 if TYPE_CHECKING:
+    from lionagi.service.imodel import iModel
     from lionagi.session.branch import Branch
 
 
-async def interpret(
+def prepare_interpret_kw(
     branch: "Branch",
     text: str,
     domain: str | None = None,
     style: str | None = None,
     sample_writing: str | None = None,
-    interpret_model: str | None = None,
+    interpret_model: "iModel | None" = None,
     **kwargs,
 ) -> str:
+    """Interpret and refine user input into clearer prompts."""
+
+    # Build InterpretParam
+    intp_param = InterpretParam(
+        domain=domain or "general",
+        style=style or "concise",
+        sample_writing=sample_writing or "",
+        imodel=interpret_model or branch.chat_model,
+        imodel_kw=kwargs,
+    )
+    return {
+        "text": text,
+        "intp_param": intp_param,
+    }
+
+
+async def interpret(
+    branch: "Branch",
+    text: str,
+    intp_param: InterpretParam,
+) -> str:
+    """Execute interpretation with context - clean implementation."""
+
+    from ..chat.chat import chat
+
     instruction = (
-        "You are given a user's raw instruction or question. Your task is to rewrite it into a clearer,"
+        "You are given a user's raw instruction or question. Your task is to rewrite it into a clearer, "
         "more structured prompt for an LLM or system, making any implicit or missing details explicit. "
         "Return only the re-written prompt. Do not assume any details not mentioned in the input, nor "
         "give additional instruction than what is explicitly stated."
     )
-    guidance = f"Domain hint: {domain or 'general'}. Desired style: {style or 'concise'}. "
-    if sample_writing:
-        guidance += f" Sample writing: {sample_writing}"
-
-    context = [f"User input: {text}"]
-
-    # Default temperature if none provided
-    kwargs["guidance"] = guidance + "\n" + kwargs.get("guidance", "")
-    kwargs["instruction"] = instruction + "\n" + kwargs.get("instruction", "")
-    kwargs["temperature"] = kwargs.get("temperature", 0.1)
-    if interpret_model:
-        kwargs["chat_model"] = interpret_model
-
-    refined_prompt = await branch.chat(
-        context=context,
-        **kwargs,
+
+    guidance = (
+        f"Domain hint: {intp_param.domain}. Desired style: {intp_param.style}."
     )
-    return str(refined_prompt)
+    if intp_param.sample_writing:
+        guidance += f" Sample writing: {intp_param.sample_writing}"
+
+    # Build ChatParam
+    chat_param = ChatParam(
+        guidance=guidance,
+        context=[f"User input: {text}"],
+        sender=branch.user or "user",
+        recipient=branch.id,
+        response_format=None,
+        progression=None,
+        tool_schemas=[],
+        images=[],
+        image_detail="auto",
+        plain_content="",
+        include_token_usage_to_model=False,
+        imodel=intp_param.imodel,
+        imodel_kw={
+            **intp_param.imodel_kw,
+            "temperature": intp_param.imodel_kw.get("temperature", 0.1),
+        },
+    )
+
+    result = await chat(
+        branch,
+        instruction=instruction,
+        chat_param=chat_param,
+        return_ins_res_message=False,
+    )
+
+    return str(result)
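
interpret() follows the same prepare/execute split. A short sketch of the new call pattern, assuming a default-configured Branch; the input text and the domain/style hints are illustrative:

import asyncio

from lionagi.operations.interpret.interpret import (
    interpret,
    prepare_interpret_kw,
)
from lionagi.session.branch import Branch


async def main() -> None:
    branch = Branch()
    # prepare_interpret_kw returns {"text": ..., "intp_param": InterpretParam(...)}
    kw = prepare_interpret_kw(
        branch,
        text="fix my code it dont work",
        domain="software engineering",
        style="precise",
    )
    refined = await interpret(branch, **kw)  # the rewritten, clearer prompt
    print(refined)


# asyncio.run(main())  # requires API credentials for the configured model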