lionagi 0.13.1__py3-none-any.whl → 0.13.3__py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
Files changed (44)
  1. lionagi/fields/action.py +0 -1
  2. lionagi/fields/reason.py +0 -1
  3. lionagi/libs/file/save.py +1 -1
  4. lionagi/libs/schema/as_readable.py +184 -16
  5. lionagi/libs/schema/extract_docstring.py +1 -2
  6. lionagi/libs/token_transform/synthlang_/base.py +0 -2
  7. lionagi/libs/validate/string_similarity.py +1 -2
  8. lionagi/models/hashable_model.py +0 -1
  9. lionagi/models/schema_model.py +0 -1
  10. lionagi/operations/ReAct/utils.py +0 -1
  11. lionagi/operations/_act/act.py +0 -1
  12. lionagi/operations/interpret/interpret.py +1 -4
  13. lionagi/operations/manager.py +0 -1
  14. lionagi/operations/plan/plan.py +0 -1
  15. lionagi/operations/select/utils.py +0 -2
  16. lionagi/protocols/forms/flow.py +3 -1
  17. lionagi/protocols/generic/pile.py +1 -2
  18. lionagi/protocols/generic/processor.py +0 -1
  19. lionagi/protocols/graph/graph.py +1 -3
  20. lionagi/protocols/mail/package.py +0 -1
  21. lionagi/protocols/messages/assistant_response.py +0 -2
  22. lionagi/protocols/messages/message.py +0 -1
  23. lionagi/service/connections/endpoint_config.py +6 -0
  24. lionagi/service/connections/match_endpoint.py +26 -8
  25. lionagi/service/connections/providers/claude_code_.py +195 -22
  26. lionagi/service/connections/providers/claude_code_cli.py +414 -0
  27. lionagi/service/connections/providers/oai_.py +1 -1
  28. lionagi/service/manager.py +0 -1
  29. lionagi/service/rate_limited_processor.py +0 -2
  30. lionagi/service/token_calculator.py +0 -3
  31. lionagi/session/branch.py +0 -2
  32. lionagi/session/session.py +0 -1
  33. lionagi/settings.py +0 -1
  34. lionagi/utils.py +6 -9
  35. lionagi/version.py +1 -1
  36. {lionagi-0.13.1.dist-info → lionagi-0.13.3.dist-info}/METADATA +8 -3
  37. {lionagi-0.13.1.dist-info → lionagi-0.13.3.dist-info}/RECORD +39 -43
  38. lionagi/traits/__init__.py +0 -58
  39. lionagi/traits/base.py +0 -216
  40. lionagi/traits/composer.py +0 -343
  41. lionagi/traits/protocols.py +0 -495
  42. lionagi/traits/registry.py +0 -1071
  43. {lionagi-0.13.1.dist-info → lionagi-0.13.3.dist-info}/WHEEL +0 -0
  44. {lionagi-0.13.1.dist-info → lionagi-0.13.3.dist-info}/licenses/LICENSE +0 -0
lionagi/fields/action.py CHANGED
@@ -21,7 +21,6 @@ __all__ = (
 
 
 def parse_action_request(content: str | dict) -> list[dict]:
-
     json_blocks = []
 
     if isinstance(content, BaseModel):
lionagi/fields/reason.py CHANGED
@@ -11,7 +11,6 @@ __all__ = ("Reason",)
 
 
 class Reason(HashableModel):
-
     title: str | None = None
     content: str | None = None
     confidence_score: float | None = Field(
lionagi/libs/file/save.py CHANGED
@@ -82,7 +82,7 @@ def save_chunks(
     for i, chunk in enumerate(chunks):
         file_path = create_path(
             directory=output_path,
-            filename=f"chunk_{i+1}",
+            filename=f"chunk_{i + 1}",
             extension="json",
             timestamp=timestamp,
             random_hash_digits=random_hash_digits,
lionagi/libs/schema/as_readable.py CHANGED
@@ -3,10 +3,93 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import json
+import sys
 from typing import Any
 
 from lionagi.utils import to_dict
 
+# Try to import rich for enhanced console output
+try:
+    from rich.align import Align
+    from rich.box import MINIMAL, ROUNDED
+    from rich.console import Console
+    from rich.markdown import Markdown
+    from rich.padding import Padding
+    from rich.panel import Panel
+    from rich.style import Style
+    from rich.syntax import Syntax
+    from rich.text import Text
+    from rich.theme import Theme
+
+    DARK_THEME = Theme(
+        {
+            "info": "bright_cyan",
+            "warning": "bright_yellow",
+            "error": "bold bright_red",
+            "success": "bold bright_green",
+            "panel.border": "bright_blue",
+            "panel.title": "bold bright_cyan",
+            "markdown.h1": "bold bright_magenta",
+            "markdown.h2": "bold bright_blue",
+            "markdown.h3": "bold bright_cyan",
+            "markdown.h4": "bold bright_green",
+            "markdown.code": "bright_yellow on grey23",
+            "markdown.code_block": "bright_white on grey15",
+            "markdown.paragraph": "bright_white",
+            "markdown.text": "bright_white",
+            "markdown.emph": "italic bright_yellow",
+            "markdown.strong": "bold bright_white",
+            "markdown.item": "bright_cyan",
+            "markdown.item.bullet": "bright_blue",
+            "json.key": "bright_cyan",
+            "json.string": "bright_green",
+            "json.number": "bright_yellow",
+            "json.boolean": "bright_magenta",
+            "json.null": "bright_red",
+            "yaml.key": "bright_cyan",
+            "yaml.string": "bright_green",
+            "yaml.number": "bright_yellow",
+            "yaml.boolean": "bright_magenta",
+        }
+    )
+
+    LIGHT_THEME = Theme(
+        {
+            "info": "blue",
+            "warning": "dark_orange",
+            "error": "bold red",
+            "success": "bold green4",
+            "panel.border": "blue",
+            "panel.title": "bold blue",
+            "markdown.h1": "bold dark_magenta",
+            "markdown.h2": "bold dark_blue",
+            "markdown.h3": "bold dark_cyan",
+            "markdown.h4": "bold dark_green",
+            "markdown.code": "dark_orange on grey93",
+            "markdown.code_block": "black on grey82",
+            "markdown.paragraph": "black",
+            "markdown.text": "black",
+            "markdown.emph": "italic dark_orange",
+            "markdown.strong": "bold black",
+            "markdown.item": "dark_blue",
+            "markdown.item.bullet": "blue",
+            "json.key": "dark_blue",
+            "json.string": "dark_green",
+            "json.number": "dark_orange",
+            "json.boolean": "dark_magenta",
+            "json.null": "dark_red",
+            "yaml.key": "dark_blue",
+            "yaml.string": "dark_green",
+            "yaml.number": "dark_orange",
+            "yaml.boolean": "dark_magenta",
+        }
+    )
+    RICH_AVAILABLE = True
+except ImportError:
+    RICH_AVAILABLE = False
+    DARK_THEME = None
+    LIGHT_THEME = None
+
 
 def in_notebook() -> bool:
     """
@@ -22,6 +105,18 @@ def in_notebook() -> bool:
     return False
 
 
+def in_console() -> bool:
+    """
+    Checks if we're running in a console/terminal environment.
+    Returns True if stdout is a TTY and not in a notebook.
+    """
+    return (
+        hasattr(sys.stdout, "isatty")
+        and sys.stdout.isatty()
+        and not in_notebook()
+    )
+
+
 def format_dict(data: Any, indent: int = 0) -> str:
     """
     Recursively format Python data (dicts, lists, strings, etc.) into a
@@ -77,6 +172,11 @@ def as_readable(
     format_curly: bool = False,
     display_str: bool = False,
     max_chars: int | None = None,
+    use_rich: bool = True,
+    theme: str = "dark",
+    max_panel_width: int = 140,
+    panel: bool = True,
+    border: bool = True,
 ) -> str:
     """
     Convert `input_` into a human-readable string. If `format_curly=True`, uses
@@ -84,14 +184,22 @@
 
     - For Pydantic models or nested data, uses `to_dict` to get a dictionary.
     - If the result is a list of items, each is processed and concatenated.
+    - When in console and rich is available, provides syntax highlighting.
 
     Args:
         input_: The data to convert (could be a single item or list).
         md: If True, wraps the final output in code fences for Markdown display.
         format_curly: If True, use `format_dict`. Otherwise, produce JSON text.
+        display_str: If True, prints the output instead of returning it.
+        max_chars: If set, truncates output to this many characters.
+        use_rich: If True and rich is available, uses rich for console output.
+        theme: Color theme - "dark" (default) or "light". Dark uses GitHub Dark Dimmed,
+            light uses Solarized Light inspired colors.
+        max_panel_width: Maximum width for panels and code blocks in characters.
+        panel: If True, wraps the output in a panel for better visibility.
 
     Returns:
-        A formatted string representation of `input_`.
+        A formatted string representation of `input_` (unless display_str=True).
     """
 
     # 1) Convert the input to a Python dict/list structure
@@ -108,6 +216,7 @@
         return to_dict(obj, **to_dict_kwargs)
 
     def _inner(i_: Any) -> Any:
+        items = []
         try:
             if isinstance(i_, list):
                 # Already a list. Convert each item
@@ -146,7 +255,6 @@
         final_str = "\n\n".join(rendered).strip()
 
         # 4) If Markdown requested, wrap with code fences
-        # - If we used format_curly, we might do "```yaml" instead. But user specifically asked for JSON code blocks previously
         if md:
             if format_curly:
                 return f"```yaml\n{final_str}\n```"
@@ -157,18 +265,78 @@
 
     str_ = _inner(input_).strip()
     if max_chars is not None and len(str_) > max_chars:
-        str1 = str_[:max_chars] + "...\n\n[Truncated output]\n\n"
-        if str_.endswith("\n```"):
-            str1 += "```"
-        str_ = str1
-    if display_str:
-        if md and in_notebook():
-            # If in IPython environment, display Markdown
-            from IPython.display import Markdown, display
-
-            display(Markdown(str_))
+        trunc = str_[:max_chars] + "...\n\n[Truncated output]"
+        str_ = trunc + ("\n```" if str_.endswith("\n```") else "")
+
+    # -------------------- PRINT / DISPLAY LOGIC ---------------------------
+    if not display_str:
+        return str_  # caller will handle printing
+
+    # (1) IPython notebook --------------------------------------------------
+    if md and in_notebook():
+        from IPython.display import Markdown, display
+
+        display(Markdown(str_))
+        return
+
+    # (2) Rich console ------------------------------------------------------
+    if RICH_AVAILABLE and in_console() and use_rich:
+        console_theme = DARK_THEME if theme == "dark" else LIGHT_THEME
+        syntax_theme = "github-dark" if theme == "dark" else "solarized-light"
+        console = Console(theme=console_theme)
+
+        # determine prose / fenced code
+        is_fenced_code = (
+            md and str_.startswith("```") and str_.rstrip().endswith("```")
+        )
+        is_prose_md = md and not is_fenced_code
+        panel_width = min(console.width - 4, max_panel_width)
+
+        def _out(rich_obj):
+            if not panel:
+                console.print(Padding(rich_obj, (0, 0, 0, 2)))
+                return
+
+            console.print(
+                Padding(
+                    Panel(
+                        Align.left(rich_obj, pad=False),
+                        border_style="panel.border" if border else "",
+                        box=ROUNDED if border else MINIMAL,
+                        width=panel_width,
+                        expand=False,
+                    ),
+                    (0, 0, 0, 4),
+                )
+            )
+
+        # 2-a prose markdown ------------------------------------------------
+        if is_prose_md:
+            from rich.markdown import Markdown as RichMarkdown
+
+            _out(RichMarkdown(str_, code_theme=syntax_theme))
+            return
+
+        # 2-b code (fenced or explicit) -------------------------------------
+        if is_fenced_code:
+            lines = str_.splitlines()
+            lang, code = (
+                (lines[0][3:].strip() or ("yaml" if format_curly else "json")),
+                "\n".join(lines[1:-1]),
+            )
         else:
-            # Otherwise, just print the string
-            print(str_)
-    else:
-        return str_
+            lang, code = ("yaml" if format_curly else "json"), str_
+
+        syntax = Syntax(
+            code,
+            lang,
+            theme=syntax_theme,
+            line_numbers=False,
+            word_wrap=True,
+            background_color="default",
+        )
+        _out(syntax)
+        return
+
+    # (3) Plain fallback ----------------------------------------------------
+    print(str_)
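
For orientation, here is a small usage sketch of the display options added above. It is not taken from the package's tests: the import path follows the file path shown in this diff, the sample data is made up, and the rich-styled path only applies when `rich` is installed and stdout is a TTY (otherwise the call falls back to a plain print).

# Hypothetical usage of the as_readable() options introduced in 0.13.3.
from lionagi.libs.schema.as_readable import as_readable

data = {"name": "lionagi", "version": "0.13.3", "tags": ["agents", "workflows"]}

# Default behavior is unchanged: a formatted string is returned to the caller.
text = as_readable(data, md=True)

# New display path: print instead of return, with rich styling when available.
as_readable(
    data,
    md=True,
    display_str=True,     # print the result instead of returning it
    use_rich=True,        # use rich when importable; plain print otherwise
    theme="light",        # "dark" (default) or "light"
    max_panel_width=100,  # cap panel/code-block width in characters
    panel=True,           # wrap output in a rounded panel
    border=True,          # draw the panel border
)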
lionagi/libs/schema/extract_docstring.py CHANGED
@@ -52,8 +52,7 @@ def extract_docstring(
         )
     else:
         raise ValueError(
-            f'{style} is not supported. Please choose either "google" or'
-            ' "reST".'
+            f'{style} is not supported. Please choose either "google" or "reST".'
         )
     return func_description, params_description
 
lionagi/libs/token_transform/synthlang_/base.py CHANGED
@@ -21,7 +21,6 @@ __all__ = (
 
 
 class SynthlangFramework(Resource):
-
     category: ResourceCategory = Field(
         default=ResourceCategory.FRAMEWORK, frozen=True
     )
@@ -72,7 +71,6 @@ class SynthlangFramework(Resource):
         framework_options: list[FRAMEWORK_CHOICES] = None,
         additional_text: str = "",
     ) -> str:
-
         framework_options_text = self.build_framework_text(framework_options)
         base_prompt = self.load_base_system_prompt()
         template_details = (
lionagi/libs/validate/string_similarity.py CHANGED
@@ -297,8 +297,7 @@ def string_similarity(
         score_func = algorithm
     else:
         raise ValueError(
-            "algorithm must be a string specifying a built-in algorithm or "
-            "a callable"
+            "algorithm must be a string specifying a built-in algorithm or a callable"
         )
 
     # Calculate similarities
lionagi/models/hashable_model.py CHANGED
@@ -5,7 +5,6 @@ from lionagi.utils import UNDEFINED, hash_dict
 
 
 class HashableModel(BaseModel):
-
     def to_dict(self, **kwargs) -> dict:
         """provides interface, specific methods need to be implemented in subclass kwargs for pydantic model_dump"""
         return {
@@ -10,7 +10,6 @@ __all__ = ("SchemaModel",)
10
10
 
11
11
 
12
12
  class SchemaModel(HashableModel):
13
-
14
13
  model_config = ConfigDict(
15
14
  extra="forbid",
16
15
  validate_default=False,
@@ -113,7 +113,6 @@ class ReActAnalysis(HashableModel):
113
113
 
114
114
 
115
115
  class Analysis(HashableModel):
116
-
117
116
  answer: str | None = None
118
117
 
119
118
  @field_validator("answer", mode="before")
lionagi/operations/_act/act.py CHANGED
@@ -20,7 +20,6 @@ async def _act(
     suppress_errors: bool = False,
     verbose_action: bool = False,
 ) -> "ActionResponseModel":
-
     _request = {}
 
     if isinstance(action_request, BaseModel):
lionagi/operations/interpret/interpret.py CHANGED
@@ -23,10 +23,7 @@ async def interpret(
         "Return only the re-written prompt. Do not assume any details not mentioned in the input, nor "
         "give additional instruction than what is explicitly stated."
     )
-    guidance = (
-        f"Domain hint: {domain or 'general'}. "
-        f"Desired style: {style or 'concise'}. "
-    )
+    guidance = f"Domain hint: {domain or 'general'}. Desired style: {style or 'concise'}. "
     if sample_writing:
         guidance += f" Sample writing: {sample_writing}"
 
lionagi/operations/manager.py CHANGED
@@ -8,7 +8,6 @@ experimental
 
 
 class OperationManager(Manager):
-
     def __init__(self, *args, **kwargs):
         super().__init__()
         self.registry: dict[str, Callable] = {}
lionagi/operations/plan/plan.py CHANGED
@@ -231,7 +231,6 @@ async def plan(
 
     # We now handle multiple strategies:
     match execution_strategy:
-
         # ---------------------------------------------------------
         # Strategy A: SEQUENTIAL
         # ---------------------------------------------------------
lionagi/operations/select/utils.py CHANGED
@@ -65,7 +65,6 @@ def parse_to_representation(
 
 
 def get_choice_representation(choice: Any) -> str:
-
     if isinstance(choice, str):
         return choice
 
@@ -77,7 +76,6 @@ def get_choice_representation(choice: Any) -> str:
 
 
 def parse_selection(selection_str: str, choices: Any):
-
     select_from = []
 
     if isinstance(choices, dict):
lionagi/protocols/forms/flow.py CHANGED
@@ -46,7 +46,9 @@ class FlowDefinition(BaseModel):
             ins_str, outs_str = seg.split("->", 1)
             inputs = [x.strip() for x in ins_str.split(",") if x.strip()]
             outputs = [y.strip() for y in outs_str.split(",") if y.strip()]
-            step = FlowStep(name=f"step_{i+1}", inputs=inputs, outputs=outputs)
+            step = FlowStep(
+                name=f"step_{i + 1}", inputs=inputs, outputs=outputs
+            )
             self.steps.append(step)
 
     def get_required_fields(self) -> set[str]:
lionagi/protocols/generic/pile.py CHANGED
@@ -851,8 +851,7 @@ class Pile(Element, Collective[E], Generic[E]):
         if self.strict_type:
             if type(i) not in self.item_type:
                 raise TypeError(
-                    "Invalid item type in pile."
-                    f" Expected {self.item_type}",
+                    f"Invalid item type in pile. Expected {self.item_type}",
                 )
         else:
             if not any(issubclass(type(i), t) for t in self.item_type):
lionagi/protocols/generic/processor.py CHANGED
@@ -149,7 +149,6 @@ class Processor(Observer):
             next_event = await self.dequeue()
 
             if await self.request_permission(**next_event.request):
-
                 if next_event.streaming:
                     task = asyncio.create_task(next_event.stream())
                 else:
@@ -21,7 +21,6 @@ __all__ = ("Graph",)
21
21
 
22
22
 
23
23
  class Graph(Element, Relational):
24
-
25
24
  internal_nodes: Pile[Node] = Field(
26
25
  default_factory=lambda: Pile(item_type={Node}, strict_type=False),
27
26
  title="Internal Nodes",
@@ -52,8 +51,7 @@ class Graph(Element, Relational):
52
51
  """Add a node to the graph."""
53
52
  if not isinstance(node, Relational):
54
53
  raise RelationError(
55
- "Failed to add node: Invalid node type: "
56
- "not a <Relational> entity."
54
+ "Failed to add node: Invalid node type: not a <Relational> entity."
57
55
  )
58
56
  _id = ID.get_id(node)
59
57
  try:
lionagi/protocols/mail/package.py CHANGED
@@ -91,7 +91,6 @@ class Package(Observable):
         item: Any,
         request_source: ID[Communicatable] = None,
     ):
-
         super().__init__()
         self.id = IDType.create()
         self.created_at = time(type_="timestamp")
lionagi/protocols/messages/assistant_response.py CHANGED
@@ -15,7 +15,6 @@ from .message import MessageRole, RoledMessage, Template, jinja_env
 def prepare_assistant_response(
     assistant_response: BaseModel | list[BaseModel] | dict | str | Any, /
 ) -> dict:
-
     assistant_response = (
         [assistant_response]
         if not isinstance(assistant_response, list)
@@ -26,7 +25,6 @@ def prepare_assistant_response(
     model_responses = []
 
     for i in assistant_response:
-
         if isinstance(i, BaseModel):
             i = i.model_dump(exclude_none=True, exclude_unset=True)
 
lionagi/protocols/messages/message.py CHANGED
@@ -232,7 +232,6 @@ class RoledMessage(Node, Sendable):
         self.template = template
 
     def __str__(self) -> str:
-
         content_preview = (
             f"{str(self.content)[:75]}..."
             if len(str(self.content)) > 75
lionagi/service/connections/endpoint_config.py CHANGED
@@ -73,6 +73,12 @@ class EndpointConfig(BaseModel):
 
         return self
 
+    @field_validator("provider", mode="before")
+    def _validate_provider(cls, v: str):
+        if not v:
+            raise ValueError("Provider must be specified")
+        return v.strip().lower()
+
     @property
     def full_url(self):
         if not self.endpoint_params:
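
A brief sketch of what the new `provider` validator means for callers. The constructor arguments below mirror the `EndpointConfig(...)` call added to `match_endpoint.py` later in this diff; treat the specific values as illustrative, not as the only valid configuration.

# Hypothetical illustration of the provider normalization added above.
from lionagi.service.connections.endpoint_config import EndpointConfig

cfg = EndpointConfig(
    provider="  OpenAI  ",  # padded, mixed-case input...
    endpoint="chat/completions",
    name="openai_compatible_chat",
    auth_type="bearer",
    content_type="application/json",
    method="POST",
    requires_tokens=True,
)
assert cfg.provider == "openai"  # ...comes out stripped and lower-cased

# An empty provider is now rejected: passing provider="" fails validation
# with the message "Provider must be specified".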
lionagi/service/connections/match_endpoint.py CHANGED
@@ -2,6 +2,8 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
+from lionagi.service.connections.endpoint_config import EndpointConfig
+
 from .endpoint import Endpoint
 
 
@@ -45,13 +47,29 @@ def match_endpoint(
         from .providers.perplexity_ import PerplexityChatEndpoint
 
         return PerplexityChatEndpoint(**kwargs)
-    if provider == "claude_code" and (
-        "query" in endpoint or "code" in endpoint
-    ):
-        from lionagi.service.connections.providers.claude_code_ import (
-            ClaudeCodeEndpoint,
-        )
+    if provider == "claude_code":
+        if "cli" in endpoint:
+            from .providers.claude_code_cli import ClaudeCodeCLIEndpoint
+
+            return ClaudeCodeCLIEndpoint(**kwargs)
+
+        if "query" in endpoint or "code" in endpoint:
+            from lionagi.service.connections.providers.claude_code_ import (
+                ClaudeCodeEndpoint,
+            )
+
+            return ClaudeCodeEndpoint(**kwargs)
+
+    from .providers.oai_ import OpenaiChatEndpoint
 
-        return ClaudeCodeEndpoint(**kwargs)
+    config = EndpointConfig(
+        provider=provider,
+        endpoint=endpoint or "chat/completions",
+        name="openai_compatible_chat",
+        auth_type="bearer",
+        content_type="application/json",
+        method="POST",
+        requires_tokens=True,
+    )
 
-    return None
+    return Endpoint(config, **kwargs)
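
To summarize the routing change, a hypothetical call-level sketch follows. It assumes `provider` and `endpoint` are accepted as keyword arguments, that the made-up fallback provider name is not intercepted by a branch outside this hunk, and that any extra keyword arguments are simply passed through to the selected endpoint class.

# Hypothetical illustration of the 0.13.3 routing in match_endpoint().
from lionagi.service.connections.match_endpoint import match_endpoint

# Claude Code via the SDK-style endpoint (existing path).
ep_sdk = match_endpoint(provider="claude_code", endpoint="query")

# New in 0.13.3: a CLI-backed Claude Code endpoint whenever "cli" appears
# in the endpoint name.
ep_cli = match_endpoint(provider="claude_code", endpoint="claude_code/cli")

# Unmatched providers no longer return None; they now get a generic
# OpenAI-compatible chat Endpoint built from an EndpointConfig.
ep_fallback = match_endpoint(
    provider="my_local_gateway",  # made-up provider name for illustration
    endpoint="chat/completions",
)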