lionagi 0.8.4__py3-none-any.whl → 0.8.6__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.
lionagi/__init__.py CHANGED
@@ -2,6 +2,8 @@
 #
 # SPDX-License-Identifier: Apache-2.0

+import logging
+
 from pydantic import BaseModel, Field

 from . import _types as types
@@ -13,6 +15,9 @@ from .version import __version__

 LiteiModel = iModel

+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
 __all__ = (
     "Session",
     "Branch",
@@ -24,4 +29,5 @@ __all__ = (
     "__version__",
     "BaseModel",
     "Field",
+    "logger",
 )
lionagi/operations/ReAct/ReAct.py CHANGED
@@ -21,6 +21,10 @@ async def ReAct(
     branch: "Branch",
     instruct: Instruct | dict[str, Any],
     interpret: bool = False,
+    interpret_domain: str | None = None,
+    interpret_style: str | None = None,
+    interpret_sample: str | None = None,
+    interpret_kwargs: dict | None = None,
     tools: Any = None,
     tool_schemas: Any = None,
     response_format: type[BaseModel] | BaseModel = None,
@@ -29,6 +33,7 @@ async def ReAct(
     response_kwargs: dict | None = None,
     return_analysis: bool = False,
     analysis_model: iModel | None = None,
+    verbose_analysis: bool = False,
     **kwargs,
 ):
     # If no tools or tool schemas are provided, default to "all tools"
@@ -43,7 +48,11 @@ async def ReAct(
                 instruct.to_dict()
                 if isinstance(instruct, Instruct)
                 else instruct
-            )
+            ),
+            domain=interpret_domain,
+            style=interpret_style,
+            sample_writing=interpret_sample,
+            **(interpret_kwargs or {}),
         )

     # Convert Instruct to dict if necessary
@@ -52,10 +61,13 @@ async def ReAct(
         if isinstance(instruct, Instruct)
         else dict(instruct)
     )
-    # Overwrite the "instruction" field with the interpreted string (if any)
-    instruct_dict["instruction"] = instruction_str or instruct_dict.get(
-        "instruction"
-    )
+
+    # Overwrite "instruction" with the interpreted prompt (if any) plus a note about expansions
+    max_ext_info = f"\nIf needed, you can do up to {max_extensions or 0 if extension_allowed else 0} expansions."
+    instruct_dict["instruction"] = (
+        instruction_str
+        or (instruct_dict.get("instruction") or "")  # in case it's missing
+    ) + max_ext_info

     # Prepare a copy of user-provided kwargs for the first operate call
     kwargs_for_operate = copy(kwargs)
@@ -73,6 +85,12 @@ async def ReAct(
     )
     analyses = [analysis]

+    # If verbose, show round #1 analysis
+    if verbose_analysis:
+        print(
+            f"ReAct Round #1 Analysis:\n {analysis.model_dump_json(indent=2)}",
+        )
+
     # Validate and clamp max_extensions if needed
     if max_extensions and max_extensions > 5:
         logging.warning("max_extensions should not exceed 5; defaulting to 5.")
@@ -80,6 +98,8 @@ async def ReAct(

     # Step 2: Possibly loop through expansions if extension_needed
     extensions = max_extensions
+    round_count = 1
+
     while (
         extension_allowed
         and analysis.extension_needed
@@ -95,16 +115,28 @@ async def ReAct(
             extensions=extensions
         )

+        operate_kwargs = copy(kwargs)
+        operate_kwargs["actions"] = True
+        operate_kwargs["reason"] = True
+        operate_kwargs["response_format"] = ReActAnalysis
+        operate_kwargs["action_strategy"] = analysis.action_strategy
+        if analysis.action_batch_size:
+            operate_kwargs["action_batch_size"] = analysis.action_batch_size
+
         analysis = await branch.operate(
             instruction=new_instruction,
-            response_format=ReActAnalysis,
             tools=tools,
             tool_schemas=tool_schemas,
-            reason=True,
-            actions=True,
+            **operate_kwargs,
         )
         analyses.append(analysis)
+        round_count += 1

+        # If verbose, show round analysis
+        if verbose_analysis:
+            print(
+                f"ReAct Round #{round_count} Analysis:\n {analysis.model_dump_json(indent=2)}",
+            )
         if extensions:
             extensions -= 1

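Taken together, these changes let one `verbose` switch (see the `branch.py` section below) print each round's analysis, and let the interpretation step receive domain/style hints. A minimal sketch of the new call surface, assuming `Branch` and `iModel` are exported from the package root and an OpenAI key is configured:

```python
import asyncio

from lionagi import Branch, iModel

async def main():
    # Hypothetical setup; any configured chat model should work the same way.
    branch = Branch(chat_model=iModel(provider="openai", model="gpt-4o-mini"))
    result = await branch.ReAct(
        instruct={"instruction": "Compare quicksort and mergesort."},
        interpret=True,                       # refine the raw instruction first
        interpret_domain="computer science",  # new in 0.8.6
        interpret_style="concise",            # new in 0.8.6
        extension_allowed=True,
        max_extensions=3,
        verbose=True,  # fans out to verbose_action and verbose_analysis
    )
    print(result)

asyncio.run(main())
```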
lionagi/operations/ReAct/utils.py CHANGED
@@ -2,27 +2,91 @@
 #
 # SPDX-License-Identifier: Apache-2.0

-from typing import ClassVar
+from typing import ClassVar, Literal

 from pydantic import BaseModel, Field


+class PlannedAction(BaseModel):
+    """
+    Short descriptor for an upcoming action/tool invocation the LLM wants to perform.
+    The model can hold multiple actions in a single round if needed.
+    """
+
+    action_type: str = Field(
+        ...,
+        description="The name or type of tool/action to invoke (e.g., 'search_exa', 'reader_tool').",
+    )
+    description: str = Field(
+        ...,
+        description="A short explanation of why or what is intended to achieve with this action.",
+    )
+
+
 class ReActAnalysis(BaseModel):
+    """
+    Captures the ReAct chain-of-thought output each round:
+    1) The LLM's 'analysis' (reasoning),
+    2) A list of planned actions to perform before finalizing,
+    3) Indication whether more expansions/rounds are needed,
+    4) Additional tuning knobs: how to handle validation, how to execute actions, etc.
+    """

+    # Standard ReAct strings for controlling expansions:
     FIRST_EXT_PROMPT: ClassVar[str] = (
-        "You are provided with additional rounds to perform reason action to provide an accurate final answer. you have max another {extensions} rounds. Pleasen continue."
+        "You can perform multiple reason-action steps for accuracy. "
+        "If you are not ready to finalize, set extension_needed to True. "
+        "You have up to {extensions} expansions. Please continue."
     )
-
     CONTINUE_EXT_PROMPT: ClassVar[str] = (
-        "You are provided with another round, you have max another {extensions} rounds. Please continue."
+        "Another round is available. You may do multiple actions if needed. "
+        "You have up to {extensions} expansions. Please continue."
     )
-
     ANSWER_PROMPT: ClassVar[str] = (
-        "given above reason and actions, please provide final answer to the original user request:\n\n {instruction}"
+        "Given your reasoning and actions, please now provide the final answer "
+        "to the user's request:\n\n{instruction}"
+    )
+
+    analysis: str = Field(
+        ...,
+        description="Free-form reasoning or chain-of-thought summary. Must be consistent with the plan.",
+    )
+
+    planned_actions: list[PlannedAction] = Field(
+        default_factory=list,
+        description=(
+            "One or more short descriptors of the tool calls or operations "
+            "the LLM wants to perform this round. For example, read the doc, "
+            "then run a search."
+        ),
     )

-    analysis: str
     extension_needed: bool = Field(
         False,
-        description="Set to True if more steps are needed to provide an accurate answer. If True, additional rounds are allowed. Typically should be set to true if more actions should be taken or planned to be taken. If false, will proceed to provide final answer next.",
+        description="Set True if more expansions are needed. If False, final answer is next.",
+    )
+
+    milestone: str | None = Field(
+        None,
+        description=(
+            "A sub-goal or mini-checkpoint to reach before finalizing. "
+            "E.g. 'Validate results from search_exa, then summarize outcomes.'"
+        ),
+    )
+
+    action_strategy: Literal["sequential", "concurrent", "batch"] = Field(
+        "concurrent",
+        description=(
+            "Specifies how to invoke the planned actions:\n"
+            "'sequential' => Each action is run in order, \n"
+            "'concurrent' => All actions run in parallel, \n"
+            "'batch' => Divide actions into async batches of N (if reasonable)."
+        ),
+    )
+
+    action_batch_size: int | None = Field(
+        None,
+        description=(
+            "provide if and only if action_strategy is 'batch', this specifies the number of actions to run in parallel per batch."
+        ),
     )
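The expanded `ReActAnalysis` schema is what each round's `operate` call now returns, so the new knobs can be seen by constructing one by hand; a sketch, assuming the import path mirrors the file location:

```python
from lionagi.operations.ReAct.utils import PlannedAction, ReActAnalysis

analysis = ReActAnalysis(
    analysis="Need fresh sources before answering; plan one search first.",
    planned_actions=[
        PlannedAction(
            action_type="search_exa",
            description="Find recent papers on the topic.",
        )
    ],
    extension_needed=True,   # request another round
    action_strategy="batch",
    action_batch_size=2,     # only meaningful with 'batch'
)

# The class-level prompts interpolate the remaining expansion budget:
print(ReActAnalysis.FIRST_EXT_PROMPT.format(extensions=3))
```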
lionagi/operations/_act/act.py CHANGED
@@ -18,6 +18,7 @@ async def _act(
     branch: "Branch",
     action_request: BaseModel | dict,
     suppress_errors: bool = False,
+    verbose_action: bool = False,
 ) -> "ActionResponseModel":

     _request = {}
@@ -35,6 +36,11 @@ async def _act(

     try:
         func_call = await branch._action_manager.invoke(_request)
+        if verbose_action:
+            print(
+                f"Action {_request['function']} invoked, status: {func_call.status}."
+            )
+
     except Exception as e:
         content = {
             "error": str(e),
@@ -43,6 +49,8 @@ async def _act(
             "branch": str(branch.id),
         }
         branch._log_manager.log(Log(content=content))
+        if verbose_action:
+            print(f"Action {_request['function']} failed, error: {str(e)}.")
         if suppress_errors:
             logging.error(
                 f"Error invoking action '{_request['function']}': {e}"
lionagi/operations/interpret/interpret.py CHANGED
@@ -16,15 +16,46 @@ async def interpret(
     sample_writing: str | None = None,
     **kwargs,
 ) -> str:
-    instruction = (
-        "Rewrite the following user input into a clear, structured prompt or "
-        "query for an LLM, ensuring any implicit details are made explicit. "
-        "Return only the improved user prompt."
-    )
+    instruction = """
+    You are given a user's raw instruction or question. Your task is to rewrite it into a clearer, more structured prompt for an LLM or system, making any implicit or missing details explicit.
+
+    Follow these guidelines:
+
+    1. **Dissect the user's request**:
+       - If the user references a local file, note it clearly (e.g., "paper_file_path": "…").
+       - If the user might need external references or up-to-date data, mention that possibility.
+       - If the user's question is ambiguous, propose clarifications.
+
+    2. **Be explicit about the user's final objective**:
+       - For example, if the user wants a comparison with other works, add that as a bullet point or sub-question.
+       - If the user wants a summary plus code snippet, highlight that in your structured prompt.
+
+    3. **Do NOT produce final system actions**:
+       - You're not calling any tools directly here; only rewriting the user query to reflect potential next steps.
+       - If the user's request might require searching or doc reading, note it as an *option*, e.g. "Potential tool usage: {search, partial doc read}."
+
+    4. **Return only the improved user prompt**:
+       - The final output should be a single text block or short JSON specifying the clarified user request.
+       - Keep it concise yet thorough.
+
+    For instance, if the user's original text is:
+    "Please read my local PDF on RL and compare it to the newest research methods from exa or perplexity."
+
+    A re-written version might be:
+    "**Task**:
+    - Summarize the local PDF (paper_file_path: 'myRLpaper.pdf').
+    - Compare its approach with recent reinforcement learning research found via exa/perplexity searches.
+    **Potential Tool Usage**:
+    - Doc reading (reader_tool)
+    - External search (search_exa, search_perplexity)
+    **Output**:
+    - A structured summary + comparative analysis."
+
+    Now, apply this rewriting to the input below. Return only the re-written prompt.
+    """
     guidance = (
         f"Domain hint: {domain or 'general'}. "
         f"Desired style: {style or 'concise'}. "
-        "You can add or clarify context if needed."
     )
     if sample_writing:
         guidance += f" Sample writing: {sample_writing}"
@@ -32,11 +63,11 @@ async def interpret(
     context = [f"User input: {text}"]

     # Default temperature if none provided
-    kwargs["temperature"] = kwargs.get("temperature", 0.1)
     kwargs["guidance"] = guidance + "\n" + kwargs.get("guidance", "")
+    kwargs["instruction"] = instruction + "\n" + kwargs.get("instruction", "")
+    kwargs["temperature"] = kwargs.get("temperature", 0.1)

     refined_prompt = await branch.chat(
-        instruction=instruction,
         context=context,
         **kwargs,
     )
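Called on its own, the reworked interpreter returns a clarified prompt rather than an answer; a sketch, assuming the branch has a default chat model configured:

```python
import asyncio

from lionagi import Branch

async def main():
    branch = Branch()  # assumes a default chat model is available
    refined = await branch.interpret(
        "read my pdf and compare it to new research",
        domain="machine learning",  # becomes the "Domain hint" guidance
        style="concise",            # becomes the "Desired style" guidance
    )
    print(refined)  # a structured rewrite of the request, not a final answer

asyncio.run(main())
```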
lionagi/operations/operate/operate.py CHANGED
@@ -50,6 +50,8 @@ async def operate(
     action_strategy: Literal[
         "sequential", "concurrent", "batch"
     ] = "concurrent",
+    action_batch_size: int = None,
+    verbose_action: bool = False,
     field_models: list[FieldModel] = None,
     exclude_fields: list | dict | None = None,
     request_params: ModelParams = None,
@@ -189,9 +191,13 @@ async def operate(
             if instruct.action_strategy
             else action_kwargs.get("strategy", "concurrent")
         )
+        if action_batch_size:
+            action_kwargs["batch_size"] = action_batch_size

         action_response_models = await branch.act(
-            response_model.action_requests, **action_kwargs
+            response_model.action_requests,
+            verbose_action=verbose_action,
+            **action_kwargs,
         )
         # Possibly refine the operative with the tool outputs
         operative = Step.respond_operative(
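The new pass-throughs make batched tool execution reachable from `operate`; a sketch with two plain callables as tools, assuming a configured chat model:

```python
import asyncio

from lionagi import Branch

def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

def mul(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

async def main():
    branch = Branch(tools=[add, mul])  # plain callables are registered as tools
    result = await branch.operate(
        instruction="Compute 2+3 and 4*5 using the available tools.",
        actions=True,
        action_strategy="batch",  # new pass-through in 0.8.6
        action_batch_size=2,      # forwarded to branch.act(batch_size=...)
        verbose_action=True,      # prints each invocation's status
    )
    print(result)

asyncio.run(main())
```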
lionagi/service/endpoints/base.py CHANGED
@@ -61,6 +61,7 @@ class EndpointConfig(BaseModel):
         use_enum_values=True,
     )

+    name: str | None = None
     provider: str | None = None
     base_url: str | None = None
     endpoint: str
@@ -75,6 +76,7 @@ class EndpointConfig(BaseModel):
     requires_tokens: bool = False
     api_version: str | None = None
     allowed_roles: list[str] | None = None
+    request_options: type | None = None


 class EndPoint(ABC):
@@ -100,6 +102,11 @@ class EndPoint(ABC):
         config.update(kwargs)
         self.config = EndpointConfig(**config)

+    @property
+    def name(self) -> str | None:
+        """str | None: The name of the endpoint, if any."""
+        return self.config.name or self.endpoint
+
     @property
     def is_streamable(self) -> bool:
         """bool: Whether this endpoint supports streaming responses."""
@@ -185,6 +192,10 @@ class EndPoint(ABC):
         """bool: Indicates if this endpoint uses role-based messages."""
         return self.allowed_roles is not None

+    @property
+    def request_options(self) -> type | None:
+        return self.config.request_options
+
     def create_payload(self, **kwargs) -> dict:
         """Generates a request payload (and headers) for this endpoint.

lionagi/service/imodel.py CHANGED
@@ -5,6 +5,8 @@
 import os
 import warnings

+from pydantic import BaseModel
+
 from .endpoints.base import APICalling, EndPoint
 from .endpoints.match_endpoint import match_endpoint
 from .endpoints.rate_limited_processor import RateLimitedAPIExecutor
@@ -266,6 +268,15 @@ class iModel:
         """
         return self.kwargs.get("model", "")

+    @property
+    def request_options(self) -> type[BaseModel] | None:
+        """type[BaseModel] | None: The request options model for the endpoint.
+
+        Returns:
+            The request options model if available; otherwise, None.
+        """
+        return self.endpoint.request_options
+
     def to_dict(self):
         kwargs = self.kwargs
         if "kwargs" in self.kwargs:
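With the property in place, the request schema can be read straight off a configured model; a sketch, assuming the matched perplexity endpoint registers `request_options` as shown further below:

```python
from lionagi import iModel

# Assumes PERPLEXITY_API_KEY is set in the environment.
imodel = iModel(
    provider="perplexity",
    model="llama-3.1-sonar-small-128k-online",
)
print(imodel.request_options)  # the endpoint's request model class, or None
```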
lionagi/service/providers/exa_/search.py CHANGED
@@ -1,10 +1,8 @@
-from typing import TYPE_CHECKING, Literal
+from typing import Literal

 from lionagi.service.endpoints.base import EndPoint

-if TYPE_CHECKING:
-    from .models import ExaSearchRequest
-
+from .models import ExaSearchRequest

 CATEGORY_OPTIONS = Literal[
     "article",
@@ -21,6 +19,7 @@ CATEGORY_OPTIONS = Literal[
 ]

 SEARCH_CONFIG = {
+    "name": "search_exa",
     "provider": "exa",
     "base_url": "https://api.exa.ai",
     "endpoint": "search",
@@ -47,6 +46,7 @@ SEARCH_CONFIG = {
         "type",  # keyword, neural, auto
         "useAutoPrompt",
     },
+    "request_options": ExaSearchRequest,
 }

lionagi/service/providers/perplexity_/chat_completions.py CHANGED
@@ -4,7 +4,10 @@

 from lionagi.service.endpoints.chat_completion import ChatCompletionEndPoint

+from .models import PerplexityChatCompletionRequest
+
 CHAT_COMPLETION_CONFIG = {
+    "name": "search_perplexity",
     "provider": "perplexity",
     "base_url": "https://api.perplexity.ai",
     "endpoint": "chat/completions",
@@ -31,6 +34,7 @@ CHAT_COMPLETION_CONFIG = {
         "frequency_penalty",
     },
     "allowed_roles": ["user", "assistant"],
+    "request_options": PerplexityChatCompletionRequest,
 }

lionagi/service/providers/perplexity_/models.py ADDED
@@ -0,0 +1,144 @@
+from enum import Enum
+from typing import Any
+
+from pydantic import BaseModel, Field, model_validator
+
+
+class PerplexityRole(str, Enum):
+    """Roles allowed in Perplexity's messages."""
+
+    system = "system"
+    user = "user"
+    assistant = "assistant"
+
+
+class PerplexityMessage(BaseModel):
+    """
+    A single message in the conversation.
+    `role` can be 'system', 'user', or 'assistant'.
+    `content` is the text for that conversation turn.
+    """
+
+    role: PerplexityRole = Field(
+        ...,
+        description="The role of the speaker. Must be system, user, or assistant.",
+    )
+    content: str = Field(..., description="The text content of this message.")
+
+
+class PerplexityChatCompletionRequest(BaseModel):
+    """
+    Represents the request body for Perplexity's Chat Completions endpoint.
+    Endpoint: POST https://api.perplexity.ai/chat/completions
+    """
+
+    model: str = Field(
+        ...,
+        description="The model name, e.g. 'llama-3.1-sonar-small-128k-online'.",
+    )
+    messages: list[PerplexityMessage] = Field(
+        ..., description="A list of messages forming the conversation so far."
+    )
+
+    # Optional parameters
+    frequency_penalty: float | None = Field(
+        default=None,
+        gt=0,
+        description=(
+            "Multiplicative penalty > 0. Values > 1.0 penalize repeated tokens more strongly. "
+            "Value=1.0 means no penalty. Incompatible with presence_penalty."
+        ),
+    )
+    presence_penalty: float | None = Field(
+        default=None,
+        ge=-2.0,
+        le=2.0,
+        description=(
+            "Penalizes tokens that have appeared so far (range -2 to 2). "
+            "Positive values encourage talking about new topics. Incompatible with frequency_penalty."
+        ),
+    )
+    max_tokens: int | None = Field(
+        default=None,
+        description=(
+            "Maximum number of completion tokens. If omitted, model generates tokens until it "
+            "hits stop or context limit."
+        ),
+    )
+    return_images: bool | None = Field(
+        default=None,
+        description="If True, attempt to return images (closed beta feature).",
+    )
+    return_related_questions: bool | None = Field(
+        default=None,
+        description="If True, attempt to return related questions (closed beta feature).",
+    )
+    search_domain_filter: list[Any] | None = Field(
+        default=None,
+        description=(
+            "List of domains to limit or exclude in the online search. Example: ['example.com', '-twitter.com']. "
+            "Supports up to 3 entries. (Closed beta feature.)"
+        ),
+    )
+    search_recency_filter: str | None = Field(
+        default=None,
+        description=(
+            "Returns search results within a specified time interval: 'month', 'week', 'day', or 'hour'."
+        ),
+    )
+    stream: bool | None = Field(
+        default=None,
+        description=(
+            "If True, response is returned incrementally via Server-Sent Events (SSE)."
+        ),
+    )
+    temperature: float | None = Field(
+        default=None,
+        ge=0.0,
+        lt=2.0,
+        description=(
+            "Controls randomness of sampling, range [0, 2). Higher => more random. "
+            "Defaults to 0.2."
+        ),
+    )
+    top_k: int | None = Field(
+        default=None,
+        ge=0,
+        le=2048,
+        description=(
+            "Top-K filtering. 0 disables top-k filtering. If set, only the top K tokens are considered. "
+            "We recommend altering either top_k or top_p, but not both."
+        ),
+    )
+    top_p: float | None = Field(
+        default=None,
+        ge=0.0,
+        le=1.0,
+        description=(
+            "Nucleus sampling threshold. We recommend altering either top_k or top_p, but not both."
+        ),
+    )
+
+    @model_validator(mode="before")
+    def validate_penalties(cls, values):
+        """
+        Disallow using both frequency_penalty != 1.0 and presence_penalty != 0.0 at once,
+        since the docs say they're incompatible.
+        """
+        freq_pen = values.get("frequency_penalty", 1.0)
+        pres_pen = values.get("presence_penalty", 0.0)
+
+        # The doc states frequency_penalty is incompatible with presence_penalty.
+        # We'll enforce that if presence_penalty != 0, frequency_penalty must be 1.0
+        # or vice versa. Adjust logic as needed.
+        if pres_pen != 0.0 and freq_pen != 1.0:
+            raise ValueError(
+                "presence_penalty is incompatible with frequency_penalty. "
+                "Please use only one: either presence_penalty=0 with freq_pen !=1, "
+                "or presence_penalty!=0 with freq_pen=1."
+            )
+        return values
+
+    def to_dict(self) -> dict:
+        """Return a dict suitable for JSON serialization and sending to Perplexity API."""
+        return self.model_dump(exclude_none=True)
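The validator makes the penalty exclusivity rule fail fast at construction time; for example:

```python
from lionagi.service.providers.perplexity_.models import (
    PerplexityChatCompletionRequest,
)

req = PerplexityChatCompletionRequest(
    model="llama-3.1-sonar-small-128k-online",
    messages=[{"role": "user", "content": "What's new in RL research?"}],
    temperature=0.2,
    search_recency_filter="week",
)
print(req.to_dict())  # exclude_none=True keeps the payload minimal

try:
    PerplexityChatCompletionRequest(
        model="llama-3.1-sonar-small-128k-online",
        messages=[{"role": "user", "content": "hi"}],
        frequency_penalty=1.5,  # != 1.0 ...
        presence_penalty=0.5,   # ... and != 0.0 together trip the validator
    )
except ValueError as e:
    print(e)
```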
lionagi/session/branch.py CHANGED
@@ -45,8 +45,10 @@ from lionagi.protocols.types import (
     SenderRecipient,
     System,
 )
+from lionagi.service.endpoints.base import EndPoint
 from lionagi.service.types import iModel, iModelManager
 from lionagi.settings import Settings
+from lionagi.tools.base import LionTool
 from lionagi.utils import UNDEFINED, alcall, bcall, copy

 if TYPE_CHECKING:
@@ -204,7 +206,9 @@ class Branch(Element, Communicatable, Relational):
         )

         # --- ActionManager ---
-        self._action_manager = ActionManager(tools)
+        self._action_manager = ActionManager()
+        if tools:
+            self.register_tools(tools)

         # --- LogManager ---
         if log_config:
@@ -345,19 +349,28 @@ class Branch(Element, Communicatable, Relational):

         return branch_clone

+    def _register_tool(self, tools: FuncTool | LionTool, update: bool = False):
+        if isinstance(tools, type) and issubclass(tools, LionTool):
+            tools = tools()
+        if isinstance(tools, LionTool):
+            tools = tools.to_tool()
+        self._action_manager.register_tool(tools, update=update)
+
     def register_tools(
-        self, tools: FuncTool | list[FuncTool], update: bool = False
+        self, tools: FuncTool | list[FuncTool] | LionTool, update: bool = False
     ):
         """
         Registers one or more tools in the ActionManager.

         Args:
-            tools (FuncTool | list[FuncTool]):
+            tools (FuncTool | list[FuncTool] | LionTool):
                 A single tool or a list of tools to register.
             update (bool, optional):
                 If `True`, updates existing tools with the same name.
         """
-        self._action_manager.register_tools(tools, update=update)
+        tools = [tools] if not isinstance(tools, list) else tools
+        for tool in tools:
+            self._register_tool(tool, update=update)

     # -------------------------------------------------------------------------
     # Conversion / Serialization
@@ -548,12 +561,38 @@ class Branch(Element, Communicatable, Relational):

     def connect(
         self,
-        name: str,
-        imodel: iModel,
-        request_options: type[BaseModel],
+        provider: str = None,
+        base_url: str = None,
+        endpoint: str | EndPoint = "chat",
+        endpoint_params: list[str] | None = None,
+        api_key: str = None,
+        queue_capacity: int = 100,
+        capacity_refresh_time: float = 60,
+        interval: float | None = None,
+        limit_requests: int = None,
+        limit_tokens: int = None,
+        invoke_with_endpoint: bool = False,
+        imodel: iModel = None,
+        name: str = None,
+        request_options: type[BaseModel] = None,
         description: str = None,
         update: bool = False,
     ):
+        if not imodel:
+            imodel = iModel(
+                provider=provider,
+                base_url=base_url,
+                endpoint=endpoint,
+                endpoint_params=endpoint_params,
+                api_key=api_key,
+                queue_capacity=queue_capacity,
+                capacity_refresh_time=capacity_refresh_time,
+                interval=interval,
+                limit_requests=limit_requests,
+                limit_tokens=limit_tokens,
+                invoke_with_endpoint=invoke_with_endpoint,
+            )
+
         if not update and name in self.tools:
             raise ValueError(f"Tool with name '{name}' already exists.")

@@ -563,13 +602,13 @@ class Branch(Element, Communicatable, Relational):
             self._log_manager.log(Log.create(api_call))
             return api_call.response

-        _connect.__name__ = name
+        _connect.__name__ = name or imodel.endpoint.name
         if description:
             _connect.__doc__ = description

         tool = Tool(
             func_callable=_connect,
-            request_options=request_options,
+            request_options=request_options or imodel.request_options,
         )
         self._action_manager.register_tools(tool, update=update)

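With the widened signature, `connect` can build the `iModel` itself and fall back to the endpoint for both the tool name and its schema; a sketch, assuming an Exa key:

```python
from lionagi import Branch

branch = Branch()
# name falls back to imodel.endpoint.name ("search_exa") and request_options
# to imodel.request_options (ExaSearchRequest), per the fallbacks above.
branch.connect(provider="exa", endpoint="search", api_key="...")
print(branch.tools)  # the connection is now a registered tool
```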
@@ -865,6 +904,11 @@ class Branch(Element, Communicatable, Relational):
         actions: bool = False,
         reason: bool = False,
         action_kwargs: dict = None,
+        action_strategy: Literal[
+            "sequential", "concurrent", "batch"
+        ] = "concurrent",
+        action_batch_size: int = None,
+        verbose_action: bool = False,
         field_models: list[FieldModel] = None,
         exclude_fields: list | dict | None = None,
         request_params: ModelParams = None,
@@ -939,6 +983,12 @@ class Branch(Element, Communicatable, Relational):
                 If `True`, signals that the LLM should provide chain-of-thought or reasoning (where applicable).
             action_kwargs (dict | None, optional):
                 Additional parameters for the `branch.act()` call if tools are invoked.
+            action_strategy (Literal["sequential","concurrent","batch"], optional):
+                The strategy for invoking tools (default: "concurrent").
+            action_batch_size (int, optional):
+                The batch size for concurrent tool invocation if `action_strategy="batch"`.
+            verbose_action (bool, optional):
+                If `True`, logs detailed information about tool invocation.
             field_models (list[FieldModel] | None, optional):
                 Field-level definitions or overrides for the model schema.
             exclude_fields (list|dict|None, optional):
@@ -996,6 +1046,9 @@ class Branch(Element, Communicatable, Relational):
             actions=actions,
             reason=reason,
             action_kwargs=action_kwargs,
+            action_strategy=action_strategy,
+            action_batch_size=action_batch_size,
+            verbose_action=verbose_action,
             field_models=field_models,
             exclude_fields=exclude_fields,
             request_params=request_params,
@@ -1123,6 +1176,7 @@ class Branch(Element, Communicatable, Relational):
         self,
         action_request: ActionRequest | BaseModel | dict,
         suppress_errors: bool = False,
+        verbose_action: bool = False,
     ) -> ActionResponse:
         """
         Internal method to invoke a tool (action) asynchronously.
@@ -1138,13 +1192,19 @@ class Branch(Element, Communicatable, Relational):
         """
         from lionagi.operations._act.act import _act

-        return await _act(self, action_request, suppress_errors)
+        return await _act(
+            branch=self,
+            action_request=action_request,
+            suppress_errors=suppress_errors,
+            verbose_action=verbose_action,
+        )

     async def act(
         self,
         action_request: list | ActionRequest | BaseModel | dict,
         *,
         strategy: Literal["concurrent", "sequential", "batch"] = "concurrent",
+        verbose_action: bool = False,
         batch_size: int = None,
         suppress_errors: bool = True,
         sanitize_input: bool = False,
@@ -1170,6 +1230,10 @@ class Branch(Element, Communicatable, Relational):
             action_request (list|ActionRequest|BaseModel|dict):
                 A single or list of action requests, each requiring
                 `function` and `arguments`.
+            strategy (Literal["concurrent","sequential","batch"]):
+                The execution strategy to use.
+            verbose_action (bool):
+                If True, log detailed information about the action.
             suppress_errors (bool):
                 If True, log errors instead of raising exceptions.
             sanitize_input (bool):
@@ -1216,6 +1280,7 @@ class Branch(Element, Communicatable, Relational):
             case "concurrent":
                 return await self._concurrent_act(
                     action_request,
+                    verbose_action=verbose_action,
                     suppress_errors=suppress_errors,
                     sanitize_input=sanitize_input,
                     unique_input=unique_input,
@@ -1236,11 +1301,13 @@ class Branch(Element, Communicatable, Relational):
             case "sequential":
                 return await self._sequential_act(
                     action_request,
+                    verbose_action=verbose_action,
                     suppress_errors=suppress_errors,
                 )
             case "batch":
                 return await self._batch_act(
                     action_request,
+                    verbose_action=verbose_action,
                     batch_size=batch_size or 1,
                     max_concurrent=max_concurrent,
                     suppress_errors=suppress_errors,
@@ -1271,6 +1338,7 @@ class Branch(Element, Communicatable, Relational):
         self,
         action_request: ActionRequest | BaseModel | dict,
         suppress_errors: bool = True,
+        verbose_action: bool = False,
     ) -> list:
         action_request = (
             action_request
@@ -1280,7 +1348,11 @@ class Branch(Element, Communicatable, Relational):
         results = []
         for req in action_request:
             results.append(
-                await self._act(req, suppress_errors=suppress_errors)
+                await self._act(
+                    req,
+                    verbose_action=verbose_action,
+                    suppress_errors=suppress_errors,
+                )
             )
         return results

@@ -1521,6 +1593,10 @@ class Branch(Element, Communicatable, Relational):
         self,
         instruct: Instruct | dict[str, Any],
         interpret: bool = False,
+        interpret_domain: str | None = None,
+        interpret_style: str | None = None,
+        interpret_sample: str | None = None,
+        interpret_kwargs: dict | None = None,
         tools: Any = None,
         tool_schemas: Any = None,
         response_format: type[BaseModel] = None,
@@ -1529,6 +1605,7 @@ class Branch(Element, Communicatable, Relational):
         response_kwargs: dict | None = None,
         return_analysis: bool = False,
         analysis_model: iModel | None = None,
+        verbose: bool = False,
         **kwargs,
     ):
         """
@@ -1547,6 +1624,14 @@ class Branch(Element, Communicatable, Relational):
             interpret (bool, optional):
                 If `True`, first interprets (`branch.interpret`) the instructions to refine them
                 before proceeding. Defaults to `False`.
+            interpret_domain (str | None, optional):
+                Optional domain hint for the interpretation step.
+            interpret_style (str | None, optional):
+                Optional style hint for the interpretation step.
+            interpret_sample (str | None, optional):
+                Optional sample hint for the interpretation step.
+            interpret_kwargs (dict | None, optional):
+                Additional arguments for the interpretation step.
             tools (Any, optional):
                 Tools to be made available for the ReAct process. If omitted or `None`,
                 and if no `tool_schemas` are provided, it defaults to `True` (all tools).
@@ -1595,6 +1680,10 @@ class Branch(Element, Communicatable, Relational):
             self,
             instruct,
             interpret=interpret,
+            interpret_domain=interpret_domain,
+            interpret_style=interpret_style,
+            interpret_sample=interpret_sample,
+            interpret_kwargs=interpret_kwargs,
             tools=tools,
             tool_schemas=tool_schemas,
             response_format=response_format,
@@ -1603,6 +1692,8 @@ class Branch(Element, Communicatable, Relational):
             response_kwargs=response_kwargs,
             return_analysis=return_analysis,
             analysis_model=analysis_model,
+            verbose_action=verbose,
+            verbose_analysis=verbose,
             **kwargs,
         )

lionagi/tools/base.py ADDED
@@ -0,0 +1,12 @@
+from abc import ABC, abstractmethod
+
+from lionagi.operatives.action.tool import Tool
+
+
+class LionTool(ABC):
+    is_lion_system_tool: bool = True
+    system_tool_name: str
+
+    @abstractmethod
+    def to_tool(self) -> Tool:
+        pass
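Any system tool now follows this small contract; a sketch of a custom subclass, with hypothetical names throughout:

```python
from pydantic import BaseModel

from lionagi.operatives.action.tool import Tool
from lionagi.tools.base import LionTool

class EchoRequest(BaseModel):
    """Hypothetical request schema for the example tool."""

    text: str

class EchoTool(LionTool):
    is_lion_system_tool = True
    system_tool_name = "echo_tool"

    def to_tool(self) -> Tool:
        def echo_tool(**kwargs) -> dict:
            """Validate the request and echo it back."""
            return EchoRequest(**kwargs).model_dump()

        return Tool(func_callable=echo_tool, request_options=EchoRequest)

# Branch.register_tools accepts the class itself or an instance:
# branch.register_tools(EchoTool)
```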
lionagi/tools/reader.py ADDED
@@ -0,0 +1,244 @@
+import tempfile
+from enum import Enum
+
+from pydantic import BaseModel, Field, field_validator
+
+from lionagi.operatives.action.tool import Tool
+from lionagi.utils import to_num
+
+from .base import LionTool
+
+
+class ReaderAction(str, Enum):
+    """
+    This enumeration indicates the *type* of action the LLM wants to perform.
+    - 'open': Convert a file/URL to text and store it internally for partial reads
+    - 'read': Return a partial slice of the already-opened doc
+    """
+
+    open = "open"
+    read = "read"
+
+
+class ReaderRequest(BaseModel):
+    """
+    The request model for the 'ReaderTool'.
+    It indicates:
+    - whether we are 'open'-ing a doc or 'read'-ing from a doc
+    - which file/URL we want to open (if action='open')
+    - which doc_id and offsets we want to read (if action='read')
+    """
+
+    action: ReaderAction = Field(
+        ...,
+        description=(
+            "Action to perform. Must be one of: "
+            "- 'open': Convert a file/URL to text and store it internally for partial reads. "
+            "- 'read': Return a partial slice of the already-opened doc."
+        ),
+    )
+
+    path_or_url: str | None = Field(
+        None,
+        description=(
+            "Local file path or remote URL to open. This field is REQUIRED if action='open'. "
+            "If action='read', leave it None."
+        ),
+    )
+
+    doc_id: str | None = Field(
+        None,
+        description=(
+            "Unique ID referencing a previously opened document. "
+            "This field is REQUIRED if action='read'. If action='open', leave it None."
+        ),
+    )
+
+    start_offset: int | None = Field(
+        None,
+        description=(
+            "Character start offset in the doc for partial reading. "
+            "If omitted or None, defaults to 0. Only used if action='read'."
+        ),
+    )
+
+    end_offset: int | None = Field(
+        None,
+        description=(
+            "Character end offset in the doc for partial reading. "
+            "If omitted or None, we read until the document's end. Only used if action='read'."
+        ),
+    )
+
+    @field_validator("start_offset", "end_offset", mode="before")
+    def _validate_offsets(cls, v):
+        try:
+            return to_num(v, num_type=int)
+        except ValueError:
+            return None
+
+
+class DocumentInfo(BaseModel):
+    """
+    Returned info when we 'open' a doc.
+    doc_id: The unique string to reference this doc in subsequent 'read' calls
+    length: The total character length of the converted text
+    """
+
+    doc_id: str
+    length: int | None = None
+
+
+class PartialChunk(BaseModel):
+    """
+    Represents a partial slice of text from [start_offset..end_offset).
+    """
+
+    start_offset: int | None = None
+    end_offset: int | None = None
+    content: str | None = None
+
+
+class ReaderResponse(BaseModel):
+    """
+    The response from the 'ReaderTool'.
+    - If action='open' succeeded, doc_info is filled (doc_id & length).
+    - If action='read' succeeded, chunk is filled (the partial text).
+    - If failure occurs, success=False & error hold details.
+    """
+
+    success: bool = Field(
+        ...,
+        description=(
+            "Indicates if the requested action was performed successfully."
+        ),
+    )
+    error: str | None = Field(
+        None,
+        description=("Describes any error that occurred, if success=False."),
+    )
+    doc_info: DocumentInfo | None = Field(
+        None,
+        description=(
+            "Populated only if action='open' succeeded, letting the LLM know doc_id & total length."
+        ),
+    )
+    chunk: PartialChunk | None = Field(
+        None,
+        description=(
+            "Populated only if action='read' succeeded, providing the partial slice of text."
+        ),
+    )
+
+
+class ReaderTool(LionTool):
+    """
+    A single tool that the LLM can call with ReaderRequest to either:
+    - open a doc (File/URL) -> returns doc_id, doc length
+    - read partial text from doc -> returns chunk
+    """
+
+    is_lion_system_tool = True
+    system_tool_name = "reader_tool"
+
+    from lionagi.libs.package.imports import check_import
+
+    DocumentConverter = check_import(
+        "docling",
+        module_name="document_converter",
+        import_name="DocumentConverter",
+    )
+
+    def __init__(self):
+        super().__init__()
+        self.converter = ReaderTool.DocumentConverter()
+        self.documents = {}  # doc_id -> (temp_file_path, doc_length)
+        self._tool = None
+
+    def handle_request(self, request: ReaderRequest) -> ReaderResponse:
+        """
+        A function that takes ReaderRequest to either:
+        - open a doc (File/URL) -> returns doc_id, doc length
+        - read partial text from doc -> returns chunk
+        """
+        if isinstance(request, dict):
+            request = ReaderRequest(**request)
+        if request.action == "open":
+            return self._open_doc(request.path_or_url)
+        elif request.action == "read":
+            return self._read_doc(
+                request.doc_id, request.start_offset, request.end_offset
+            )
+        else:
+            return ReaderResponse(success=False, error="Unknown action type")
+
+    def _open_doc(self, source: str) -> ReaderResponse:
+        try:
+            result = self.converter.convert(source)
+            text = result.document.export_to_markdown()
+        except Exception as e:
+            return ReaderResponse(
+                success=False, error=f"Conversion error: {str(e)}"
+            )
+
+        doc_id = f"DOC_{abs(hash(source))}"
+        temp_file = tempfile.NamedTemporaryFile(
+            delete=False, mode="w", encoding="utf-8"
+        )
+        temp_file.write(text)
+        doc_len = len(text)
+        temp_file.close()
+
+        # store info
+        self.documents[doc_id] = (temp_file.name, doc_len)
+
+        return ReaderResponse(
+            success=True, doc_info=DocumentInfo(doc_id=doc_id, length=doc_len)
+        )
+
+    def _read_doc(self, doc_id: str, start: int, end: int) -> ReaderResponse:
+        if doc_id not in self.documents:
+            return ReaderResponse(
+                success=False, error="doc_id not found in memory"
+            )
+
+        path, length = self.documents[doc_id]
+        # clamp offsets
+        s = max(0, start if start is not None else 0)
+        e = min(length, end if end is not None else length)
+
+        try:
+            with open(path, "r", encoding="utf-8") as f:
+                f.seek(s)
+                content = f.read(e - s)
+        except Exception as ex:
+            return ReaderResponse(
+                success=False, error=f"Read error: {str(ex)}"
+            )
+
+        return ReaderResponse(
+            success=True,
+            chunk=PartialChunk(start_offset=s, end_offset=e, content=content),
+        )
+
+    def to_tool(self):
+        if self._tool is None:
+
+            def reader_tool(**kwargs):
+                """
+                A function that takes ReaderRequest to either:
+                - open a doc (File/URL) -> returns doc_id, doc length
+                - read partial text from doc -> returns chunk
+                """
+                return self.handle_request(
+                    ReaderRequest(**kwargs)
+                ).model_dump()
+
+            if self.system_tool_name != "reader_tool":
+                reader_tool.__name__ = self.system_tool_name
+
+            self._tool = Tool(
+                func_callable=reader_tool,
+                request_options=ReaderRequest,
+            )
+        return self._tool
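End to end, the reader plugs into a branch like any other tool, and can also be exercised directly; a sketch, assuming `docling` is installed (it is resolved via `check_import`) and that a local `paper.pdf` exists:

```python
from lionagi import Branch
from lionagi.tools.types import ReaderTool

# Passing the class works: Branch instantiates it and calls to_tool().
branch = Branch(tools=[ReaderTool])

# Direct use, outside the LLM loop:
reader = ReaderTool()
opened = reader.handle_request({"action": "open", "path_or_url": "paper.pdf"})
if opened.success:
    chunk = reader.handle_request(
        {
            "action": "read",
            "doc_id": opened.doc_info.doc_id,
            "start_offset": 0,
            "end_offset": 500,
        }
    )
    print(chunk.chunk.content)
```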
lionagi/tools/types.py ADDED
@@ -0,0 +1,3 @@
+from .reader import ReaderTool
+
+__all__ = ("ReaderTool",)
lionagi/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.8.4"
+__version__ = "0.8.6"
lionagi-0.8.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lionagi
-Version: 0.8.4
+Version: 0.8.6
 Summary: An Intelligence Operating System.
 Author-email: HaiyangLi <quantocean.li@gmail.com>
 License: Apache License
lionagi-0.8.6.dist-info/RECORD CHANGED
@@ -1,10 +1,10 @@
-lionagi/__init__.py,sha256=-tKLn-wTj_VK4XQr3CV16ioTNnaUYD2fhUAtJOZZ0vU,574
+lionagi/__init__.py,sha256=TWvUzgX_HgXO6xoShWT3kY8BNaZDOc8JIOu59PFqN2M,672
 lionagi/_class_registry.py,sha256=dutMsw-FQNqVV5gGH-NEIv90uBkSr8fERJ_x3swbb-s,3112
 lionagi/_errors.py,sha256=wNKdnVQvE_CHEstK7htrrj334RA_vbGcIds-3pUiRkc,455
 lionagi/_types.py,sha256=9g7iytvSj3UjZxD-jL06_fxuNfgZyWT3Qnp0XYp1wQU,63
 lionagi/settings.py,sha256=k9zRJXv57TveyfHO3Vr9VGiKrSwlRUUVKt5zf6v9RU4,1627
 lionagi/utils.py,sha256=QbF4E1PG-BaRcEVH3kJIYCJVNq-oRNoTxjda5k8NYW4,73177
-lionagi/version.py,sha256=jhHEJFZWhkQDemoZMomBYq-RNrKXknYzUaeIU9A6XsI,22
+lionagi/version.py,sha256=VpASnrti7EGWxUfSWGgERUfe7NLJltfVXYosOzHbpPg,22
 lionagi/libs/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
 lionagi/libs/parse.py,sha256=tpEbmIRGuHhLCJlUlm6fjmqm_Z6XJLAXGNFHNuk422I,1011
 lionagi/libs/file/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
@@ -48,11 +48,11 @@ lionagi/operations/__init__.py,sha256=O7nV0tedpUe7_OlUWmCcduGPFtqtzWZcR_SIOnjLsr
 lionagi/operations/manager.py,sha256=H7UY86PIxvxKdzJY9YVsWyJcqlwLWhVyvm4sYePH_uY,565
 lionagi/operations/types.py,sha256=LIa68xcyKLVafof-DSFwKtSkneuYPFqrtGyClohYI6o,704
 lionagi/operations/utils.py,sha256=Twy6L_UFt9JqJFRYuKKTKVZIXsePidNl5ipcYcCbesI,1220
-lionagi/operations/ReAct/ReAct.py,sha256=07F_VhxNnOhOInEZkbY9nT2YiCcnApSibxjz2yAa7Wk,3696
+lionagi/operations/ReAct/ReAct.py,sha256=odFcuNMuwJ2NjUGGdTekFJzD43WFvNFNHCjzS1X6HT8,4962
 lionagi/operations/ReAct/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
-lionagi/operations/ReAct/utils.py,sha256=yBsbaZm3NNb-LhdjdK3jVmxSYbp0enWzl8d09iv8oSo,1099
+lionagi/operations/ReAct/utils.py,sha256=uWPZC1aJVAPvJweAgr3NdXpYszeagN5OnJIkUdrSvlw,3228
 lionagi/operations/_act/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
-lionagi/operations/_act/act.py,sha256=HBp-sNwNigLDNkuEZqGU_98UCaJXPZsaokkFAXwOMn0,2454
+lionagi/operations/_act/act.py,sha256=FWK8vXiccBZI_sIQcEBw8Cn6slMooZkfmkmxBaYA4kw,2739
 lionagi/operations/brainstorm/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
 lionagi/operations/brainstorm/brainstorm.py,sha256=OwByrh6E-rTU_u6fDNTwWOlkJ4ycYJB9ZF-x-HYOs8I,17222
 lionagi/operations/brainstorm/prompt.py,sha256=f-Eh6pO606dT2TrX9BFv_einRDpYwFi6Gep9Strd1cM,610
@@ -63,9 +63,9 @@ lionagi/operations/communicate/communicate.py,sha256=1PzBpzgATcAdBog0EUxtj5_uJGi
 lionagi/operations/instruct/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
 lionagi/operations/instruct/instruct.py,sha256=CYICzWvmgALtSPIetfF3csx6WDsCuiu4NRRPL56UA2g,795
 lionagi/operations/interpret/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
-lionagi/operations/interpret/interpret.py,sha256=mjTcOCOixc34JNnX0WowSVACtZXz9IBuzjp9VyOus5w,1244
+lionagi/operations/interpret/interpret.py,sha256=Mtg65jletgMZAZ08kNLgjbC_y9C3l2xw67fHpHmBesg,2905
 lionagi/operations/operate/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
-lionagi/operations/operate/operate.py,sha256=kzH7R4J3O-1Ue-PYqIoEdrXHrkYYQVfHa6gSC4Km_sc,7003
+lionagi/operations/operate/operate.py,sha256=eHf1wopNm04hOAg1NMcrn6nz4acofUENi0R4r8FQMMs,7218
 lionagi/operations/parse/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
 lionagi/operations/parse/parse.py,sha256=LpF6LVAvCVoE8n63BkhSxXSHYgSx7CNkN7yXUwaNpQo,3003
 lionagi/operations/plan/__init__.py,sha256=AFkAmOJBTqPlYuqFRRn7rCvIw3CGh9XXH_22cNWbfig,156
@@ -161,11 +161,11 @@ lionagi/protocols/messages/templates/instruction_message.jinja2,sha256=L-ptw5OHx
 lionagi/protocols/messages/templates/system_message.jinja2,sha256=JRKJ0aFpYfaXSFouKc_N4unZ35C3yZTOWhIrIdCB5qk,215
 lionagi/protocols/messages/templates/tool_schemas.jinja2,sha256=ozIaSDCRjIAhLyA8VM6S-YqS0w2NcctALSwx4LjDwII,126
 lionagi/service/__init__.py,sha256=DMGXIqPsmut9H5GT0ZeSzQIzYzzPwI-2gLXydpbwiV8,21
-lionagi/service/imodel.py,sha256=Mv5wVsmZj4hYmrvXfHVmA-fskfAKrDY0mbdmgOCAGoE,11946
+lionagi/service/imodel.py,sha256=zQq9cdVPpEAPB7IscntExvtHOYA5ToiWonmD2n93pEw,12273
 lionagi/service/manager.py,sha256=MKSYBkg23s7YhZy5GEFdnpspEnhPVfFhpkpoJe20D7k,1435
 lionagi/service/types.py,sha256=v9SAn5-GTmds4Mar13Or_VFrRHCinBK99dmeDUd-QNk,486
 lionagi/service/endpoints/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
-lionagi/service/endpoints/base.py,sha256=RbBBQIbtNtkP1LT9U3ZrhL35SfmVIpaq-AGW8d538k4,18275
+lionagi/service/endpoints/base.py,sha256=SaYobDBCdKd4398TyFPp5u3PKyMnzEbm6PsoUxVkZDA,18605
 lionagi/service/endpoints/chat_completion.py,sha256=9ltSQaKPH43WdEDW32_-f5x07I9hOU8g-T_PAG-nYsQ,2529
 lionagi/service/endpoints/match_endpoint.py,sha256=hIGYyok1y53FfI6av5NfYMygRIpDWYZbdCj0pJJfmPY,1874
 lionagi/service/endpoints/rate_limited_processor.py,sha256=umri0FofbyBSFdAQBEsviDB5K6N12LkRiXQgSOorGKg,4663
@@ -176,7 +176,7 @@ lionagi/service/providers/anthropic_/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKU
 lionagi/service/providers/anthropic_/messages.py,sha256=PTZZ2VXVMRHWY84YFIzrft9gVrcH2V-NIq_Phi9_-xI,1760
 lionagi/service/providers/exa_/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lionagi/service/providers/exa_/models.py,sha256=263KP-JSxbxmomNrFeYjB_cebquoMOsCJeWsiKZ0mL4,5420
-lionagi/service/providers/exa_/search.py,sha256=Z9bOxTjPZbtI_qH4fSePYKSXhkc8N3ZFbCHdsml8YSA,1903
+lionagi/service/providers/exa_/search.py,sha256=Z3pyJH8KiWiquJSJw8Rd6D7x43BwTFHb2ESsgSicCk0,1932
 lionagi/service/providers/exa_/types.py,sha256=8ODjXpFajBE9-DGqBJNS--GObwmLSDi667xS84z_AgA,139
 lionagi/service/providers/groq_/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
 lionagi/service/providers/groq_/chat_completions.py,sha256=578NqQYyrIYjIemyL3bagvFGE6ear_w4S1HNlPWA5mg,1343
@@ -185,11 +185,16 @@ lionagi/service/providers/openai_/chat_completions.py,sha256=SfRcEnMTn3MD59YuZCl
 lionagi/service/providers/openrouter_/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
 lionagi/service/providers/openrouter_/chat_completions.py,sha256=MRf4ZbMCgzNIL4gxUZTD-KeFe8JYDn1Fu40Jph3bCH8,1525
 lionagi/service/providers/perplexity_/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
-lionagi/service/providers/perplexity_/chat_completions.py,sha256=SsDbrtXwQsR4Yu2VMU43KfeS86QWI8UTNhDth5lNWNs,1055
+lionagi/service/providers/perplexity_/chat_completions.py,sha256=jhE-KHWRX6yYEeKWLMCKLgK3bQzieSv2viqQWDP8q0Q,1197
+lionagi/service/providers/perplexity_/models.py,sha256=gXH4XGkhZ4aFxvMSDTlHq9Rz1mhu3aTENXAtE-BIr6U,4866
 lionagi/session/__init__.py,sha256=v8vNyJVIVj8_Oz9RJdVe6ZKUQMYTgDh1VQpnr1KdLaw,112
-lionagi/session/branch.py,sha256=ZLItnjW66__85s0brxhq1LZisjDoVS4HBJSHUpFaxsg,63834
+lionagi/session/branch.py,sha256=EH1JhOe1ZGlCVXpf0znz_xAA3GibNIGFATwLAyxiCK0,67835
 lionagi/session/session.py,sha256=po6C7PnM0iu_ISHUo4PBzzQ61HFOgcsAUfPoO--eLak,8987
-lionagi-0.8.4.dist-info/METADATA,sha256=MHgfVuup5176MSrXlfX2B_tQUaiKNdGAjtITrJVLCWY,22819
-lionagi-0.8.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-lionagi-0.8.4.dist-info/licenses/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
-lionagi-0.8.4.dist-info/RECORD,,
+lionagi/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+lionagi/tools/base.py,sha256=ffaIcLF_uwEphCkP_wsa3UfkqVenML3HpsnR5kRCTtA,236
+lionagi/tools/reader.py,sha256=TyjSqhSIQwxdkwgYSz760YKBbqJ5OfwZegRwQz47R24,7509
+lionagi/tools/types.py,sha256=_OWzoTHTcqNwPs3OGrPkpO9m_vHDCxVDL-FN-t6ZD60,58
+lionagi-0.8.6.dist-info/METADATA,sha256=SmTcVVpZcb_nfuNo5WrDS6ilr16Tj0qL_yX-AAP1ri0,22819
+lionagi-0.8.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+lionagi-0.8.6.dist-info/licenses/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
+lionagi-0.8.6.dist-info/RECORD,,