lionagi 0.9.4__py3-none-any.whl → 0.9.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lionagi/operations/ReAct/ReAct.py CHANGED
@@ -47,6 +47,8 @@ async def ReAct(
     analysis_model: iModel | None = None,
     verbose_analysis: bool = False,
     verbose_length: int = None,
+    include_token_usage_to_model: bool = True,
+    continue_after_failed_response: bool = False,
     **kwargs,
 ):
     outs = []
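
For orientation, a minimal sketch of how the two new ReAct flags can be passed at the call site; the Branch/iModel setup here is assumed, not part of the diff:

    # Sketch: passing the new 0.9.5 flags through branch.ReAct (setup assumed).
    import asyncio
    from lionagi import Branch, iModel

    async def main():
        branch = Branch(chat_model=iModel(provider="openai", model="gpt-4o-mini"))
        result = await branch.ReAct(
            instruct={"instruction": "Compare the two designs and pick one."},
            include_token_usage_to_model=True,     # default True for ReAct
            continue_after_failed_response=False,  # raise if a round returns all-None
        )
        print(result)

    asyncio.run(main())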
@@ -73,6 +75,8 @@ async def ReAct(
             verbose_analysis=verbose_analysis,
             display_as=display_as,
             verbose_length=verbose_length,
+            include_token_usage_to_model=include_token_usage_to_model,
+            continue_after_failed_response=continue_after_failed_response,
             **kwargs,
         ):
             analysis, str_ = i
@@ -101,6 +105,8 @@ async def ReAct(
             analysis_model=analysis_model,
             display_as=display_as,
             verbose_length=verbose_length,
+            include_token_usage_to_model=include_token_usage_to_model,
+            continue_after_failed_response=continue_after_failed_response,
             **kwargs,
         ):
             outs.append(i)
@@ -131,6 +137,8 @@ async def ReActStream(
     verbose_analysis: bool = False,
     display_as: Literal["json", "yaml"] = "yaml",
     verbose_length: int = None,
+    include_token_usage_to_model: bool = True,
+    continue_after_failed_response: bool = False,
     **kwargs,
 ) -> AsyncGenerator:
     irfm: FieldModel | None = None
@@ -213,6 +221,9 @@ async def ReActStream(
     kwargs_for_operate = copy(kwargs)
     kwargs_for_operate["actions"] = True
     kwargs_for_operate["reason"] = True
+    kwargs_for_operate["include_token_usage_to_model"] = (
+        include_token_usage_to_model
+    )

     # Step 1: Generate initial ReAct analysis
     analysis: ReActAnalysis = await branch.operate(
@@ -255,7 +266,7 @@ async def ReActStream(
             if isinstance(analysis, dict)
             else False
         )
-        and (extensions if max_extensions else 0) > 0
+        and (extensions - 1 if max_extensions else 0) > 0
     ):
         new_instruction = None
         if extensions == max_extensions:
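
The `extensions - 1` change tightens the loop guard by one round. A worked illustration in plain Python, not library code:

    # With one extension credit left, the old guard permitted another round;
    # the new guard stops extending.
    max_extensions, extensions = 3, 1
    old_allows = (extensions if max_extensions else 0) > 0      # True
    new_allows = (extensions - 1 if max_extensions else 0) > 0  # False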
@@ -272,6 +283,9 @@ async def ReActStream(
         operate_kwargs["reason"] = True
         operate_kwargs["response_format"] = ReActAnalysis
         operate_kwargs["action_strategy"] = analysis.action_strategy
+        operate_kwargs["include_token_usage_to_model"] = (
+            include_token_usage_to_model
+        )
         if analysis.action_batch_size:
             operate_kwargs["action_batch_size"] = analysis.action_batch_size
         if irfm:
@@ -289,6 +303,7 @@ async def ReActStream(
             operate_kwargs["guidance"] = guide + operate_kwargs.get(
                 "guidance", ""
             )
+            operate_kwargs["reasoning_effort"] = reasoning_effort

         analysis = await branch.operate(
             instruction=new_instruction,
@@ -298,6 +313,16 @@ async def ReActStream(
         )
         round_count += 1

+        if isinstance(analysis, dict) and all(
+            i is None for i in analysis.values()
+        ):
+            if not continue_after_failed_response:
+                raise ValueError(
+                    "All values in the response are None. "
+                    "This might be due to a failed response. "
+                    "Set `continue_after_failed_response=True` to ignore this error."
+                )
+
         # If verbose, show round analysis
         if verbose_analysis:
             str_ = f"\n### ReAct Round No.{round_count} Analysis:\n"
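
This all-`None` check is the behavior behind the new `continue_after_failed_response` flag. The same guard in isolation, with names mirroring the diff:

    # Standalone sketch of the failure guard added above.
    def guard_failed_response(analysis, continue_after_failed_response=False):
        if isinstance(analysis, dict) and all(
            v is None for v in analysis.values()
        ):
            if not continue_after_failed_response:
                raise ValueError(
                    "All values in the response are None. "
                    "This might be due to a failed response. "
                    "Set `continue_after_failed_response=True` to ignore this error."
                )

    guard_failed_response({"answer": None}, continue_after_failed_response=True)  # no raise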
@@ -329,6 +354,15 @@ async def ReActStream(
             response_format=response_format,
             **(response_kwargs or {}),
         )
+        if isinstance(analysis, dict) and all(
+            i is None for i in analysis.values()
+        ):
+            if not continue_after_failed_response:
+                raise ValueError(
+                    "All values in the response are None. "
+                    "This might be due to a failed response. "
+                    "Set `continue_after_failed_response=True` to ignore this error."
+                )
     except Exception:
         out = branch.msgs.last_response.response

lionagi/operations/ReAct/utils.py CHANGED
@@ -30,6 +30,8 @@ class ReActAnalysis(BaseModel):
     2) A list of planned actions to perform before finalizing,
     3) Indication whether more expansions/rounds are needed,
     4) Additional tuning knobs: how to handle validation, how to execute actions, etc.
+    Remember do not repeat yourself, and aim to use the most efficient way to achieve
+    the goal to user's satisfaction.
     """

     # Standard ReAct strings for controlling expansions:
@@ -38,11 +40,12 @@ class ReActAnalysis(BaseModel):
         "If you are not ready to finalize, set extension_needed to True. "
         "hint: you should set extension_needed to True if the overall goal"
         "is not yet achieved. Do not set it to False, if you are just providing"
-        "an interim answer. You have up to {extensions} expansions. Please continue."
+        "an interim answer. You have up to {extensions} expansions. Please "
+        "strategize accordingly and continue."
     )
     CONTINUE_EXT_PROMPT: ClassVar[str] = (
         "Another round is available. You may do multiple actions if needed. "
-        "You have up to {extensions} expansions. Please continue."
+        "You have up to {extensions} expansions. Please strategize accordingly and continue."
     )
     ANSWER_PROMPT: ClassVar[str] = (
         "Given your reasoning and actions, please now provide the final answer "
lionagi/operations/chat/chat.py CHANGED
@@ -36,6 +36,7 @@ async def chat(
     image_detail: Literal["low", "high", "auto"] = None,
     plain_content: str = None,
     return_ins_res_message: bool = False,
+    include_token_usage_to_model: bool = False,
     **kwargs,
 ) -> tuple[Instruction, AssistantResponse]:
     ins: Instruction = branch.msgs.create_instruction(
@@ -151,11 +152,12 @@ async def chat(
     kwargs["messages"] = [i.chat_msg for i in messages]
     imodel = imodel or branch.chat_model

-    meth = (
-        imodel.invoke
-        if ("stream" not in kwargs or not kwargs["stream"])
-        else imodel.stream
-    )
+    meth = imodel.invoke
+    if "stream" not in kwargs or not kwargs["stream"]:
+        kwargs["include_token_usage_to_model"] = include_token_usage_to_model
+    else:
+        meth = imodel.stream
+
     api_call = await meth(**kwargs)
     branch._log_manager.log(Log.create(api_call))

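The rewrite replaces a conditional expression with an explicit branch: only the non-streaming path injects `include_token_usage_to_model` into `kwargs`, leaving the streaming path to `iModel.stream`'s own default. A condensed sketch of the dispatch, illustrative rather than library code:

    # Condensed sketch of the new invoke/stream dispatch in chat().
    def pick_method(imodel, kwargs, include_token_usage_to_model=False):
        meth = imodel.invoke
        if "stream" not in kwargs or not kwargs["stream"]:
            # non-streaming: the flag rides along in kwargs into the API call
            kwargs["include_token_usage_to_model"] = include_token_usage_to_model
        else:
            meth = imodel.stream
        return meth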
lionagi/operations/communicate/communicate.py CHANGED
@@ -35,6 +35,7 @@ async def communicate(
     fuzzy_match_kwargs=None,
     clear_messages=False,
     operative_model=None,
+    include_token_usage_to_model: bool = False,
     **kwargs,
 ):
     if operative_model:
@@ -80,6 +81,7 @@ async def communicate(
         image_detail=image_detail,
         plain_content=plain_content,
         return_ins_res_message=True,
+        include_token_usage_to_model=include_token_usage_to_model,
         **kwargs,
     )
     branch.msgs.add_message(instruction=ins)
lionagi/operations/interpret/interpret.py CHANGED
@@ -20,7 +20,8 @@ async def interpret(
     instruction = (
         "You are given a user's raw instruction or question. Your task is to rewrite it into a clearer,"
         "more structured prompt for an LLM or system, making any implicit or missing details explicit. "
-        "Return only the re-written prompt."
+        "Return only the re-written prompt. Do not assume any details not mentioned in the input, nor "
+        "give additional instruction than what is explicitly stated."
     )
     guidance = (
         f"Domain hint: {domain or 'general'}. "
lionagi/operations/operate/operate.py CHANGED
@@ -63,6 +63,7 @@ async def operate(
     ] = "return_value",
     operative_model: type[BaseModel] = None,
     request_model: type[BaseModel] = None,
+    include_token_usage_to_model: bool = False,
     **kwargs,
 ) -> list | BaseModel | None | dict | str:
     if operative_model:
@@ -138,6 +139,7 @@ async def operate(
         image_detail=image_detail,
         tool_schemas=tool_schemas,
         return_ins_res_message=True,
+        include_token_usage_to_model=include_token_usage_to_model,
         **kwargs,
     )
     branch.msgs.add_message(instruction=ins)
lionagi/service/endpoints/base.py CHANGED
@@ -349,11 +349,37 @@ class APICalling(Event):
     endpoint: EndPoint = Field(exclude=True)
     is_cached: bool = Field(default=False, exclude=True)
     should_invoke_endpoint: bool = Field(default=True, exclude=True)
+    include_token_usage_to_model: bool = Field(
+        default=False,
+        exclude=True,
+        description="Whether to include token usage information into instruction messages",
+    )

     @model_validator(mode="after")
     def _validate_streaming(self) -> Self:
         if self.payload.get("stream") is True:
             self.streaming = True
+
+        if self.include_token_usage_to_model:
+            if isinstance(self.payload["messages"][-1], dict):
+                required_tokens = self.required_tokens
+                self.payload["messages"][-1][
+                    "content"
+                ] += f"\n\nEstimated Current Token Usage: {required_tokens}"
+                if "model" in self.payload:
+                    if (
+                        self.payload["model"].startswith("gpt-4")
+                        or "o1mini" in self.payload["model"]
+                        or "o1-preview" in self.payload["model"]
+                    ):
+                        self.payload["messages"][-1]["content"] += "/128_000"
+                    elif "o1" in self.payload["model"]:
+                        self.payload["messages"][-1]["content"] += "/200_000"
+                    elif "sonnet" in self.payload["model"]:
+                        self.payload["messages"][-1]["content"] += "/200_000"
+                    elif "haiku" in self.payload["model"]:
+                        self.payload["messages"][-1]["content"] += "/200_000"
+
         return self

     @property
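
The validator now annotates the last outgoing message with an estimated token count plus a context-window denominator inferred from the model name. The same heuristic in isolation, a sketch using the window sizes hard-coded in the diff:

    # Sketch of the token-usage annotation (mirrors the diff's heuristics).
    def annotate_usage(messages: list[dict], model: str, required_tokens: int) -> None:
        if not messages or not isinstance(messages[-1], dict):
            return
        messages[-1]["content"] += f"\n\nEstimated Current Token Usage: {required_tokens}"
        if model.startswith("gpt-4") or "o1mini" in model or "o1-preview" in model:
            messages[-1]["content"] += "/128_000"
        elif "o1" in model or "sonnet" in model or "haiku" in model:
            messages[-1]["content"] += "/200_000"

    msgs = [{"role": "user", "content": "Hello"}]
    annotate_usage(msgs, "gpt-4o", required_tokens=1234)
    # msgs[-1]["content"] now ends with "Estimated Current Token Usage: 1234/128_000"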
lionagi/service/imodel.py CHANGED
@@ -162,7 +162,9 @@ class iModel:
         else:
             self.streaming_process_func = streaming_process_func

-    def create_api_calling(self, **kwargs) -> APICalling:
+    def create_api_calling(
+        self, include_token_usage_to_model: bool = False, **kwargs
+    ) -> APICalling:
         """Constructs an `APICalling` object from endpoint-specific payload.

         Args:
@@ -183,6 +185,7 @@ class iModel:
             endpoint=self.endpoint,
             is_cached=payload.get("is_cached", False),
             should_invoke_endpoint=self.should_invoke_endpoint,
+            include_token_usage_to_model=include_token_usage_to_model,
         )

     async def process_chunk(self, chunk) -> None:
@@ -200,7 +203,12 @@ class iModel:
                 return await self.streaming_process_func(chunk)
             return self.streaming_process_func(chunk)

-    async def stream(self, api_call=None, **kwargs) -> AsyncGenerator:
+    async def stream(
+        self,
+        api_call=None,
+        include_token_usage_to_model: bool = False,
+        **kwargs,
+    ) -> AsyncGenerator:
         """Performs a streaming API call with the given arguments.

         Args:
@@ -214,7 +222,10 @@ class iModel:
         """
         if api_call is None:
             kwargs["stream"] = True
-            api_call = self.create_api_calling(**kwargs)
+            api_call = self.create_api_calling(
+                include_token_usage_to_model=include_token_usage_to_model,
+                **kwargs,
+            )
         await self.executor.append(api_call)

         if (
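
A small usage sketch of the extended `stream` signature; the `imodel` instance and its endpoint configuration are assumed:

    # Sketch: enabling token-usage annotation on a streaming call (imodel assumed).
    async def demo(imodel):
        async for chunk in imodel.stream(
            messages=[{"role": "user", "content": "hello"}],
            include_token_usage_to_model=True,  # forwarded into create_api_calling()
        ):
            print(chunk)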
lionagi/service/providers/openai_/chat_completions.py CHANGED
@@ -89,6 +89,8 @@ class OpenAIChatCompletionEndPoint(ChatCompletionEndPoint):
             payload.pop("top_p", None)
             if payload["messages"][0].get("role") == "system":
                 payload["messages"][0]["role"] = "developer"
+        else:
+            payload.pop("reasoning_effort", None)

         return {
             "payload": payload,
lionagi/session/branch.py CHANGED
@@ -941,6 +941,7 @@ class Branch(Element, Communicatable, Relational):
         ] = "return_value",
         operative_model: type[BaseModel] = None,
         request_model: type[BaseModel] = None,
+        include_token_usage_to_model: bool = False,
         **kwargs,
     ) -> list | BaseModel | None | dict | str:
         """
@@ -1028,6 +1029,8 @@ class Branch(Element, Communicatable, Relational):
                 Alias for `response_format`.
             request_model (type[BaseModel], optional):
                 Another alias for `response_format`.
+            include_token_usage_to_model:
+                If `True`, includes token usage in the model messages.
             **kwargs:
                 Additional keyword arguments passed to the LLM via `branch.chat()`.

@@ -1080,6 +1083,7 @@ class Branch(Element, Communicatable, Relational):
             operative_model=operative_model,
             request_model=request_model,
             imodel=imodel,
+            include_token_usage_to_model=include_token_usage_to_model,
             **kwargs,
         )

@@ -1106,6 +1110,7 @@ class Branch(Element, Communicatable, Relational):
         fuzzy_match_kwargs: dict = None,
         clear_messages: bool = False,
         operative_model: type[BaseModel] = None,
+        include_token_usage_to_model: bool = False,
         **kwargs,
     ):
         """
@@ -1190,6 +1195,7 @@ class Branch(Element, Communicatable, Relational):
             fuzzy_match_kwargs=fuzzy_match_kwargs,
             clear_messages=clear_messages,
             operative_model=operative_model,
+            include_token_usage_to_model=include_token_usage_to_model,
             **kwargs,
         )

@@ -1639,6 +1645,7 @@ class Branch(Element, Communicatable, Relational):
         analysis_model: iModel | None = None,
         verbose: bool = False,
         verbose_length: int = None,
+        include_token_usage_to_model: bool = True,
         **kwargs,
     ):
         """
@@ -1688,6 +1695,12 @@ class Branch(Element, Communicatable, Relational):
             analysis_model (iModel | None, optional):
                 A custom LLM model for generating the ReAct analysis steps. If `None`,
                 uses the branch's default `chat_model`.
+            include_token_usage_to_model:
+                If `True`, includes token usage in the model messages.
+            verbose (bool):
+                If `True`, logs detailed information about the process.
+            verbose_length (int):
+                If `verbose=True`, limits the length of logged strings to this value.
             **kwargs:
                 Additional keyword arguments passed into the initial `branch.operate()` call.

@@ -1733,6 +1746,7 @@ class Branch(Element, Communicatable, Relational):
             intermediate_listable=intermediate_listable,
             reasoning_effort=reasoning_effort,
             display_as=display_as,
+            include_token_usage_to_model=include_token_usage_to_model,
             **kwargs,
         )

@@ -1758,6 +1772,7 @@ class Branch(Element, Communicatable, Relational):
         verbose: bool = False,
         display_as: Literal["json", "yaml"] = "yaml",
         verbose_length: int = None,
+        include_token_usage_to_model: bool = True,
         **kwargs,
     ) -> AsyncGenerator:
         from lionagi.operations.ReAct.ReAct import ReActStream
@@ -1784,6 +1799,7 @@ class Branch(Element, Communicatable, Relational):
             verbose_analysis=True,
             display_as=display_as,
             verbose_length=verbose_length,
+            include_token_usage_to_model=include_token_usage_to_model,
             **kwargs,
         ):
             analysis, str_ = result
lionagi/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.9.4"
+__version__ = "0.9.5"
lionagi-0.9.4.dist-info/METADATA → lionagi-0.9.5.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lionagi
-Version: 0.9.4
+Version: 0.9.5
 Summary: An Intelligence Operating System.
 Author-email: HaiyangLi <quantocean.li@gmail.com>
 License: Apache License
lionagi-0.9.4.dist-info/RECORD → lionagi-0.9.5.dist-info/RECORD CHANGED
@@ -4,7 +4,7 @@ lionagi/_errors.py,sha256=JlBTFJnRWtVYcRxKb7fWFiJHLbykl1E19mSJ8sXYVxg,455
 lionagi/_types.py,sha256=9g7iytvSj3UjZxD-jL06_fxuNfgZyWT3Qnp0XYp1wQU,63
 lionagi/settings.py,sha256=W52mM34E6jXF3GyqCFzVREKZrmnUqtZm_BVDsUiDI_s,1627
 lionagi/utils.py,sha256=K36D9AAGiMPR4eM9tYoiVgvH-NdPPSeMQPls09s7keQ,73223
-lionagi/version.py,sha256=e56AvHfJCtG2ZwwINqsxINVbehWdKxMYgIDbjd7P-II,22
+lionagi/version.py,sha256=ORAtCCI2THBDcdzIbh6oBsoshDvkkmXUWpmO4Q5McAk,22
 lionagi/libs/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/libs/parse.py,sha256=JRS3bql0InHJqATnAatl-hQv4N--XXw4P77JHhTFnrc,1011
 lionagi/libs/file/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
@@ -48,24 +48,24 @@ lionagi/operations/__init__.py,sha256=2HExKTx2J3iKWsvS9YaF6i5SZgqcpAJkVmWbi2H5A5
 lionagi/operations/manager.py,sha256=H7UY86PIxvxKdzJY9YVsWyJcqlwLWhVyvm4sYePH_uY,565
 lionagi/operations/types.py,sha256=fM8HphnbBifMzhoKKvdl3JxGCBHlEGPJEYkLWj9b7vE,704
 lionagi/operations/utils.py,sha256=LrWr_JEyJmSw5RL03KZhWgDKYsjFk0-OS8SoaGU7Jhs,1220
-lionagi/operations/ReAct/ReAct.py,sha256=eBCQOzVHv85uEL4YH1qv8NhbzM0t-qyKZ4JI7tQRXQ8,11971
+lionagi/operations/ReAct/ReAct.py,sha256=f3_Q3Sc9U7i_4IBzFN3KxohUnMTMa_yFJ2I9rSlERvE,13578
 lionagi/operations/ReAct/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
-lionagi/operations/ReAct/utils.py,sha256=jXf9LafAg0GtZajXqT4jOLJHW_Y4eL-hbz5_OlFCBh8,3612
+lionagi/operations/ReAct/utils.py,sha256=84Giel5ToqfbN5F6Tm0uw8yZTTnxiM_jWuFEhnKOxM8,3800
 lionagi/operations/_act/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/operations/_act/act.py,sha256=CunHTTZcS6xNUe0xKSDgtMJ7-ucSvHeW4BtmVjXnaxk,2958
 lionagi/operations/brainstorm/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/operations/brainstorm/brainstorm.py,sha256=iRZUW_V-0Ncw7Av0_WBk3oNRWb4LqQU5i2gQDxulYWY,17222
 lionagi/operations/brainstorm/prompt.py,sha256=Dqi4NNeztdI4iutggRqjnOrG4a4E2JtwIAtRnjZ_ghQ,610
 lionagi/operations/chat/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
-lionagi/operations/chat/chat.py,sha256=GHhudJlQk-okoP-S8ceUnoWF-ccxbdQW_p8gqsE7t6w,5348
+lionagi/operations/chat/chat.py,sha256=xJAH2H0zyVvxiL3XtW3MC6YrwCCB1uCkwcQIJ1YsIOk,5466
 lionagi/operations/communicate/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lionagi/operations/communicate/communicate.py,sha256=f8dQGzFSCrBHcoe_z75GcD8zxgcWQ5umK_Y8BUVaQ3c,2962
+lionagi/operations/communicate/communicate.py,sha256=dPaPqg898biY6j_FlgH4HEJxTK6T_87ixXWhD6kbk40,3077
 lionagi/operations/instruct/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/operations/instruct/instruct.py,sha256=-HDdCgvRGCNB5vMCV0xM2KEa9VrjosApXSKtQQ9d0xQ,795
 lionagi/operations/interpret/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
-lionagi/operations/interpret/interpret.py,sha256=zdsPx8qylFOTH3QYfpQeqNj3wddaqJORKy0_1flDWnk,1399
+lionagi/operations/interpret/interpret.py,sha256=8_F3oYaoYK8MDcK4iCwksBP7sI0UlgBiZSrUusdlKNo,1528
 lionagi/operations/operate/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
-lionagi/operations/operate/operate.py,sha256=NFC1EqykJkz__4oFpzzR8wGmH3_nOBRW4vdVnG2C1tI,7218
+lionagi/operations/operate/operate.py,sha256=j5dGWhHlcWnO-aaLZ4Xe0Hb1M7FGp9BGm356-LOH6rA,7333
 lionagi/operations/parse/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/operations/parse/parse.py,sha256=i2q6YGRwsp2WEu0aySfZ6io7iLNAqPAx1wWd2hUjpgM,3296
 lionagi/operations/plan/__init__.py,sha256=yGBPll6lOqVjadbTvDLGrTlMx3FfBW-e00z7AMvg7Uo,156
@@ -161,11 +161,11 @@ lionagi/protocols/messages/templates/instruction_message.jinja2,sha256=L-ptw5OHx
 lionagi/protocols/messages/templates/system_message.jinja2,sha256=JRKJ0aFpYfaXSFouKc_N4unZ35C3yZTOWhIrIdCB5qk,215
 lionagi/protocols/messages/templates/tool_schemas.jinja2,sha256=ozIaSDCRjIAhLyA8VM6S-YqS0w2NcctALSwx4LjDwII,126
 lionagi/service/__init__.py,sha256=DMGXIqPsmut9H5GT0ZeSzQIzYzzPwI-2gLXydpbwiV8,21
-lionagi/service/imodel.py,sha256=w3cqrJSz2q7k_Y3BXsuS85ZTpBOfa0bNM7Gr58IdTaA,14589
+lionagi/service/imodel.py,sha256=TcqjpkRkPEEny6uaY383tL_45GcLvjju3-NVZS4EbLU,14907
 lionagi/service/manager.py,sha256=FkuqAtLErqLmXNnDtuAdTUFo4uuE_VL660BBGBhzInU,1435
 lionagi/service/types.py,sha256=CHPi8Bxl_yJ1pl2jYZBOrTHbT8_oO9sK75d4LMB651g,486
 lionagi/service/endpoints/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
-lionagi/service/endpoints/base.py,sha256=WuXs2tDrOxbbv9-UkiAgpVcM_6nuCNmvvry7eN1GuoI,23482
+lionagi/service/endpoints/base.py,sha256=nr53A1Z6zGmP5aqkyG-Gc-vbKG22CqvOj1x4nPwnBJ8,24763
 lionagi/service/endpoints/chat_completion.py,sha256=nihV7kCYm7ixdm8dH0JW7vKjqH9yIom4QDXGeDwuO6E,2964
 lionagi/service/endpoints/match_endpoint.py,sha256=hPCqFwVirj5g9Husec980OCUynjRmr0zQzrs7O4yP74,1874
 lionagi/service/endpoints/rate_limited_processor.py,sha256=P0CsMyhuG8OHCPYe2qez92Bm7v2ZRq4L5I6LOiAoGYs,5199
@@ -181,14 +181,14 @@ lionagi/service/providers/exa_/types.py,sha256=8ODjXpFajBE9-DGqBJNS--GObwmLSDi66
 lionagi/service/providers/groq_/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/service/providers/groq_/chat_completions.py,sha256=q1p_1qus4vduWWBzs9V_KbNrqU2Tu2o8TZm6Fh09fw4,1343
 lionagi/service/providers/openai_/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
-lionagi/service/providers/openai_/chat_completions.py,sha256=d-tRlGBmNCcKS_06Ps6dZ6zUa18l0rHHXKo3xPKYKj8,2803
+lionagi/service/providers/openai_/chat_completions.py,sha256=y3RAgI5WQH5EwT1wZxp5ttnkCxUJEcOM3Cta6u9cpQo,2867
 lionagi/service/providers/openrouter_/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/service/providers/openrouter_/chat_completions.py,sha256=0pdXjJCXmCPPbKKVubrnqofaodTOxWTJam8fd3NgrNk,1525
 lionagi/service/providers/perplexity_/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/service/providers/perplexity_/chat_completions.py,sha256=O4MIS_3xIINGjkAZdlw0Bu_jAfBDR4VZA1F8JW2EU1M,1197
 lionagi/service/providers/perplexity_/models.py,sha256=gXH4XGkhZ4aFxvMSDTlHq9Rz1mhu3aTENXAtE-BIr6U,4866
 lionagi/session/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
-lionagi/session/branch.py,sha256=XL0P507Jfqk9LhC8rDvajkjVE8FkPx7hnLltb-LdqHw,71503
+lionagi/session/branch.py,sha256=dKlaM6hh_q7OoXkz4E5S3aS4ksqC2yzdhjzI7xe6pzU,72439
 lionagi/session/prompts.py,sha256=AhuHL19s0TijVZX3tMKUKMi6l88xeVdpkuEn2vJSRyU,3236
 lionagi/session/session.py,sha256=8SuNMiJX6IAW6Ou8aDK0LsVG7zcD5yd22sakMyrd3pw,8987
 lionagi/tools/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
@@ -211,7 +211,7 @@ lionagi/tools/file/writer.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,
 lionagi/tools/file/providers/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/tools/file/providers/docling_.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/tools/query/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
-lionagi-0.9.4.dist-info/METADATA,sha256=46U0oyzqf8A6wuF-KlmsEZWVDiRQCFyZrYfPeG5ZgOQ,18053
-lionagi-0.9.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-lionagi-0.9.4.dist-info/licenses/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
-lionagi-0.9.4.dist-info/RECORD,,
+lionagi-0.9.5.dist-info/METADATA,sha256=RgrktdGFQrMKZn0BfHReT-JQ03IVXAkvTvqq6C5g_is,18053
+lionagi-0.9.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+lionagi-0.9.5.dist-info/licenses/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
+lionagi-0.9.5.dist-info/RECORD,,