lionagi 0.12.8__py3-none-any.whl → 0.13.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -151,9 +151,6 @@ def load_pydantic_model_from_schema(
151
151
  base_class="pydantic.BaseModel",
152
152
  )
153
153
  except Exception as e:
154
- # Optional: Print generated code on failure for debugging
155
- # if output_file.exists():
156
- # print(f"--- Generated Code (Error) ---\n{output_file.read_text()}\n--------------------------")
157
154
  error_msg = "Failed to generate model code"
158
155
  raise RuntimeError(error_msg) from e
159
156
 
@@ -175,15 +172,9 @@ def load_pydantic_model_from_schema(
175
172
  # --- 3. Import the Generated Module Dynamically ---
176
173
  try:
177
174
  spec, generated_module = get_modules()
178
- # Important: Make pydantic available within the executed module's globals
179
- # if it's not explicitly imported by the generated code for some reason.
180
- # Usually, datamodel-code-generator handles imports well.
181
- # generated_module.__dict__['BaseModel'] = BaseModel
182
175
  spec.loader.exec_module(generated_module)
183
176
 
184
177
  except Exception as e:
185
- # Optional: Print generated code on failure for debugging
186
- # print(f"--- Generated Code (Import Error) ---\n{output_file.read_text()}\n--------------------------")
187
178
  error_msg = f"Failed to load generated module ({output_file})"
188
179
  raise RuntimeError(error_msg) from e
189
180
 
@@ -53,7 +53,6 @@ class EndpointConfig(BaseModel):
53
53
 
54
54
  @model_validator(mode="after")
55
55
  def _validate_api_key(self):
56
-
57
56
  if self.api_key is not None:
58
57
  if isinstance(self.api_key, SecretStr):
59
58
  self._api_key = self.api_key.get_secret_value()
@@ -61,6 +60,9 @@ class EndpointConfig(BaseModel):
61
60
  # Skip settings lookup for ollama special case
62
61
  if self.provider == "ollama" and self.api_key == "ollama_key":
63
62
  self._api_key = "ollama_key"
63
+ if self.provider == "claude_code":
64
+ self._api_key = "dummy"
65
+
64
66
  else:
65
67
  from lionagi.config import settings
66
68
 
@@ -89,9 +91,11 @@ class EndpointConfig(BaseModel):
89
91
  if isinstance(v, BaseModel):
90
92
  return v.__class__
91
93
  if isinstance(v, dict | str):
92
- from lionagi.libs.schema import SchemaUtil
94
+ from lionagi.libs.schema.load_pydantic_model_from_schema import (
95
+ load_pydantic_model_from_schema,
96
+ )
93
97
 
94
- return SchemaUtil.load_pydantic_model_from_schema(v)
98
+ return load_pydantic_model_from_schema(v)
95
99
  except Exception as e:
96
100
  raise ValueError("Invalid request options") from e
97
101
  raise ValueError(
@@ -2,83 +2,216 @@
2
2
  #
3
3
  # SPDX-License-Identifier: Apache-2.0
4
4
 
5
+ from __future__ import annotations
6
+
5
7
  import json
6
8
  from pathlib import Path
7
- from typing import Any
9
+ from typing import Any, Literal
8
10
 
9
- from claude_code_sdk import ClaudeCodeOptions, PermissionMode
10
- from pydantic import BaseModel, Field
11
+ from claude_code_sdk import ClaudeCodeOptions
12
+ from claude_code_sdk import query as sdk_query
13
+ from pydantic import BaseModel, Field, field_validator, model_validator
11
14
 
12
15
  from lionagi.service.connections.endpoint import Endpoint
13
16
  from lionagi.service.connections.endpoint_config import EndpointConfig
14
17
  from lionagi.utils import to_dict
15
18
 
19
+ # --------------------------------------------------------------------------- constants
20
+ ClaudePermission = Literal[
21
+ "default",
22
+ "acceptEdits",
23
+ "bypassPermissions",
24
+ "dangerously-skip-permissions",
25
+ ]
26
+
27
+ CLAUDE_CODE_OPTION_PARAMS = {
28
+ "allowed_tools",
29
+ "max_thinking_tokens",
30
+ "mcp_tools",
31
+ "mcp_servers",
32
+ "permission_mode",
33
+ "continue_conversation",
34
+ "resume",
35
+ "max_turns",
36
+ "disallowed_tools",
37
+ "model",
38
+ "permission_prompt_tool_name",
39
+ "cwd",
40
+ "system_prompt",
41
+ "append_system_prompt",
42
+ }
43
+
16
44
 
45
+ # --------------------------------------------------------------------------- request model
17
46
  class ClaudeCodeRequest(BaseModel):
18
- prompt: str = Field(description="The prompt for Claude Code")
19
- allowed_tools: list[str] = Field(
20
- default_factory=list, description="List of allowed tools"
21
- )
22
- max_thinking_tokens: int = 8000
23
- mcp_tools: list[str] = list
24
- mcp_servers: dict[str, Any] = Field(default_factory=dict)
25
- permission_mode: PermissionMode | None = None
47
+ # -- conversational bits -------------------------------------------------
48
+ prompt: str = Field(description="The prompt for Claude Code")
49
+ system_prompt: str | None = None
50
+ append_system_prompt: str | None = None
51
+ max_turns: int | None = None
26
52
  continue_conversation: bool = False
27
53
  resume: str | None = None
28
- max_turns: int | None = None
29
- disallowed_tools: list[str] = Field(default_factory=list)
30
- model: str | None = None
54
+
55
+ # -- repo / workspace ----------------------------------------------------
56
+ repo: Path = Field(default_factory=Path.cwd, exclude=True)
57
+ ws: str | None = None # sub-directory under repo
58
+ add_dir: str | None = None # extra read-only mount
59
+ allowed_tools: list[str] | None = None
60
+
61
+ # -- runtime & safety ----------------------------------------------------
62
+ model: Literal["sonnet", "opus"] | str | None = "sonnet"
63
+ max_thinking_tokens: int | None = None
64
+ mcp_tools: list[str] = Field(default_factory=list)
65
+ mcp_servers: dict[str, Any] = Field(default_factory=dict)
66
+ permission_mode: ClaudePermission | None = None
31
67
  permission_prompt_tool_name: str | None = None
32
- cwd: str | Path | None = None
33
- system_prompt: str | None = None
34
- append_system_prompt: str | None = None
68
+ disallowed_tools: list[str] = Field(default_factory=list)
69
+
70
+ # ------------------------ validators & helpers --------------------------
71
+ @field_validator("permission_mode", mode="before")
72
+ def _norm_perm(cls, v):
73
+ if v in {
74
+ "dangerously-skip-permissions",
75
+ "--dangerously-skip-permissions",
76
+ }:
77
+ return "bypassPermissions"
78
+ return v
79
+
80
+ # Workspace path derived from repo + ws
81
+ def cwd(self) -> Path:
82
+ if not self.ws:
83
+ return self.repo
84
+
85
+ # Convert to Path object for proper validation
86
+ ws_path = Path(self.ws)
87
+
88
+ # Check for absolute paths or directory traversal attempts
89
+ if ws_path.is_absolute():
90
+ raise ValueError(
91
+ f"Workspace path must be relative, got absolute: {self.ws}"
92
+ )
93
+
94
+ if ".." in ws_path.parts:
95
+ raise ValueError(
96
+ f"Directory traversal detected in workspace path: {self.ws}"
97
+ )
98
+
99
+ # Resolve paths to handle symlinks and normalize
100
+ repo_resolved = self.repo.resolve()
101
+ result = (self.repo / ws_path).resolve()
102
+
103
+ # Ensure the resolved path is within the repository bounds
104
+ try:
105
+ result.relative_to(repo_resolved)
106
+ except ValueError:
107
+ raise ValueError(
108
+ f"Workspace path escapes repository bounds. "
109
+ f"Repository: {repo_resolved}, Workspace: {result}"
110
+ )
111
+
112
+ return result
113
+
114
+ @model_validator(mode="after")
115
+ def _check_perm_workspace(self):
116
+ if self.permission_mode == "bypassPermissions":
117
+ # Use secure path validation with resolved paths
118
+ repo_resolved = self.repo.resolve()
119
+ cwd_resolved = self.cwd().resolve()
35
120
 
121
+ # Check if cwd is within repo bounds using proper path methods
122
+ try:
123
+ cwd_resolved.relative_to(repo_resolved)
124
+ except ValueError:
125
+ raise ValueError(
126
+ f"With bypassPermissions, workspace must be within repository bounds. "
127
+ f"Repository: {repo_resolved}, Workspace: {cwd_resolved}"
128
+ )
129
+ return self
130
+
131
+ # ------------------------ CLI helpers -----------------------------------
132
+ def as_cmd_args(self) -> list[str]:
133
+ """Build argument list for the *Node* `claude` CLI."""
134
+ full_prompt = f"Human User: {self.prompt}\n\nAssistant:"
135
+ args: list[str] = ["-p", full_prompt, "--output-format", "stream-json"]
136
+ if self.allowed_tools:
137
+ args.append("--allowedTools")
138
+ for tool in self.allowed_tools:
139
+ args.append(f'"{tool}"')
140
+
141
+ if self.disallowed_tools:
142
+ args.append("--disallowedTools")
143
+ for tool in self.disallowed_tools:
144
+ args.append(f'"{tool}"')
145
+
146
+ if self.resume:
147
+ args += ["--resume", self.resume]
148
+ elif self.continue_conversation:
149
+ args.append("--continue")
150
+
151
+ if self.max_turns:
152
+ # +1 because CLI counts *pairs*
153
+ args += ["--max-turns", str(self.max_turns + 1)]
154
+
155
+ if self.permission_mode == "bypassPermissions":
156
+ args += ["--dangerously-skip-permissions"]
157
+
158
+ if self.add_dir:
159
+ args += ["--add-dir", self.add_dir]
160
+
161
+ args += ["--model", self.model or "sonnet", "--verbose"]
162
+ return args
163
+
164
+ # ------------------------ SDK helpers -----------------------------------
36
165
  def as_claude_options(self) -> ClaudeCodeOptions:
37
- dict_ = self.model_dump(exclude_unset=True)
38
- dict_.pop("prompt")
39
- return ClaudeCodeOptions(**dict_)
166
+ data = {
167
+ k: v
168
+ for k, v in self.model_dump(exclude_none=True).items()
169
+ if k in CLAUDE_CODE_OPTION_PARAMS
170
+ }
171
+ return ClaudeCodeOptions(**data)
40
172
 
173
+ # ------------------------ convenience constructor -----------------------
41
174
  @classmethod
42
175
  def create(
43
176
  cls,
44
- messages: list[dict],
177
+ messages: list[dict[str, Any]],
45
178
  resume: str | None = None,
46
- continue_conversation: bool = None,
179
+ continue_conversation: bool | None = None,
47
180
  **kwargs,
48
181
  ):
182
+ if not messages:
183
+ raise ValueError("messages may not be empty")
184
+
49
185
  prompt = messages[-1]["content"]
50
- if isinstance(prompt, dict | list):
186
+ if isinstance(prompt, (dict, list)):
51
187
  prompt = json.dumps(prompt)
52
188
 
53
- # If resume is provided, set continue_conversation to True
54
- if resume is not None and continue_conversation is None:
189
+ if resume and continue_conversation is None:
55
190
  continue_conversation = True
56
191
 
57
- dict_ = dict(
192
+ data: dict[str, Any] = dict(
58
193
  prompt=prompt,
59
- continue_conversation=continue_conversation,
60
194
  resume=resume,
195
+ continue_conversation=bool(continue_conversation),
61
196
  )
62
197
 
63
- if resume is not None or continue_conversation is not None:
64
- if messages[0]["role"] == "system":
65
- dict_["system_prompt"] = messages[0]["content"]
66
-
67
- if (a := kwargs.get("system_prompt")) is not None:
68
- dict_["append_system_prompt"] = a
198
+ if (messages[0]["role"] == "system") and (
199
+ resume or continue_conversation
200
+ ):
201
+ data["system_prompt"] = messages[0]["content"]
69
202
 
70
- if (a := kwargs.get("append_system_prompt")) is not None:
71
- dict_.setdefault("append_system_prompt", "")
72
- dict_["append_system_prompt"] += str(a)
203
+ # Merge optional system prompts
204
+ if kwargs.get("system_prompt"):
205
+ data["append_system_prompt"] = kwargs.pop("system_prompt")
73
206
 
74
- dict_ = {**dict_, **kwargs}
75
- dict_ = {k: v for k, v in dict_.items() if v is not None}
76
- return cls(**dict_)
207
+ data.update(kwargs)
208
+ return cls.model_validate(data, strict=False)
77
209
 
78
210
 
211
+ # --------------------------------------------------------------------------- SDK endpoint
79
212
  ENDPOINT_CONFIG = EndpointConfig(
80
213
  name="claude_code",
81
- provider="anthropic",
214
+ provider="claude_code",
82
215
  base_url="internal",
83
216
  endpoint="query",
84
217
  api_key="dummy",
@@ -88,50 +221,25 @@ ENDPOINT_CONFIG = EndpointConfig(
88
221
 
89
222
 
90
223
  class ClaudeCodeEndpoint(Endpoint):
91
- def __init__(self, config=ENDPOINT_CONFIG, **kwargs):
92
- super().__init__(config=config, **kwargs)
224
+ """Direct Python-SDK (non-CLI) endpoint - unchanged except for bug-fixes."""
93
225
 
94
- def create_payload(
95
- self,
96
- request: dict | BaseModel,
97
- **kwargs,
98
- ):
99
- request_dict = to_dict(request)
100
- # Merge stored kwargs from config, then request, then additional kwargs
101
- request_dict = {**self.config.kwargs, **request_dict, **kwargs}
102
- messages = request_dict.pop("messages", None)
226
+ def __init__(self, config: EndpointConfig = ENDPOINT_CONFIG, **kwargs):
227
+ super().__init__(config=config, **kwargs)
103
228
 
104
- resume = request_dict.pop("resume", None)
105
- continue_conversation = request_dict.pop("continue_conversation", None)
229
+ def create_payload(self, request: dict | BaseModel, **kwargs):
230
+ req_dict = {**self.config.kwargs, **to_dict(request), **kwargs}
231
+ messages = req_dict.pop("messages")
232
+ req_obj = ClaudeCodeRequest.create(messages=messages, **req_dict)
233
+ return {"request": req_obj}, {}
106
234
 
107
- request_obj = ClaudeCodeRequest.create(
108
- messages=messages,
109
- resume=resume,
110
- continue_conversation=continue_conversation,
111
- **{
112
- k: v
113
- for k, v in request_dict.items()
114
- if v is not None and k in ClaudeCodeRequest.model_fields
115
- },
235
+ def _stream_claude_code(self, request: ClaudeCodeRequest):
236
+ return sdk_query(
237
+ prompt=request.prompt, options=request.as_claude_options()
116
238
  )
117
- request_options = request_obj.as_claude_options()
118
- payload = {
119
- "prompt": request_obj.prompt,
120
- "options": request_options,
121
- }
122
- return (payload, {})
123
-
124
- def _stream_claude_code(self, prompt: str, options: ClaudeCodeOptions):
125
- from claude_code_sdk import query
126
-
127
- return query(prompt=prompt, options=options)
128
239
 
129
- async def stream(
130
- self,
131
- request: dict | BaseModel,
132
- **kwargs,
133
- ):
134
- async for chunk in self._stream_claude_code(**request, **kwargs):
240
+ async def stream(self, request: dict | BaseModel, **kwargs):
241
+ payload = self.create_payload(request, **kwargs)["request"]
242
+ async for chunk in self._stream_claude_code(payload):
135
243
  yield chunk
136
244
 
137
245
  def _parse_claude_code_response(self, responses: list) -> dict:
@@ -153,7 +261,6 @@ class ClaudeCodeEndpoint(Endpoint):
153
261
  "tool_results": [],
154
262
  "is_error": False,
155
263
  "num_turns": None,
156
- "cost_usd": None,
157
264
  "total_cost_usd": None,
158
265
  "usage": {
159
266
  "prompt_tokens": 0,
@@ -183,7 +290,6 @@ class ClaudeCodeEndpoint(Endpoint):
183
290
  if isinstance(response, types.ResultMessage):
184
291
  results["result"] += response.result.strip() or ""
185
292
  results["usage"] = response.usage
186
- results["cost_usd"] = response.cost_usd
187
293
  results["is_error"] = response.is_error
188
294
  results["total_cost_usd"] = response.total_cost_usd
189
295
  results["num_turns"] = response.num_turns
@@ -202,5 +308,4 @@ class ClaudeCodeEndpoint(Endpoint):
202
308
  async for chunk in self._stream_claude_code(**payload):
203
309
  responses.append(chunk)
204
310
 
205
- # Parse the responses into a consistent format
206
311
  return self._parse_claude_code_response(responses)
lionagi/service/imodel.py CHANGED
@@ -46,6 +46,7 @@ class iModel:
46
46
  limit_tokens: int = None,
47
47
  concurrency_limit: int | None = None,
48
48
  streaming_process_func: Callable = None,
49
+ provider_metadata: dict | None = None,
49
50
  **kwargs,
50
51
  ) -> None:
51
52
  """Initializes the iModel instance.
@@ -79,6 +80,8 @@ class iModel:
79
80
  concurrency_limit (int | None, optional):
80
81
  Maximum number of streaming concurrent requests allowed.
81
82
  only applies to streaming requests.
83
+ provider_metadata (dict | None, optional):
84
+ Provider-specific metadata, such as session IDs for Claude Code.
82
85
  **kwargs:
83
86
  Additional keyword arguments, such as `model`, or any other
84
87
  provider-specific fields.
@@ -121,7 +124,7 @@ class iModel:
121
124
  self.streaming_process_func = streaming_process_func
122
125
 
123
126
  # Provider-specific metadata storage (e.g., session_id for Claude Code)
124
- self._provider_metadata = {}
127
+ self.provider_metadata = provider_metadata or {}
125
128
 
126
129
  def create_api_calling(
127
130
  self, include_token_usage_to_model: bool = False, **kwargs
@@ -142,9 +145,9 @@ class iModel:
142
145
  self.endpoint.config.provider == "claude_code"
143
146
  and "resume" not in kwargs
144
147
  and "session_id" not in kwargs
145
- and self._provider_metadata.get("session_id")
148
+ and self.provider_metadata.get("session_id")
146
149
  ):
147
- kwargs["resume"] = self._provider_metadata["session_id"]
150
+ kwargs["resume"] = self.provider_metadata["session_id"]
148
151
 
149
152
  # The new Endpoint.create_payload returns (payload, headers)
150
153
  payload, headers = self.endpoint.create_payload(request=kwargs)
@@ -280,7 +283,7 @@ class iModel:
280
283
  ):
281
284
  response = completed_call.response
282
285
  if isinstance(response, dict) and "session_id" in response:
283
- self._provider_metadata["session_id"] = response[
286
+ self.provider_metadata["session_id"] = response[
284
287
  "session_id"
285
288
  ]
286
289
 
@@ -322,6 +325,7 @@ class iModel:
322
325
  return {
323
326
  "endpoint": self.endpoint.to_dict(),
324
327
  "processor_config": self.executor.config,
328
+ "provider_metadata": self.provider_metadata,
325
329
  }
326
330
 
327
331
  @classmethod
@@ -338,5 +342,6 @@ class iModel:
338
342
 
339
343
  return cls(
340
344
  endpoint=e1,
345
+ provider_metadata=data.get("provider_metadata"),
341
346
  **data.get("processor_config", {}),
342
347
  )
@@ -6,7 +6,7 @@
6
6
 
7
7
  from __future__ import annotations
8
8
 
9
- from typing import Literal, Optional, Union
9
+ from typing import Literal, Union
10
10
 
11
11
  from pydantic import BaseModel, Field, field_validator
12
12
 
@@ -121,7 +121,7 @@ class TokenCalculator:
121
121
  return num_tokens # buffer for chat
122
122
 
123
123
  @staticmethod
124
- def calcualte_embed_token(inputs: list[str], /, **kwargs) -> int:
124
+ def calculate_embed_token(inputs: list[str], /, **kwargs) -> int:
125
125
  try:
126
126
  if not "inputs" in kwargs:
127
127
  raise ValueError("Missing 'inputs' field in payload")
lionagi/version.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.12.8"
1
+ __version__ = "0.13.0"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lionagi
3
- Version: 0.12.8
3
+ Version: 0.13.0
4
4
  Summary: An Intelligence Operating System.
5
5
  Author-email: HaiyangLi <quantocean.li@gmail.com>, Liangbingyan Luo <llby_luo@outlook.com>
6
6
  License: Apache License
@@ -230,11 +230,11 @@ Requires-Dist: python-dotenv>=1.1.0
230
230
  Requires-Dist: tiktoken>=0.8.0
231
231
  Requires-Dist: toml>=0.9.0
232
232
  Provides-Extra: all
233
- Requires-Dist: claude-code-sdk>=0.0.10; extra == 'all'
233
+ Requires-Dist: claude-code-sdk>=0.0.14; extra == 'all'
234
234
  Requires-Dist: docling>=2.15.1; extra == 'all'
235
235
  Requires-Dist: ollama>=0.5.0; extra == 'all'
236
236
  Provides-Extra: claude-code
237
- Requires-Dist: claude-code-sdk>=0.0.10; extra == 'claude-code'
237
+ Requires-Dist: claude-code-sdk>=0.0.14; extra == 'claude-code'
238
238
  Provides-Extra: docs
239
239
  Requires-Dist: furo>=2024.8.6; extra == 'docs'
240
240
  Requires-Dist: sphinx-autobuild>=2024.10.3; extra == 'docs'
@@ -6,7 +6,7 @@ lionagi/config.py,sha256=dAhDFKtaaSfn6WT9dwX9Vd4TWWs6-Su1FgYIrFgYcgc,3709
6
6
  lionagi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
7
  lionagi/settings.py,sha256=W52mM34E6jXF3GyqCFzVREKZrmnUqtZm_BVDsUiDI_s,1627
8
8
  lionagi/utils.py,sha256=uLTJKl7aTnFXV6ehA6zwiwEB7G2nQYKsO2pZ6mqFzUk,78908
9
- lionagi/version.py,sha256=F7xxYe0dXryqS1cGEXFikx8AI7-UsZzdi89hJdyx-b0,23
9
+ lionagi/version.py,sha256=DgpLNbv0e1LIEOOe54Db8_390i9pelMEFEnsBsNmyhA,23
10
10
  lionagi/fields/__init__.py,sha256=8oU7Vfk-fKiULFKqhM6VpJMqdZcVXPTM7twVfNDN_SQ,603
11
11
  lionagi/fields/action.py,sha256=iWSApCM77jS0Oc28lb7G601Etkp-yjx5U1hfI_FQgfA,5792
12
12
  lionagi/fields/base.py,sha256=5CJc7j8kTTWzXwpYzkSAFzx4BglABfx3AElIATKB7bg,3857
@@ -46,7 +46,7 @@ lionagi/libs/schema/extract_code_block.py,sha256=PuJbJj1JnqR5fSZudowPcVPpEoKISLr
46
46
  lionagi/libs/schema/extract_docstring.py,sha256=aYyLSRlB8lTH9QF9-6a56uph3AAkNuTyZ0S_duf5-fw,5729
47
47
  lionagi/libs/schema/function_to_schema.py,sha256=XAB031WbYu3a7eFJyYjXVMAjmtWYSYr5kC_DYgjiuyM,5604
48
48
  lionagi/libs/schema/json_schema.py,sha256=cuHcaMr748O9g6suNGmRx4tRXcidd5-c7AMGjTIZyHM,7670
49
- lionagi/libs/schema/load_pydantic_model_from_schema.py,sha256=WwmOqo4mlSjClYb8s0jOr4PODeFz777TxfeyBnzZ3rc,10899
49
+ lionagi/libs/schema/load_pydantic_model_from_schema.py,sha256=VGrmnMb26JNcB9LaQcqTwW--sUljqqlqqsPD69zjcEc,10183
50
50
  lionagi/libs/token_transform/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
51
51
  lionagi/libs/token_transform/base.py,sha256=LBnaDgi4HNgaJJGwIzWcQjVMdu49i_93rRvOvMU22Rw,1545
52
52
  lionagi/libs/token_transform/llmlingua.py,sha256=DkeLUlrb7rGx3nZ04aADU9HXXu5mZTf_DBwT0xhzIv4,7
@@ -163,28 +163,28 @@ lionagi/protocols/operatives/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5Z
163
163
  lionagi/protocols/operatives/operative.py,sha256=PXEMzD6tFM5PPK9kkPaSb7DBIzy7TNC3f2evuGhWhpg,6677
164
164
  lionagi/protocols/operatives/step.py,sha256=AXXRhjsbWqkoMDQ_JyqsfQItQsjBJmldugJz36mA4N0,9772
165
165
  lionagi/service/__init__.py,sha256=DMGXIqPsmut9H5GT0ZeSzQIzYzzPwI-2gLXydpbwiV8,21
166
- lionagi/service/imodel.py,sha256=LAlU6orAAZr7B-_3upZng8OvMVz0FadWLRawwdqfBGY,12346
166
+ lionagi/service/imodel.py,sha256=jE1Y3JzGwJZesHQtRSadVTAHA2TGdBAwfmzQoOA5Meg,12651
167
167
  lionagi/service/manager.py,sha256=9-dIE4ZftI94RLLLPXH-yB4E3zfnbTs3yppdFDPNchM,1165
168
168
  lionagi/service/rate_limited_processor.py,sha256=PnO0rBf9ObKhD3vtl6pYZX3nHVDvMPdOww59zCWgslQ,5230
169
169
  lionagi/service/resilience.py,sha256=uYJYZQ9M-tje8ME3vJmYabXwKHF1c3Ij4-WrdCwogcs,18742
170
- lionagi/service/token_calculator.py,sha256=zpbk1YlFW5M_e-vs9YJyhDThMm3U3IaZ7hKm-_6XrDU,6460
170
+ lionagi/service/token_calculator.py,sha256=FysR31hpv4QPVsbZBVPR60LSW_xCQpwXx89zOJJ73LQ,6460
171
171
  lionagi/service/types.py,sha256=6zavqBxK1Fj0nB9eZgJn3JICxmdT-n0nn8YWZFzM5LU,508
172
172
  lionagi/service/connections/__init__.py,sha256=yHQZ7OJpCftd6CStYR8inbxjJydYdmv9kCvbUBhJ2zU,362
173
173
  lionagi/service/connections/api_calling.py,sha256=XetCrjMhOHNKGGv-NzHhBhVS7XjKPalrS_iExzU-4S4,8005
174
174
  lionagi/service/connections/endpoint.py,sha256=yNIjq9wETMnytynGbq3qY_dkyaMlaHrcfiZjS-tnmLg,14756
175
- lionagi/service/connections/endpoint_config.py,sha256=jCgMOujN5KzQ2miOrfEqKrVZW3jlZEgBp2R3_2sXynI,4380
175
+ lionagi/service/connections/endpoint_config.py,sha256=QMfxqwvHhNjFSZupJ8oDCMlZK_uQkjGC145w6ETxnB8,4558
176
176
  lionagi/service/connections/header_factory.py,sha256=22sG4ian3MiNklF6SdQqkEYgtWKOZik_yDE0Lna6BiE,1754
177
177
  lionagi/service/connections/match_endpoint.py,sha256=mEZPDkK1qtvjTGN9-PZsK7w_yB7642nZiJsb0l5QUx4,1827
178
178
  lionagi/service/connections/providers/__init__.py,sha256=3lzOakDoBWmMaNnT2g-YwktPKa_Wme4lnPRSmOQfayY,105
179
179
  lionagi/service/connections/providers/anthropic_.py,sha256=SUPnw2UqjY5wuHXLHas6snMTzhQ-UuixvPYbkVnXn34,3083
180
- lionagi/service/connections/providers/claude_code_.py,sha256=o7tfhCcOxkOKe3dwdzIme6ov0B3Os1Xiu9uewIw2pg0,7182
180
+ lionagi/service/connections/providers/claude_code_.py,sha256=TmIxVCCbHxe2NIc2Kb3oJTYFGdJZi5IkB_23LQ1PcG0,11089
181
181
  lionagi/service/connections/providers/exa_.py,sha256=GGWaD9jd5gKM257OfUaIBBKIqR1NrNcBE67p_7JbK7g,938
182
182
  lionagi/service/connections/providers/oai_.py,sha256=hH5oEMvS9A0Kbvh-lSFm5dLiqPV2gfRra6zzDOq78P0,5191
183
183
  lionagi/service/connections/providers/ollama_.py,sha256=jdx6dGeChwVk5TFfFRbpnrpKzj8YQZw6D5iWJ6zYmfk,4096
184
184
  lionagi/service/connections/providers/perplexity_.py,sha256=9MH9YmMy9Jg7JDMJHQxxMYHyjJ4NP0OlN7sCuhla85I,917
185
185
  lionagi/service/third_party/README.md,sha256=qFjWnI8rmLivIyr6Tc-hRZh-rQwntROp76af4MBNJJc,2214
186
186
  lionagi/service/third_party/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
187
- lionagi/service/third_party/anthropic_models.py,sha256=wA2NiiPJzkkVF_-wFgnTJoKwvuoduqHw-yVSetp0XZI,4014
187
+ lionagi/service/third_party/anthropic_models.py,sha256=oqSPSlcayYG-fS5BLiLeTtkrpaxgkPhEK_YgneumrOo,4004
188
188
  lionagi/service/third_party/exa_models.py,sha256=G_hnekcy-DillPLzMoDQ8ZisVAL8Mp7iMAK4xqAT_3w,5470
189
189
  lionagi/service/third_party/openai_models.py,sha256=sF-fQ726CnaDBgLY_r2NdPqc3GicPKhZjh5F8IfjBO0,501904
190
190
  lionagi/service/third_party/pplx_models.py,sha256=Nkm1ftESBa_NwP9ITBUNqLmAZ3Jh92aL732g_i6T8LQ,5947
@@ -198,7 +198,7 @@ lionagi/tools/types.py,sha256=XtJLY0m-Yi_ZLWhm0KycayvqMCZd--HxfQ0x9vFUYDE,230
198
198
  lionagi/tools/file/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
199
199
  lionagi/tools/file/reader.py,sha256=0TdnfVGVCKuM58MmGM-NyVjhU9BFoitkNYEepdc0z_Y,9529
200
200
  lionagi/tools/memory/tools.py,sha256=zTGBenVsF8Wuh303kWntmQSGlAFKonHNdh5ePuQ26KE,15948
201
- lionagi-0.12.8.dist-info/METADATA,sha256=8cD7ID0yI0CK3JqF0MelRZPsodoqUS1GyQJsnW44yZs,20200
202
- lionagi-0.12.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
203
- lionagi-0.12.8.dist-info/licenses/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
204
- lionagi-0.12.8.dist-info/RECORD,,
201
+ lionagi-0.13.0.dist-info/METADATA,sha256=calK5dDwwOP5FlidDG33qMadI8U75UD-TcPrwOyBEJ0,20200
202
+ lionagi-0.13.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
203
+ lionagi-0.13.0.dist-info/licenses/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
204
+ lionagi-0.13.0.dist-info/RECORD,,