lionagi 0.12.7__py3-none-any.whl → 0.13.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lionagi/libs/schema/load_pydantic_model_from_schema.py +0 -9
- lionagi/service/connections/endpoint_config.py +7 -3
- lionagi/service/connections/providers/claude_code_.py +227 -166
- lionagi/service/connections/providers/oai_.py +13 -2
- lionagi/service/imodel.py +9 -4
- lionagi/service/third_party/anthropic_models.py +1 -1
- lionagi/service/token_calculator.py +1 -1
- lionagi/version.py +1 -1
- {lionagi-0.12.7.dist-info → lionagi-0.13.0.dist-info}/METADATA +3 -3
- {lionagi-0.12.7.dist-info → lionagi-0.13.0.dist-info}/RECORD +12 -12
- {lionagi-0.12.7.dist-info → lionagi-0.13.0.dist-info}/WHEEL +0 -0
- {lionagi-0.12.7.dist-info → lionagi-0.13.0.dist-info}/licenses/LICENSE +0 -0
lionagi/libs/schema/load_pydantic_model_from_schema.py CHANGED
@@ -151,9 +151,6 @@ def load_pydantic_model_from_schema(
             base_class="pydantic.BaseModel",
         )
     except Exception as e:
-        # Optional: Print generated code on failure for debugging
-        # if output_file.exists():
-        #     print(f"--- Generated Code (Error) ---\n{output_file.read_text()}\n--------------------------")
         error_msg = "Failed to generate model code"
         raise RuntimeError(error_msg) from e

@@ -175,15 +172,9 @@ def load_pydantic_model_from_schema(
     # --- 3. Import the Generated Module Dynamically ---
     try:
         spec, generated_module = get_modules()
-        # Important: Make pydantic available within the executed module's globals
-        # if it's not explicitly imported by the generated code for some reason.
-        # Usually, datamodel-code-generator handles imports well.
-        # generated_module.__dict__['BaseModel'] = BaseModel
         spec.loader.exec_module(generated_module)

     except Exception as e:
-        # Optional: Print generated code on failure for debugging
-        # print(f"--- Generated Code (Import Error) ---\n{output_file.read_text()}\n--------------------------")
         error_msg = f"Failed to load generated module ({output_file})"
         raise RuntimeError(error_msg) from e

lionagi/service/connections/endpoint_config.py CHANGED
@@ -53,7 +53,6 @@ class EndpointConfig(BaseModel):

     @model_validator(mode="after")
     def _validate_api_key(self):
-
         if self.api_key is not None:
             if isinstance(self.api_key, SecretStr):
                 self._api_key = self.api_key.get_secret_value()

@@ -61,6 +60,9 @@ class EndpointConfig(BaseModel):
             # Skip settings lookup for ollama special case
             if self.provider == "ollama" and self.api_key == "ollama_key":
                 self._api_key = "ollama_key"
+            if self.provider == "claude_code":
+                self._api_key = "dummy"
+
             else:
                 from lionagi.config import settings

@@ -89,9 +91,11 @@ class EndpointConfig(BaseModel):
             if isinstance(v, BaseModel):
                 return v.__class__
             if isinstance(v, dict | str):
-                from lionagi.libs.schema import
+                from lionagi.libs.schema.load_pydantic_model_from_schema import (
+                    load_pydantic_model_from_schema,
+                )

-                return
+                return load_pydantic_model_from_schema(v)
         except Exception as e:
             raise ValueError("Invalid request options") from e
         raise ValueError(
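For context, the request_options change above means a schema supplied as a dict (or JSON string) is now materialized into a Pydantic model class. A minimal sketch of that path, assuming datamodel-code-generator is installed and that the generated class name is derived from the schema's "title"; the schema itself is illustrative and not taken from the diff:

```python
from lionagi.libs.schema.load_pydantic_model_from_schema import (
    load_pydantic_model_from_schema,
)

# Illustrative JSON schema; not from the lionagi codebase.
schema = {
    "title": "QueryRequest",
    "type": "object",
    "properties": {"prompt": {"type": "string"}},
    "required": ["prompt"],
}

# Returns a pydantic.BaseModel subclass, which EndpointConfig.request_options
# can then hold as the validated request model.
RequestModel = load_pydantic_model_from_schema(schema)
print(RequestModel(prompt="hello").model_dump())
```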
lionagi/service/connections/providers/claude_code_.py CHANGED
@@ -2,136 +2,244 @@
 #
 # SPDX-License-Identifier: Apache-2.0

+from __future__ import annotations
+
 import json
 from pathlib import Path
-from typing import Any
+from typing import Any, Literal

-from claude_code_sdk import ClaudeCodeOptions
-from
+from claude_code_sdk import ClaudeCodeOptions
+from claude_code_sdk import query as sdk_query
+from pydantic import BaseModel, Field, field_validator, model_validator

 from lionagi.service.connections.endpoint import Endpoint
 from lionagi.service.connections.endpoint_config import EndpointConfig
 from lionagi.utils import to_dict

-
+# --------------------------------------------------------------------------- constants
+ClaudePermission = Literal[
+    "default",
+    "acceptEdits",
+    "bypassPermissions",
+    "dangerously-skip-permissions",
+]
+
+CLAUDE_CODE_OPTION_PARAMS = {
+    "allowed_tools",
+    "max_thinking_tokens",
+    "mcp_tools",
+    "mcp_servers",
+    "permission_mode",
+    "continue_conversation",
+    "resume",
+    "max_turns",
+    "disallowed_tools",
+    "model",
+    "permission_prompt_tool_name",
+    "cwd",
+    "system_prompt",
+    "append_system_prompt",
+}
+
+
+# --------------------------------------------------------------------------- request model
 class ClaudeCodeRequest(BaseModel):
-
-
-
-
-
-    mcp_tools: list[str] = list
-    mcp_servers: dict[str, Any] = Field(default_factory=dict)
-    permission_mode: PermissionMode | None = None
+    # -- conversational bits -------------------------------------------------
+    prompt: str = Field(description="The prompt for Claude Code")
+    system_prompt: str | None = None
+    append_system_prompt: str | None = None
+    max_turns: int | None = None
     continue_conversation: bool = False
     resume: str | None = None
-
-
-
+
+    # -- repo / workspace ----------------------------------------------------
+    repo: Path = Field(default_factory=Path.cwd, exclude=True)
+    ws: str | None = None  # sub-directory under repo
+    add_dir: str | None = None  # extra read-only mount
+    allowed_tools: list[str] | None = None
+
+    # -- runtime & safety ----------------------------------------------------
+    model: Literal["sonnet", "opus"] | str | None = "sonnet"
+    max_thinking_tokens: int | None = None
+    mcp_tools: list[str] = Field(default_factory=list)
+    mcp_servers: dict[str, Any] = Field(default_factory=dict)
+    permission_mode: ClaudePermission | None = None
     permission_prompt_tool_name: str | None = None
-
-
-
+    disallowed_tools: list[str] = Field(default_factory=list)
+
+    # ------------------------ validators & helpers --------------------------
+    @field_validator("permission_mode", mode="before")
+    def _norm_perm(cls, v):
+        if v in {
+            "dangerously-skip-permissions",
+            "--dangerously-skip-permissions",
+        }:
+            return "bypassPermissions"
+        return v
+
+    # Workspace path derived from repo + ws
+    def cwd(self) -> Path:
+        if not self.ws:
+            return self.repo
+
+        # Convert to Path object for proper validation
+        ws_path = Path(self.ws)
+
+        # Check for absolute paths or directory traversal attempts
+        if ws_path.is_absolute():
+            raise ValueError(
+                f"Workspace path must be relative, got absolute: {self.ws}"
+            )
+
+        if ".." in ws_path.parts:
+            raise ValueError(
+                f"Directory traversal detected in workspace path: {self.ws}"
+            )

+        # Resolve paths to handle symlinks and normalize
+        repo_resolved = self.repo.resolve()
+        result = (self.repo / ws_path).resolve()
+
+        # Ensure the resolved path is within the repository bounds
+        try:
+            result.relative_to(repo_resolved)
+        except ValueError:
+            raise ValueError(
+                f"Workspace path escapes repository bounds. "
+                f"Repository: {repo_resolved}, Workspace: {result}"
+            )
+
+        return result
+
+    @model_validator(mode="after")
+    def _check_perm_workspace(self):
+        if self.permission_mode == "bypassPermissions":
+            # Use secure path validation with resolved paths
+            repo_resolved = self.repo.resolve()
+            cwd_resolved = self.cwd().resolve()
+
+            # Check if cwd is within repo bounds using proper path methods
+            try:
+                cwd_resolved.relative_to(repo_resolved)
+            except ValueError:
+                raise ValueError(
+                    f"With bypassPermissions, workspace must be within repository bounds. "
+                    f"Repository: {repo_resolved}, Workspace: {cwd_resolved}"
+                )
+        return self
+
+    # ------------------------ CLI helpers -----------------------------------
+    def as_cmd_args(self) -> list[str]:
+        """Build argument list for the *Node* `claude` CLI."""
+        full_prompt = f"Human User: {self.prompt}\n\nAssistant:"
+        args: list[str] = ["-p", full_prompt, "--output-format", "stream-json"]
+        if self.allowed_tools:
+            args.append("--allowedTools")
+            for tool in self.allowed_tools:
+                args.append(f'"{tool}"')
+
+        if self.disallowed_tools:
+            args.append("--disallowedTools")
+            for tool in self.disallowed_tools:
+                args.append(f'"{tool}"')
+
+        if self.resume:
+            args += ["--resume", self.resume]
+        elif self.continue_conversation:
+            args.append("--continue")
+
+        if self.max_turns:
+            # +1 because CLI counts *pairs*
+            args += ["--max-turns", str(self.max_turns + 1)]
+
+        if self.permission_mode == "bypassPermissions":
+            args += ["--dangerously-skip-permissions"]
+
+        if self.add_dir:
+            args += ["--add-dir", self.add_dir]
+
+        args += ["--model", self.model or "sonnet", "--verbose"]
+        return args
+
+    # ------------------------ SDK helpers -----------------------------------
     def as_claude_options(self) -> ClaudeCodeOptions:
-
-
-
+        data = {
+            k: v
+            for k, v in self.model_dump(exclude_none=True).items()
+            if k in CLAUDE_CODE_OPTION_PARAMS
+        }
+        return ClaudeCodeOptions(**data)

+    # ------------------------ convenience constructor -----------------------
     @classmethod
     def create(
         cls,
-        messages: list[dict],
+        messages: list[dict[str, Any]],
         resume: str | None = None,
-        continue_conversation: bool = None,
+        continue_conversation: bool | None = None,
         **kwargs,
     ):
+        if not messages:
+            raise ValueError("messages may not be empty")
+
         prompt = messages[-1]["content"]
-        if isinstance(prompt, dict
+        if isinstance(prompt, (dict, list)):
             prompt = json.dumps(prompt)

-
-        if resume is not None and continue_conversation is None:
+        if resume and continue_conversation is None:
             continue_conversation = True

-
+        data: dict[str, Any] = dict(
             prompt=prompt,
-            continue_conversation=continue_conversation,
             resume=resume,
+            continue_conversation=bool(continue_conversation),
         )

-        if
-
-
-
-        if (a := kwargs.get("system_prompt")) is not None:
-            dict_["append_system_prompt"] = a
+        if (messages[0]["role"] == "system") and (
+            resume or continue_conversation
+        ):
+            data["system_prompt"] = messages[0]["content"]

-
-
-
+        # Merge optional system prompts
+        if kwargs.get("system_prompt"):
+            data["append_system_prompt"] = kwargs.pop("system_prompt")

-
-
-        return cls(**dict_)
+        data.update(kwargs)
+        return cls.model_validate(data, strict=False)


+# --------------------------------------------------------------------------- SDK endpoint
 ENDPOINT_CONFIG = EndpointConfig(
     name="claude_code",
-    provider="
+    provider="claude_code",
     base_url="internal",
     endpoint="query",
     api_key="dummy",
     request_options=ClaudeCodeRequest,
+    timeout=3000,
 )


 class ClaudeCodeEndpoint(Endpoint):
+    """Direct Python-SDK (non-CLI) endpoint - unchanged except for bug-fixes."""

-    def __init__(self, config=ENDPOINT_CONFIG, **kwargs):
+    def __init__(self, config: EndpointConfig = ENDPOINT_CONFIG, **kwargs):
         super().__init__(config=config, **kwargs)

-    def create_payload(
-        self,
-
-        **
-
-        request_dict = to_dict(request)
-        # Merge stored kwargs from config, then request, then additional kwargs
-        request_dict = {**self.config.kwargs, **request_dict, **kwargs}
-        messages = request_dict.pop("messages", None)
-
-        resume = request_dict.pop("resume", None)
-        continue_conversation = request_dict.pop("continue_conversation", None)
+    def create_payload(self, request: dict | BaseModel, **kwargs):
+        req_dict = {**self.config.kwargs, **to_dict(request), **kwargs}
+        messages = req_dict.pop("messages")
+        req_obj = ClaudeCodeRequest.create(messages=messages, **req_dict)
+        return {"request": req_obj}, {}

-
-
-
-            continue_conversation=continue_conversation,
-            **{
-                k: v
-                for k, v in request_dict.items()
-                if v is not None and k in ClaudeCodeRequest.model_fields
-            },
+    def _stream_claude_code(self, request: ClaudeCodeRequest):
+        return sdk_query(
+            prompt=request.prompt, options=request.as_claude_options()
         )
-        request_options = request_obj.as_claude_options()
-        payload = {
-            "prompt": request_obj.prompt,
-            "options": request_options,
-        }
-        return (payload, {})

-    def
-
-
-        return query(prompt=prompt, options=options)
-
-    async def stream(
-        self,
-        request: dict | BaseModel,
-        **kwargs,
-    ):
-        async for chunk in self._stream_claude_code(**request, **kwargs):
+    async def stream(self, request: dict | BaseModel, **kwargs):
+        payload = self.create_payload(request, **kwargs)["request"]
+        async for chunk in self._stream_claude_code(payload):
             yield chunk

     def _parse_claude_code_response(self, responses: list) -> dict:

@@ -146,95 +254,49 @@ class ClaudeCodeEndpoint(Endpoint):
         When Claude Code uses tools, the ResultMessage.result may be None.
         In that case, we need to look at the tool results in UserMessages.
         """
-
-
-
-
+        results = {
+            "session_id": None,
+            "model": "claude-code",
+            "result": "",
+            "tool_results": [],
+            "is_error": False,
+            "num_turns": None,
+            "total_cost_usd": None,
+            "usage": {
+                "prompt_tokens": 0,
+                "completion_tokens": 0,
+                "total_tokens": 0,
+            },
+        }

-
-        for response in responses:
-            class_name = response.__class__.__name__
-
-            if class_name == "SystemMessage" and hasattr(response, "data"):
-                model = response.data.get("model", "claude-code")
-
-            elif class_name == "AssistantMessage":
-                # Extract text content from assistant messages
-                if hasattr(response, "content") and response.content:
-                    for block in response.content:
-                        if hasattr(block, "text"):
-                            assistant_text_content.append(block.text)
-                        elif isinstance(block, dict) and "text" in block:
-                            assistant_text_content.append(block["text"])
-
-            elif class_name == "UserMessage":
-                # Extract tool results from user messages
-                if hasattr(response, "content") and response.content:
-                    for item in response.content:
-                        if (
-                            isinstance(item, dict)
-                            and item.get("type") == "tool_result"
-                        ):
-                            tool_results.append(item.get("content", ""))
-
-            elif class_name == "ResultMessage":
-                result_message = response
-
-        # Determine the final content
-        final_content = ""
-        if (
-            result_message
-            and hasattr(result_message, "result")
-            and result_message.result
-        ):
-            # Use ResultMessage.result if available
-            final_content = result_message.result
-        elif assistant_text_content:
-            # Use assistant text content if available
-            final_content = "\n".join(assistant_text_content)
-        elif tool_results:
-            # If only tool results are available, use a generic summary
-            # (Claude Code typically provides its own summary after tool use)
-            final_content = (
-                "I've completed the requested task using the available tools."
-            )
+        from claude_code_sdk import types

-
-
-
-
-
-
-
-
-
-
-
-
-
+        for response in responses:
+            if isinstance(response, types.SystemMessage):
+                results["session_id"] = response.data.get("session_id")
+                results["model"] = response.data.get("model", "claude-code")
+            if isinstance(response, types.AssistantMessage):
+                for block in response.content:
+                    if isinstance(block, types.TextBlock):
+                        results["result"] += block.text.strip() + "\n"
+                    if isinstance(block, types.ToolResultBlock):
+                        results["tool_results"].append(
+                            {
+                                "tool_use_id": block.tool_use_id,
+                                "content": block.content,
+                                "is_error": block.is_error,
+                            }
                         )
-
-
-
-
-
+            if isinstance(response, types.ResultMessage):
+                results["result"] += response.result.strip() or ""
+                results["usage"] = response.usage
+                results["is_error"] = response.is_error
+                results["total_cost_usd"] = response.total_cost_usd
+                results["num_turns"] = response.num_turns
+                results["duration_ms"] = response.duration_ms
+                results["duration_api_ms"] = response.duration_api_ms

-
-        if result_message and hasattr(result_message, "usage"):
-            result["usage"] = result_message.usage
-
-        # Add only essential Claude Code metadata
-        if result_message:
-            if hasattr(result_message, "cost_usd"):
-                result["usage"]["cost_usd"] = result_message.cost_usd
-            if hasattr(result_message, "session_id"):
-                result["session_id"] = result_message.session_id
-            if hasattr(result_message, "is_error"):
-                result["is_error"] = result_message.is_error
-            if hasattr(result_message, "num_turns"):
-                result["num_turns"] = result_message.num_turns
-
-        return result
+        return results

     async def _call(
         self,

@@ -246,5 +308,4 @@ class ClaudeCodeEndpoint(Endpoint):
         async for chunk in self._stream_claude_code(**payload):
             responses.append(chunk)

-        # Parse the responses into a consistent format
         return self._parse_claude_code_response(responses)
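A short usage sketch for the rewritten ClaudeCodeRequest shown above. The message contents, session id, and workspace name are made up, and the claude-code extra (claude-code-sdk) must be installed for the module to import; the field and method names come from the diff itself:

```python
from lionagi.service.connections.providers.claude_code_ import ClaudeCodeRequest

messages = [
    {"role": "system", "content": "You are a careful refactoring assistant."},
    {"role": "user", "content": "Rename the helper and update its call sites."},
]

req = ClaudeCodeRequest.create(
    messages=messages,
    resume="sess_123",       # hypothetical session id; also turns continue_conversation on
    ws="packages/core",      # validated by cwd(): must be relative and stay inside repo
    permission_mode="dangerously-skip-permissions",  # normalized to "bypassPermissions"
)

options = req.as_claude_options()  # ClaudeCodeOptions for the Python SDK path
cli_args = req.as_cmd_args()       # argv for the Node `claude` CLI path
```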
lionagi/service/connections/providers/oai_.py CHANGED
@@ -98,10 +98,21 @@ GROQ_CHAT_ENDPOINT_CONFIG = EndpointConfig(


 REASONING_MODELS = (
-    "o3-mini-2025-01-31",
-    "o3-mini",
     "o1",
     "o1-2024-12-17",
+    "o1-preview-2024-09-12",
+    "o1-pro",
+    "o1-pro-2025-03-19",
+    "o3-pro",
+    "o3-pro-2025-06-10",
+    "o3",
+    "o3-2025-04-16",
+    "o4-mini",
+    "o4-mini-2025-04-16",
+    "o3-mini",
+    "o3-mini-2025-01-31",
+    "o1-mini",
+    "o1-mini-2024-09-12",
 )

 REASONING_NOT_SUPPORT_PARAMS = (
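The tuple above only lists model names; how it is consumed is not shown in this diff. As a hedged illustration, a payload builder could combine it with REASONING_NOT_SUPPORT_PARAMS (whose contents are not visible here) roughly like this:

```python
from lionagi.service.connections.providers.oai_ import (
    REASONING_MODELS,
    REASONING_NOT_SUPPORT_PARAMS,
)


def prune_reasoning_params(model: str, payload: dict) -> dict:
    """Drop parameters that reasoning models reject; illustrative only."""
    if model in REASONING_MODELS:
        return {
            k: v
            for k, v in payload.items()
            if k not in REASONING_NOT_SUPPORT_PARAMS
        }
    return payload
```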
lionagi/service/imodel.py CHANGED
@@ -46,6 +46,7 @@ class iModel:
         limit_tokens: int = None,
         concurrency_limit: int | None = None,
         streaming_process_func: Callable = None,
+        provider_metadata: dict | None = None,
         **kwargs,
     ) -> None:
         """Initializes the iModel instance.

@@ -79,6 +80,8 @@ class iModel:
             concurrency_limit (int | None, optional):
                 Maximum number of streaming concurrent requests allowed.
                 only applies to streaming requests.
+            provider_metadata (dict | None, optional):
+                Provider-specific metadata, such as session IDs for
             **kwargs:
                 Additional keyword arguments, such as `model`, or any other
                 provider-specific fields.

@@ -121,7 +124,7 @@ class iModel:
         self.streaming_process_func = streaming_process_func

         # Provider-specific metadata storage (e.g., session_id for Claude Code)
-        self.
+        self.provider_metadata = provider_metadata or {}

     def create_api_calling(
         self, include_token_usage_to_model: bool = False, **kwargs

@@ -142,9 +145,9 @@ class iModel:
             self.endpoint.config.provider == "claude_code"
             and "resume" not in kwargs
             and "session_id" not in kwargs
-            and self.
+            and self.provider_metadata.get("session_id")
         ):
-            kwargs["resume"] = self.
+            kwargs["resume"] = self.provider_metadata["session_id"]

         # The new Endpoint.create_payload returns (payload, headers)
         payload, headers = self.endpoint.create_payload(request=kwargs)

@@ -280,7 +283,7 @@ class iModel:
         ):
             response = completed_call.response
             if isinstance(response, dict) and "session_id" in response:
-                self.
+                self.provider_metadata["session_id"] = response[
                     "session_id"
                 ]

@@ -322,6 +325,7 @@ class iModel:
         return {
             "endpoint": self.endpoint.to_dict(),
             "processor_config": self.executor.config,
+            "provider_metadata": self.provider_metadata,
         }

     @classmethod

@@ -338,5 +342,6 @@ class iModel:

         return cls(
             endpoint=e1,
+            provider_metadata=data.get("provider_metadata"),
             **data.get("processor_config", {}),
         )
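A sketch of how the new provider_metadata hook could be exercised, assuming iModel accepts provider/model keyword arguments and that the serialization methods shown above are to_dict()/from_dict(); the session id is a placeholder:

```python
from lionagi.service.imodel import iModel

imodel = iModel(provider="claude_code", model="sonnet")

# Normally captured automatically from a completed call's response dict.
imodel.provider_metadata["session_id"] = "sess_abc123"

state = imodel.to_dict()            # now includes "provider_metadata"
restored = iModel.from_dict(state)  # later claude_code calls pass resume=<session_id>
```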
lionagi/service/token_calculator.py CHANGED
@@ -121,7 +121,7 @@ class TokenCalculator:
         return num_tokens  # buffer for chat

     @staticmethod
-    def
+    def calculate_embed_token(inputs: list[str], /, **kwargs) -> int:
         try:
             if not "inputs" in kwargs:
                 raise ValueError("Missing 'inputs' field in payload")
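Based only on the signature above, a guess at how the renamed helper might be called: the embedding payload is passed both positionally and expanded as keyword arguments so the "inputs" check passes. Illustrative only; the model name is an assumption:

```python
from lionagi.service.token_calculator import TokenCalculator

payload = {"model": "text-embedding-3-small", "inputs": ["hello", "world"]}
n_tokens = TokenCalculator.calculate_embed_token(payload["inputs"], **payload)
```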
lionagi/version.py CHANGED
@@ -1 +1 @@
-__version__ = "0.12.7"
+__version__ = "0.13.0"
{lionagi-0.12.7.dist-info → lionagi-0.13.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lionagi
-Version: 0.12.7
+Version: 0.13.0
 Summary: An Intelligence Operating System.
 Author-email: HaiyangLi <quantocean.li@gmail.com>, Liangbingyan Luo <llby_luo@outlook.com>
 License: Apache License

@@ -230,11 +230,11 @@ Requires-Dist: python-dotenv>=1.1.0
 Requires-Dist: tiktoken>=0.8.0
 Requires-Dist: toml>=0.9.0
 Provides-Extra: all
-Requires-Dist: claude-code-sdk>=0.0.
+Requires-Dist: claude-code-sdk>=0.0.14; extra == 'all'
 Requires-Dist: docling>=2.15.1; extra == 'all'
 Requires-Dist: ollama>=0.5.0; extra == 'all'
 Provides-Extra: claude-code
-Requires-Dist: claude-code-sdk>=0.0.
+Requires-Dist: claude-code-sdk>=0.0.14; extra == 'claude-code'
 Provides-Extra: docs
 Requires-Dist: furo>=2024.8.6; extra == 'docs'
 Requires-Dist: sphinx-autobuild>=2024.10.3; extra == 'docs'
{lionagi-0.12.7.dist-info → lionagi-0.13.0.dist-info}/RECORD CHANGED
@@ -6,7 +6,7 @@ lionagi/config.py,sha256=dAhDFKtaaSfn6WT9dwX9Vd4TWWs6-Su1FgYIrFgYcgc,3709
 lionagi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lionagi/settings.py,sha256=W52mM34E6jXF3GyqCFzVREKZrmnUqtZm_BVDsUiDI_s,1627
 lionagi/utils.py,sha256=uLTJKl7aTnFXV6ehA6zwiwEB7G2nQYKsO2pZ6mqFzUk,78908
-lionagi/version.py,sha256=
+lionagi/version.py,sha256=DgpLNbv0e1LIEOOe54Db8_390i9pelMEFEnsBsNmyhA,23
 lionagi/fields/__init__.py,sha256=8oU7Vfk-fKiULFKqhM6VpJMqdZcVXPTM7twVfNDN_SQ,603
 lionagi/fields/action.py,sha256=iWSApCM77jS0Oc28lb7G601Etkp-yjx5U1hfI_FQgfA,5792
 lionagi/fields/base.py,sha256=5CJc7j8kTTWzXwpYzkSAFzx4BglABfx3AElIATKB7bg,3857

@@ -46,7 +46,7 @@ lionagi/libs/schema/extract_code_block.py,sha256=PuJbJj1JnqR5fSZudowPcVPpEoKISLr
 lionagi/libs/schema/extract_docstring.py,sha256=aYyLSRlB8lTH9QF9-6a56uph3AAkNuTyZ0S_duf5-fw,5729
 lionagi/libs/schema/function_to_schema.py,sha256=XAB031WbYu3a7eFJyYjXVMAjmtWYSYr5kC_DYgjiuyM,5604
 lionagi/libs/schema/json_schema.py,sha256=cuHcaMr748O9g6suNGmRx4tRXcidd5-c7AMGjTIZyHM,7670
-lionagi/libs/schema/load_pydantic_model_from_schema.py,sha256=
+lionagi/libs/schema/load_pydantic_model_from_schema.py,sha256=VGrmnMb26JNcB9LaQcqTwW--sUljqqlqqsPD69zjcEc,10183
 lionagi/libs/token_transform/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lionagi/libs/token_transform/base.py,sha256=LBnaDgi4HNgaJJGwIzWcQjVMdu49i_93rRvOvMU22Rw,1545
 lionagi/libs/token_transform/llmlingua.py,sha256=DkeLUlrb7rGx3nZ04aADU9HXXu5mZTf_DBwT0xhzIv4,7

@@ -163,28 +163,28 @@ lionagi/protocols/operatives/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5Z
 lionagi/protocols/operatives/operative.py,sha256=PXEMzD6tFM5PPK9kkPaSb7DBIzy7TNC3f2evuGhWhpg,6677
 lionagi/protocols/operatives/step.py,sha256=AXXRhjsbWqkoMDQ_JyqsfQItQsjBJmldugJz36mA4N0,9772
 lionagi/service/__init__.py,sha256=DMGXIqPsmut9H5GT0ZeSzQIzYzzPwI-2gLXydpbwiV8,21
-lionagi/service/imodel.py,sha256=
+lionagi/service/imodel.py,sha256=jE1Y3JzGwJZesHQtRSadVTAHA2TGdBAwfmzQoOA5Meg,12651
 lionagi/service/manager.py,sha256=9-dIE4ZftI94RLLLPXH-yB4E3zfnbTs3yppdFDPNchM,1165
 lionagi/service/rate_limited_processor.py,sha256=PnO0rBf9ObKhD3vtl6pYZX3nHVDvMPdOww59zCWgslQ,5230
 lionagi/service/resilience.py,sha256=uYJYZQ9M-tje8ME3vJmYabXwKHF1c3Ij4-WrdCwogcs,18742
-lionagi/service/token_calculator.py,sha256=
+lionagi/service/token_calculator.py,sha256=FysR31hpv4QPVsbZBVPR60LSW_xCQpwXx89zOJJ73LQ,6460
 lionagi/service/types.py,sha256=6zavqBxK1Fj0nB9eZgJn3JICxmdT-n0nn8YWZFzM5LU,508
 lionagi/service/connections/__init__.py,sha256=yHQZ7OJpCftd6CStYR8inbxjJydYdmv9kCvbUBhJ2zU,362
 lionagi/service/connections/api_calling.py,sha256=XetCrjMhOHNKGGv-NzHhBhVS7XjKPalrS_iExzU-4S4,8005
 lionagi/service/connections/endpoint.py,sha256=yNIjq9wETMnytynGbq3qY_dkyaMlaHrcfiZjS-tnmLg,14756
-lionagi/service/connections/endpoint_config.py,sha256=
+lionagi/service/connections/endpoint_config.py,sha256=QMfxqwvHhNjFSZupJ8oDCMlZK_uQkjGC145w6ETxnB8,4558
 lionagi/service/connections/header_factory.py,sha256=22sG4ian3MiNklF6SdQqkEYgtWKOZik_yDE0Lna6BiE,1754
 lionagi/service/connections/match_endpoint.py,sha256=mEZPDkK1qtvjTGN9-PZsK7w_yB7642nZiJsb0l5QUx4,1827
 lionagi/service/connections/providers/__init__.py,sha256=3lzOakDoBWmMaNnT2g-YwktPKa_Wme4lnPRSmOQfayY,105
 lionagi/service/connections/providers/anthropic_.py,sha256=SUPnw2UqjY5wuHXLHas6snMTzhQ-UuixvPYbkVnXn34,3083
-lionagi/service/connections/providers/claude_code_.py,sha256=
+lionagi/service/connections/providers/claude_code_.py,sha256=TmIxVCCbHxe2NIc2Kb3oJTYFGdJZi5IkB_23LQ1PcG0,11089
 lionagi/service/connections/providers/exa_.py,sha256=GGWaD9jd5gKM257OfUaIBBKIqR1NrNcBE67p_7JbK7g,938
-lionagi/service/connections/providers/oai_.py,sha256=
+lionagi/service/connections/providers/oai_.py,sha256=hH5oEMvS9A0Kbvh-lSFm5dLiqPV2gfRra6zzDOq78P0,5191
 lionagi/service/connections/providers/ollama_.py,sha256=jdx6dGeChwVk5TFfFRbpnrpKzj8YQZw6D5iWJ6zYmfk,4096
 lionagi/service/connections/providers/perplexity_.py,sha256=9MH9YmMy9Jg7JDMJHQxxMYHyjJ4NP0OlN7sCuhla85I,917
 lionagi/service/third_party/README.md,sha256=qFjWnI8rmLivIyr6Tc-hRZh-rQwntROp76af4MBNJJc,2214
 lionagi/service/third_party/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lionagi/service/third_party/anthropic_models.py,sha256=
+lionagi/service/third_party/anthropic_models.py,sha256=oqSPSlcayYG-fS5BLiLeTtkrpaxgkPhEK_YgneumrOo,4004
 lionagi/service/third_party/exa_models.py,sha256=G_hnekcy-DillPLzMoDQ8ZisVAL8Mp7iMAK4xqAT_3w,5470
 lionagi/service/third_party/openai_models.py,sha256=sF-fQ726CnaDBgLY_r2NdPqc3GicPKhZjh5F8IfjBO0,501904
 lionagi/service/third_party/pplx_models.py,sha256=Nkm1ftESBa_NwP9ITBUNqLmAZ3Jh92aL732g_i6T8LQ,5947

@@ -198,7 +198,7 @@ lionagi/tools/types.py,sha256=XtJLY0m-Yi_ZLWhm0KycayvqMCZd--HxfQ0x9vFUYDE,230
 lionagi/tools/file/__init__.py,sha256=5y5joOZzfFWERl75auAcNcKC3lImVJ5ZZGvvHZUFCJM,112
 lionagi/tools/file/reader.py,sha256=0TdnfVGVCKuM58MmGM-NyVjhU9BFoitkNYEepdc0z_Y,9529
 lionagi/tools/memory/tools.py,sha256=zTGBenVsF8Wuh303kWntmQSGlAFKonHNdh5ePuQ26KE,15948
-lionagi-0.
-lionagi-0.
-lionagi-0.
-lionagi-0.
+lionagi-0.13.0.dist-info/METADATA,sha256=calK5dDwwOP5FlidDG33qMadI8U75UD-TcPrwOyBEJ0,20200
+lionagi-0.13.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+lionagi-0.13.0.dist-info/licenses/LICENSE,sha256=VXFWsdoN5AAknBCgFqQNgPWYx7OPp-PFEP961zGdOjc,11288
+lionagi-0.13.0.dist-info/RECORD,,
{lionagi-0.12.7.dist-info → lionagi-0.13.0.dist-info}/WHEEL
File without changes

{lionagi-0.12.7.dist-info → lionagi-0.13.0.dist-info}/licenses/LICENSE
File without changes