lionagi 0.13.1__py3-none-any.whl → 0.13.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lionagi/fields/action.py +0 -1
- lionagi/fields/reason.py +0 -1
- lionagi/libs/file/save.py +1 -1
- lionagi/libs/schema/as_readable.py +184 -16
- lionagi/libs/schema/extract_docstring.py +1 -2
- lionagi/libs/token_transform/synthlang_/base.py +0 -2
- lionagi/libs/validate/string_similarity.py +1 -2
- lionagi/models/hashable_model.py +0 -1
- lionagi/models/schema_model.py +0 -1
- lionagi/operations/ReAct/utils.py +0 -1
- lionagi/operations/_act/act.py +0 -1
- lionagi/operations/interpret/interpret.py +1 -4
- lionagi/operations/manager.py +0 -1
- lionagi/operations/plan/plan.py +0 -1
- lionagi/operations/select/utils.py +0 -2
- lionagi/protocols/forms/flow.py +3 -1
- lionagi/protocols/generic/pile.py +1 -2
- lionagi/protocols/generic/processor.py +0 -1
- lionagi/protocols/graph/graph.py +1 -3
- lionagi/protocols/mail/package.py +0 -1
- lionagi/protocols/messages/assistant_response.py +0 -2
- lionagi/protocols/messages/message.py +0 -1
- lionagi/service/connections/endpoint_config.py +6 -0
- lionagi/service/connections/match_endpoint.py +26 -8
- lionagi/service/connections/providers/claude_code_.py +195 -22
- lionagi/service/connections/providers/claude_code_cli.py +414 -0
- lionagi/service/connections/providers/oai_.py +1 -1
- lionagi/service/manager.py +0 -1
- lionagi/service/rate_limited_processor.py +0 -2
- lionagi/service/token_calculator.py +0 -3
- lionagi/session/branch.py +0 -2
- lionagi/session/session.py +0 -1
- lionagi/settings.py +0 -1
- lionagi/utils.py +6 -9
- lionagi/version.py +1 -1
- {lionagi-0.13.1.dist-info → lionagi-0.13.3.dist-info}/METADATA +8 -3
- {lionagi-0.13.1.dist-info → lionagi-0.13.3.dist-info}/RECORD +39 -43
- lionagi/traits/__init__.py +0 -58
- lionagi/traits/base.py +0 -216
- lionagi/traits/composer.py +0 -343
- lionagi/traits/protocols.py +0 -495
- lionagi/traits/registry.py +0 -1071
- {lionagi-0.13.1.dist-info → lionagi-0.13.3.dist-info}/WHEEL +0 -0
- {lionagi-0.13.1.dist-info → lionagi-0.13.3.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,414 @@
|
|
1
|
+
from __future__ import annotations
|
2
|
+
|
3
|
+
import asyncio
|
4
|
+
import codecs
|
5
|
+
import contextlib
|
6
|
+
import dataclasses
|
7
|
+
import json
|
8
|
+
import logging
|
9
|
+
import shutil
|
10
|
+
from collections.abc import AsyncIterator, Callable
|
11
|
+
from datetime import datetime
|
12
|
+
from functools import partial
|
13
|
+
from textwrap import shorten
|
14
|
+
from typing import Any
|
15
|
+
|
16
|
+
from json_repair import repair_json
|
17
|
+
from pydantic import BaseModel
|
18
|
+
|
19
|
+
from lionagi.libs.schema.as_readable import as_readable
|
20
|
+
from lionagi.service.connections.endpoint import Endpoint, EndpointConfig
|
21
|
+
from lionagi.utils import to_dict
|
22
|
+
|
23
|
+
from .claude_code_ import ClaudeCodeRequest
|
24
|
+
|
25
|
+
# Resolve the `claude` CLI binary once at import time; fall back to the bare
# name so the existence check below produces a sensible error message.
CLAUDE = shutil.which("claude") or "claude"
# Fail fast at import time if the CLI is not installed.
if not shutil.which(CLAUDE):
    raise RuntimeError(
        "Claude CLI binary not found (npm i -g @anthropic-ai/claude-code)"
    )
# NOTE(review): basicConfig at import time configures the host application's
# root logger as a side effect — confirm this is intended for library code.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("claude-cli")
|
32
|
+
|
33
|
+
|
34
|
+
@dataclasses.dataclass
class ClaudeChunk:
    """Low-level wrapper around every NDJSON object coming from the CLI."""

    # The JSON object exactly as emitted by the CLI stream.
    raw: dict[str, Any]
    # The NDJSON "type" field (e.g. "system", "assistant", "user", "result").
    type: str
    # convenience views — populated by the stream parser when applicable
    thinking: str | None = None
    text: str | None = None
    tool_use: dict[str, Any] | None = None
    tool_result: dict[str, Any] | None = None
|
45
|
+
|
46
|
+
|
47
|
+
@dataclasses.dataclass
class ClaudeSession:
    """Aggregated view of a whole CLI conversation."""

    session_id: str | None = None
    model: str | None = None

    # chronological log of every chunk received
    chunks: list[ClaudeChunk] = dataclasses.field(default_factory=list)

    # materialised views extracted from the chunks
    thinking_log: list[str] = dataclasses.field(default_factory=list)
    messages: list[dict[str, Any]] = dataclasses.field(default_factory=list)
    tool_uses: list[dict[str, Any]] = dataclasses.field(default_factory=list)
    tool_results: list[dict[str, Any]] = dataclasses.field(
        default_factory=list
    )

    # final summary — filled in from the terminal "result" event;
    # remains at these defaults if the stream ends early
    result: str = ""
    usage: dict[str, Any] = dataclasses.field(default_factory=dict)
    total_cost_usd: float | None = None
    num_turns: int | None = None
    duration_ms: int | None = None
    duration_api_ms: int | None = None
    is_error: bool = False
|
73
|
+
|
74
|
+
|
75
|
+
# --------------------------------------------------------------------------- helpers
|
76
|
+
|
77
|
+
|
78
|
+
async def ndjson_from_cli(request: ClaudeCodeRequest):
    """
    Yields each JSON object emitted by the *claude-code* CLI.

    • Robust against UTF‑8 splits across chunks (incremental decoder).
    • Robust against braces inside strings (uses json.JSONDecoder.raw_decode)
    • Falls back to `json_repair.repair_json` when necessary.
    """
    workspace = request.cwd()
    workspace.mkdir(parents=True, exist_ok=True)

    proc = await asyncio.create_subprocess_exec(
        CLAUDE,
        *request.as_cmd_args(),
        cwd=str(workspace),
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )

    utf8 = codecs.getincrementaldecoder("utf-8")()
    parser = json.JSONDecoder()
    pending: str = ""  # text buffer that may hold >1 JSON objects

    try:
        # 1) decode *incrementally* so multibyte chars split across reads
        #    never break, peeling off complete JSON objects as they appear
        while raw := await proc.stdout.read(4096):
            pending += utf8.decode(raw)

            while pending:
                pending = pending.lstrip()  # drop leading spaces/newlines
                if not pending:
                    break
                try:
                    obj, consumed = parser.raw_decode(pending)
                except json.JSONDecodeError:
                    break  # incomplete object → need more bytes
                yield obj
                pending = pending[consumed:]  # keep remainder for next round

        # 2) flush any tail bytes held inside the incremental decoder
        pending = (pending + utf8.decode(b"", final=True)).strip()
        if pending:
            try:
                obj, _ = parser.raw_decode(pending)
                yield obj
            except json.JSONDecodeError:
                # last resort: let json_repair make sense of the tail
                try:
                    yield json.loads(repair_json(pending))
                    log.warning(
                        "Repaired malformed JSON fragment at stream end"
                    )
                except Exception:
                    log.error(
                        "Skipped unrecoverable JSON tail: %.120s…", pending
                    )

        # 3) propagate non‑zero exit code
        if await proc.wait() != 0:
            detail = (await proc.stderr.read()).decode().strip()
            raise RuntimeError(detail or "CLI exited non‑zero")

    finally:
        # best-effort cleanup; the process may already be gone
        with contextlib.suppress(ProcessLookupError):
            proc.terminate()
            await proc.wait()
|
151
|
+
|
152
|
+
|
153
|
+
# --------------------------------------------------------------------------- SSE route
|
154
|
+
async def stream_events(request: ClaudeCodeRequest):
    """Re-emit every CLI event, then a synthetic ``{"type": "done"}`` sentinel."""
    async for event in ndjson_from_cli(request):
        yield event
    yield {"type": "done"}
|
158
|
+
|
159
|
+
|
160
|
+
# Shared pretty-printer used by all _pp_* helpers: render as markdown and
# produce the display string.
print_readable = partial(as_readable, md=True, display_str=True)
|
161
|
+
|
162
|
+
|
163
|
+
def _pp_system(sys_obj: dict[str, Any], theme) -> None:
    """Pretty-print the initial "system" handshake event (id, model, tools)."""
    tools = sys_obj.get("tools", [])
    # show at most 8 tool names, with an ellipsis when more exist
    tool_list = ", ".join(tools[:8]) + ("…" if len(tools) > 8 else "")
    body = (
        f"◼️ **Claude Code Session** \n"
        f"- id: `{sys_obj.get('session_id', '?')}` \n"
        f"- model: `{sys_obj.get('model', '?')}` \n"
        f"- tools: {tool_list}"
    )
    print_readable(body, border=False, theme=theme)
|
172
|
+
|
173
|
+
|
174
|
+
def _pp_thinking(thought: str, theme) -> None:
    """Pretty-print one "thinking" block inside a bordered panel."""
    print_readable(f"\n🧠 Thinking:\n{thought}\n", border=True, theme=theme)
|
180
|
+
|
181
|
+
|
182
|
+
def _pp_assistant_text(text: str, theme) -> None:
    """Pretty-print an assistant text block as quoted markdown."""
    print_readable(f"\n> 🗣️ Claude:\n{text}\n", theme=theme)
|
188
|
+
|
189
|
+
|
190
|
+
def _pp_tool_use(tu: dict[str, Any], theme) -> None:
    """Pretty-print a tool_use block as a single truncated line."""
    # flatten newlines and truncate so one tool call stays on one line
    short_input = shorten(str(tu["input"]).replace("\n", " "), 130)
    line = f"- 🔧 Tool Use — {tu['name']}({tu['id']}) - input: {short_input}"
    print_readable(line, border=False, panel=False, theme=theme)
|
194
|
+
|
195
|
+
|
196
|
+
def _pp_tool_result(tr: dict[str, Any], theme) -> None:
    """Pretty-print a tool_result block with its OK/ERR status."""
    short_content = shorten(str(tr["content"]).replace("\n", " "), 130)
    outcome = "ERR" if tr.get("is_error") else "OK"
    line = (
        f"- 📄 Tool Result({tr['tool_use_id']}) - {outcome}\n\n"
        f"\tcontent: {short_content}"
    )
    print_readable(line, border=False, panel=False, theme=theme)
|
204
|
+
|
205
|
+
|
206
|
+
def _pp_final(sess: ClaudeSession, theme) -> None:
    """Pretty-print the end-of-session summary panel.

    Fix: ``sess.total_cost_usd`` is ``float | None`` and stays ``None`` when
    the CLI never emitted a terminal "result" event (e.g. early termination);
    formatting ``None`` with ``:.4f`` raised ``TypeError``. Fall back to 0.0
    so the summary always renders.
    """
    usage = sess.usage or {}
    cost = sess.total_cost_usd if sess.total_cost_usd is not None else 0.0
    txt = (
        f"### ✅ Session complete - {datetime.utcnow().isoformat(timespec='seconds')} UTC\n"
        f"**Result:**\n\n{sess.result or ''}\n\n"
        f"- cost: **${cost:.4f}** \n"
        f"- turns: **{sess.num_turns}** \n"
        f"- duration: **{sess.duration_ms} ms** (API {sess.duration_api_ms} ms) \n"
        f"- tokens in/out: {usage.get('input_tokens', 0)}/{usage.get('output_tokens', 0)}"
    )
    # NOTE(review): datetime.utcnow() is deprecated since 3.12 — consider
    # datetime.now(timezone.utc); kept here to avoid changing the output string.
    print_readable(txt, theme=theme)
|
217
|
+
|
218
|
+
|
219
|
+
# --------------------------------------------------------------------------- internal utils
|
220
|
+
|
221
|
+
|
222
|
+
async def _maybe_await(func, *args, **kw):
|
223
|
+
"""Call func which may be sync or async."""
|
224
|
+
res = func(*args, **kw) if func else None
|
225
|
+
if asyncio.iscoroutine(res):
|
226
|
+
await res
|
227
|
+
|
228
|
+
|
229
|
+
# --------------------------------------------------------------------------- main parser
|
230
|
+
|
231
|
+
|
232
|
+
async def stream_claude_code_cli(  # noqa: C901 (complexity from branching is fine here)
    request: ClaudeCodeRequest,
    session: ClaudeSession | None = None,
    *,
    on_system: Callable[[dict[str, Any]], None] | None = None,
    on_thinking: Callable[[str], None] | None = None,
    on_text: Callable[[str], None] | None = None,
    on_tool_use: Callable[[dict[str, Any]], None] | None = None,
    on_tool_result: Callable[[dict[str, Any]], None] | None = None,
    on_final: Callable[[ClaudeSession], None] | None = None,
) -> AsyncIterator[ClaudeChunk | dict | ClaudeSession]:
    """
    Consume the ND‑JSON stream produced by ndjson_from_cli()
    and return a fully‑populated ClaudeSession.

    If callbacks are omitted a default pretty‑print is emitted.

    Fix: the previous default ``session: ClaudeSession = ClaudeSession()``
    was a *mutable default argument* — every call that omitted *session*
    appended chunks/messages into the same shared instance. A ``None``
    sentinel with per-call construction restores the intended semantics
    (callers that pass their own session, as ``_call`` does, are unaffected).
    """
    if session is None:
        session = ClaudeSession()
    stream = ndjson_from_cli(request)
    theme = request.cli_display_theme or "light"

    async for obj in stream:
        typ = obj.get("type", "unknown")
        chunk = ClaudeChunk(raw=obj, type=typ)
        session.chunks.append(chunk)

        # ------------------------ SYSTEM -----------------------------------
        if typ == "system":
            data = obj
            session.session_id = data.get("session_id", session.session_id)
            session.model = data.get("model", session.model)
            await _maybe_await(on_system, data)
            if request.verbose_output and on_system is None:
                _pp_system(data, theme)
            # system events are yielded as the raw dict, not as a ClaudeChunk
            yield data

        # ------------------------ ASSISTANT --------------------------------
        elif typ == "assistant":
            msg = obj["message"]
            session.messages.append(msg)

            for blk in msg.get("content", []):
                btype = blk.get("type")
                if btype == "thinking":
                    thought = blk.get("thinking", "").strip()
                    chunk.thinking = thought
                    session.thinking_log.append(thought)
                    await _maybe_await(on_thinking, thought)
                    if request.verbose_output and on_thinking is None:
                        _pp_thinking(thought, theme)

                elif btype == "text":
                    text = blk.get("text", "")
                    chunk.text = text
                    await _maybe_await(on_text, text)
                    if request.verbose_output and on_text is None:
                        _pp_assistant_text(text, theme)

                elif btype == "tool_use":
                    tu = {
                        "id": blk["id"],
                        "name": blk["name"],
                        "input": blk["input"],
                    }
                    chunk.tool_use = tu
                    session.tool_uses.append(tu)
                    await _maybe_await(on_tool_use, tu)
                    if request.verbose_output and on_tool_use is None:
                        _pp_tool_use(tu, theme)

                elif btype == "tool_result":
                    tr = {
                        "tool_use_id": blk["tool_use_id"],
                        "content": blk["content"],
                        "is_error": blk.get("is_error", False),
                    }
                    chunk.tool_result = tr
                    session.tool_results.append(tr)
                    await _maybe_await(on_tool_result, tr)
                    if request.verbose_output and on_tool_result is None:
                        _pp_tool_result(tr, theme)
            yield chunk

        # ------------------------ USER (tool_result containers) ------------
        elif typ == "user":
            msg = obj["message"]
            session.messages.append(msg)
            for blk in msg.get("content", []):
                if blk.get("type") == "tool_result":
                    tr = {
                        "tool_use_id": blk["tool_use_id"],
                        "content": blk["content"],
                        "is_error": blk.get("is_error", False),
                    }
                    chunk.tool_result = tr
                    session.tool_results.append(tr)
                    await _maybe_await(on_tool_result, tr)
                    if request.verbose_output and on_tool_result is None:
                        _pp_tool_result(tr, theme)
            yield chunk

        # ------------------------ RESULT -----------------------------------
        elif typ == "result":
            session.result = obj.get("result", "").strip()
            session.usage = obj.get("usage", {})
            session.total_cost_usd = obj.get("total_cost_usd")
            session.num_turns = obj.get("num_turns")
            session.duration_ms = obj.get("duration_ms")
            session.duration_api_ms = obj.get("duration_api_ms")
            session.is_error = obj.get("is_error", False)

        # ------------------------ DONE -------------------------------------
        elif typ == "done":
            break

    # final pretty print
    await _maybe_await(on_final, session)
    if request.verbose_output and on_final is None:
        _pp_final(session, theme)

    yield session
|
352
|
+
|
353
|
+
|
354
|
+
# Default configuration for the CLI-backed endpoint. base_url/api_key are
# placeholders: requests never leave the process — the "transport" is a
# local `claude` subprocess.
ENDPOINT_CONFIG = EndpointConfig(
    name="claude_code_cli",
    provider="claude_code",
    base_url="internal",
    endpoint="query_cli",
    api_key="dummy",
    request_options=ClaudeCodeRequest,
    # NOTE(review): original comment said "20 mins", but 12000 seconds is
    # 200 minutes — confirm the intended unit/value.
    timeout=12000,
)
|
363
|
+
|
364
|
+
|
365
|
+
class ClaudeCodeCLIEndpoint(Endpoint):
    """Endpoint that talks to the local ``claude`` CLI instead of HTTP."""

    def __init__(self, config: EndpointConfig = ENDPOINT_CONFIG, **kwargs):
        super().__init__(config=config, **kwargs)

    def create_payload(self, request: dict | BaseModel, **kwargs):
        """Merge config kwargs, the request, and call kwargs into a
        ClaudeCodeRequest.

        Returns:
            ``({"request": ClaudeCodeRequest}, {})`` — (payload, headers).
        """
        req_dict = {**self.config.kwargs, **to_dict(request), **kwargs}
        messages = req_dict.pop("messages")
        req_obj = ClaudeCodeRequest.create(messages=messages, **req_dict)
        return {"request": req_obj}, {}

    async def stream(self, request: dict | BaseModel, **kwargs):
        """Yield chunks from the CLI as they arrive.

        Fix: the original did ``self.create_payload(...)["request"]`` —
        indexing the ``(payload, headers)`` *tuple* with a string, which
        raised ``TypeError`` before streaming could start.
        """
        payload, _headers = self.create_payload(request, **kwargs)
        async for chunk in stream_claude_code_cli(payload["request"]):
            yield chunk

    async def _call(
        self,
        payload: dict,
        headers: dict,  # type: ignore[unused-argument]
        **kwargs,
    ):
        """Run a full CLI conversation and return the session as a dict."""
        responses = []
        request: ClaudeCodeRequest = payload["request"]
        session: ClaudeSession = ClaudeSession()
        # annotation fixed: this genuinely holds None until a system event arrives
        system: dict | None = None

        # 1. stream the Claude Code response
        async for chunk in stream_claude_code_cli(request, session, **kwargs):
            if isinstance(chunk, dict):
                system = chunk
            responses.append(chunk)

        # 2. if the stream ended without a final ClaudeSession and
        #    auto_finish is set, ask the CLI for one more summarising turn
        if request.auto_finish and not isinstance(
            responses[-1], ClaudeSession
        ):
            req2 = request.model_copy(deep=True)
            req2.max_turns = 1
            req2.continue_conversation = True
            if system:
                req2.resume = system.get("session_id")

            async for chunk in stream_claude_code_cli(req2, session, **kwargs):
                responses.append(chunk)
                if isinstance(chunk, ClaudeSession):
                    break

        # fixed: debug print() replaced with logging — library code should
        # not write directly to stdout
        log.info(
            "Session %s finished with %d chunks",
            session.session_id,
            len(responses),
        )

        return to_dict(session, recursive=True)
|
@@ -25,7 +25,7 @@ OPENAI_CHAT_ENDPOINT_CONFIG = EndpointConfig(
|
|
25
25
|
provider="openai",
|
26
26
|
base_url="https://api.openai.com/v1",
|
27
27
|
endpoint="chat/completions",
|
28
|
-
kwargs={"model": "gpt-
|
28
|
+
kwargs={"model": "gpt-4.1-nano"},
|
29
29
|
api_key=settings.OPENAI_API_KEY or "dummy-key-for-testing",
|
30
30
|
auth_type="bearer",
|
31
31
|
content_type="application/json",
|
lionagi/service/manager.py
CHANGED
@@ -18,7 +18,6 @@ __all__ = (
|
|
18
18
|
|
19
19
|
|
20
20
|
class RateLimitedAPIProcessor(Processor):
|
21
|
-
|
22
21
|
event_type = APICalling
|
23
22
|
|
24
23
|
def __init__(
|
@@ -127,7 +126,6 @@ class RateLimitedAPIProcessor(Processor):
|
|
127
126
|
|
128
127
|
|
129
128
|
class RateLimitedAPIExecutor(Executor):
|
130
|
-
|
131
129
|
processor_type = RateLimitedAPIProcessor
|
132
130
|
|
133
131
|
def __init__(
|
@@ -104,10 +104,8 @@ def get_image_pricing(model: str) -> dict:
|
|
104
104
|
|
105
105
|
|
106
106
|
class TokenCalculator:
|
107
|
-
|
108
107
|
@staticmethod
|
109
108
|
def calculate_message_tokens(messages: list[dict], /, **kwargs) -> int:
|
110
|
-
|
111
109
|
model = kwargs.get("model", "gpt-4o")
|
112
110
|
tokenizer = tiktoken.get_encoding(get_encoding_name(model)).encode
|
113
111
|
|
@@ -149,7 +147,6 @@ class TokenCalculator:
|
|
149
147
|
return_tokens: bool = False,
|
150
148
|
return_decoded: bool = False,
|
151
149
|
) -> int | list[int]:
|
152
|
-
|
153
150
|
if not s_:
|
154
151
|
return 0
|
155
152
|
|
lionagi/session/branch.py
CHANGED
@@ -185,7 +185,6 @@ class Branch(Element, Communicatable, Relational):
|
|
185
185
|
use_lion_system_message,
|
186
186
|
]
|
187
187
|
):
|
188
|
-
|
189
188
|
if use_lion_system_message:
|
190
189
|
system = f"Developer Prompt: {str(system)}" if system else ""
|
191
190
|
system = (LION_SYSTEM_MESSAGE + "\n\n" + system).strip()
|
@@ -647,7 +646,6 @@ class Branch(Element, Communicatable, Relational):
|
|
647
646
|
"""
|
648
647
|
meta = {}
|
649
648
|
if "clone_from" in self.metadata:
|
650
|
-
|
651
649
|
# Provide some reference info about the source from which we cloned
|
652
650
|
meta["clone_from"] = {
|
653
651
|
"id": str(self.metadata["clone_from"].id),
|
lionagi/session/session.py
CHANGED
@@ -86,7 +86,6 @@ class Session(Node, Communicatable, Relational):
|
|
86
86
|
tools: Tool | Callable | list = None,
|
87
87
|
**kwargs, # additional branch parameters
|
88
88
|
) -> Branch:
|
89
|
-
|
90
89
|
kwargs["system"] = system
|
91
90
|
kwargs["system_sender"] = system_sender
|
92
91
|
kwargs["system_datetime"] = system_datetime
|
lionagi/settings.py
CHANGED
lionagi/utils.py
CHANGED
@@ -5,6 +5,7 @@
|
|
5
5
|
import asyncio
|
6
6
|
import contextlib
|
7
7
|
import copy as _copy
|
8
|
+
import dataclasses
|
8
9
|
import functools
|
9
10
|
import importlib.metadata
|
10
11
|
import importlib.util
|
@@ -134,7 +135,6 @@ def hash_dict(data) -> int:
|
|
134
135
|
|
135
136
|
|
136
137
|
class Params(BaseModel):
|
137
|
-
|
138
138
|
def keys(self):
|
139
139
|
return self.model_fields.keys()
|
140
140
|
|
@@ -900,7 +900,6 @@ async def bcall(
|
|
900
900
|
flatten_tuple_set: bool = False,
|
901
901
|
**kwargs: Any,
|
902
902
|
) -> AsyncGenerator[list[T | tuple[T, float]], None]:
|
903
|
-
|
904
903
|
input_ = to_list(input_, flatten=True, dropna=True)
|
905
904
|
|
906
905
|
for i in range(0, len(input_), batch_size):
|
@@ -1372,7 +1371,6 @@ def xml_to_dict(
|
|
1372
1371
|
|
1373
1372
|
|
1374
1373
|
def dict_to_xml(data: dict, /, root_tag: str = "root") -> str:
|
1375
|
-
|
1376
1374
|
root = ET.Element(root_tag)
|
1377
1375
|
|
1378
1376
|
def convert(dict_obj: dict, parent: Any) -> None:
|
@@ -1471,7 +1469,6 @@ def recursive_to_dict(
|
|
1471
1469
|
recursive_custom_types: bool = False,
|
1472
1470
|
**kwargs: Any,
|
1473
1471
|
) -> Any:
|
1474
|
-
|
1475
1472
|
if not isinstance(max_recursive_depth, int):
|
1476
1473
|
max_recursive_depth = 5
|
1477
1474
|
else:
|
@@ -1504,7 +1501,6 @@ def _recur_to_dict(
|
|
1504
1501
|
recursive_custom_types: bool = False,
|
1505
1502
|
**kwargs: Any,
|
1506
1503
|
) -> Any:
|
1507
|
-
|
1508
1504
|
if current_depth >= max_recursive_depth:
|
1509
1505
|
return input_
|
1510
1506
|
|
@@ -1675,7 +1671,6 @@ def _to_dict(
|
|
1675
1671
|
use_enum_values: bool = True,
|
1676
1672
|
**kwargs: Any,
|
1677
1673
|
) -> dict[str, Any]:
|
1678
|
-
|
1679
1674
|
if isinstance(input_, set):
|
1680
1675
|
return _set_to_dict(input_)
|
1681
1676
|
|
@@ -1705,6 +1700,9 @@ def _to_dict(
|
|
1705
1700
|
if isinstance(input_, Iterable):
|
1706
1701
|
return _iterable_to_dict(input_)
|
1707
1702
|
|
1703
|
+
with contextlib.suppress(Exception):
|
1704
|
+
return dataclasses.asdict(input_)
|
1705
|
+
|
1708
1706
|
return dict(input_)
|
1709
1707
|
|
1710
1708
|
|
@@ -2307,7 +2305,6 @@ def parse_number(type_and_value: tuple[str, str]) -> float | complex:
|
|
2307
2305
|
def breakdown_pydantic_annotation(
|
2308
2306
|
model: type[B], max_depth: int | None = None, current_depth: int = 0
|
2309
2307
|
) -> dict[str, Any]:
|
2310
|
-
|
2311
2308
|
if not _is_pydantic_model(model):
|
2312
2309
|
raise TypeError("Input must be a Pydantic model")
|
2313
2310
|
|
@@ -2397,7 +2394,7 @@ def check_import(
|
|
2397
2394
|
if not is_import_installed(package_name):
|
2398
2395
|
if attempt_install:
|
2399
2396
|
logging.info(
|
2400
|
-
f"Package {package_name} not found. Attempting
|
2397
|
+
f"Package {package_name} not found. Attempting to install.",
|
2401
2398
|
)
|
2402
2399
|
try:
|
2403
2400
|
return install_import(
|
@@ -2576,7 +2573,7 @@ def pdf_to_images(
|
|
2576
2573
|
saved_paths = []
|
2577
2574
|
for i, image in enumerate(images):
|
2578
2575
|
# Construct the output file name
|
2579
|
-
image_file = os.path.join(output_folder, f"page_{i+1}.{fmt}")
|
2576
|
+
image_file = os.path.join(output_folder, f"page_{i + 1}.{fmt}")
|
2580
2577
|
image.save(image_file, fmt.upper())
|
2581
2578
|
saved_paths.append(image_file)
|
2582
2579
|
|
lionagi/version.py
CHANGED
@@ -1 +1 @@
|
|
1
|
-
__version__ = "0.13.
|
1
|
+
__version__ = "0.13.3"
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: lionagi
|
3
|
-
Version: 0.13.
|
3
|
+
Version: 0.13.3
|
4
4
|
Summary: An Intelligence Operating System.
|
5
5
|
Author-email: HaiyangLi <quantocean.li@gmail.com>, Liangbingyan Luo <llby_luo@outlook.com>
|
6
6
|
License: Apache License
|
@@ -221,8 +221,10 @@ Classifier: Programming Language :: Python :: 3.13
|
|
221
221
|
Requires-Python: >=3.10
|
222
222
|
Requires-Dist: aiocache>=0.12.0
|
223
223
|
Requires-Dist: aiohttp>=3.12.0
|
224
|
+
Requires-Dist: anyio>=4.8.0
|
224
225
|
Requires-Dist: backoff>=2.2.1
|
225
226
|
Requires-Dist: jinja2>=3.1.0
|
227
|
+
Requires-Dist: json-repair>=0.47.8
|
226
228
|
Requires-Dist: pillow>=11.0.0
|
227
229
|
Requires-Dist: psutil>=7.0.0
|
228
230
|
Requires-Dist: pydantic-settings>=2.8.0
|
@@ -234,7 +236,8 @@ Provides-Extra: all
|
|
234
236
|
Requires-Dist: claude-code-sdk>=0.0.14; extra == 'all'
|
235
237
|
Requires-Dist: docling>=2.15.1; extra == 'all'
|
236
238
|
Requires-Dist: fastmcp>=2.10.5; extra == 'all'
|
237
|
-
Requires-Dist: ollama>=0.
|
239
|
+
Requires-Dist: ollama>=0.4.0; extra == 'all'
|
240
|
+
Requires-Dist: rich>=13.0.0; extra == 'all'
|
238
241
|
Provides-Extra: claude-code
|
239
242
|
Requires-Dist: claude-code-sdk>=0.0.14; extra == 'claude-code'
|
240
243
|
Provides-Extra: docs
|
@@ -248,9 +251,11 @@ Requires-Dist: pre-commit>=4.0.1; extra == 'lint'
|
|
248
251
|
Provides-Extra: mcp
|
249
252
|
Requires-Dist: fastmcp>=2.10.5; extra == 'mcp'
|
250
253
|
Provides-Extra: ollama
|
251
|
-
Requires-Dist: ollama>=0.
|
254
|
+
Requires-Dist: ollama>=0.4.0; extra == 'ollama'
|
252
255
|
Provides-Extra: reader
|
253
256
|
Requires-Dist: docling>=2.15.1; extra == 'reader'
|
257
|
+
Provides-Extra: rich
|
258
|
+
Requires-Dist: rich>=13.0.0; extra == 'rich'
|
254
259
|
Provides-Extra: test
|
255
260
|
Requires-Dist: pytest-asyncio>=1.0.0; extra == 'test'
|
256
261
|
Requires-Dist: pytest>=8.3.4; extra == 'test'
|