lionagi 0.13.2__py3-none-any.whl → 0.13.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. lionagi/fields/action.py +0 -1
  2. lionagi/fields/reason.py +0 -1
  3. lionagi/libs/file/save.py +1 -1
  4. lionagi/libs/schema/as_readable.py +142 -196
  5. lionagi/libs/schema/extract_docstring.py +1 -2
  6. lionagi/libs/token_transform/synthlang_/base.py +0 -2
  7. lionagi/libs/validate/string_similarity.py +1 -2
  8. lionagi/models/hashable_model.py +0 -1
  9. lionagi/models/schema_model.py +0 -1
  10. lionagi/operations/ReAct/utils.py +0 -1
  11. lionagi/operations/_act/act.py +0 -1
  12. lionagi/operations/interpret/interpret.py +1 -4
  13. lionagi/operations/manager.py +0 -1
  14. lionagi/operations/plan/plan.py +0 -1
  15. lionagi/operations/select/utils.py +0 -2
  16. lionagi/protocols/forms/flow.py +3 -1
  17. lionagi/protocols/generic/pile.py +1 -2
  18. lionagi/protocols/generic/processor.py +0 -1
  19. lionagi/protocols/graph/graph.py +1 -3
  20. lionagi/protocols/mail/package.py +0 -1
  21. lionagi/protocols/messages/assistant_response.py +0 -2
  22. lionagi/protocols/messages/message.py +0 -1
  23. lionagi/service/connections/endpoint_config.py +6 -0
  24. lionagi/service/connections/match_endpoint.py +26 -8
  25. lionagi/service/connections/providers/claude_code_.py +8 -9
  26. lionagi/service/connections/providers/claude_code_cli.py +414 -0
  27. lionagi/service/connections/providers/oai_.py +1 -1
  28. lionagi/service/manager.py +0 -1
  29. lionagi/service/rate_limited_processor.py +0 -2
  30. lionagi/service/token_calculator.py +0 -3
  31. lionagi/session/branch.py +0 -2
  32. lionagi/session/session.py +0 -1
  33. lionagi/settings.py +0 -1
  34. lionagi/utils.py +6 -9
  35. lionagi/version.py +1 -1
  36. {lionagi-0.13.2.dist-info → lionagi-0.13.3.dist-info}/METADATA +5 -3
  37. {lionagi-0.13.2.dist-info → lionagi-0.13.3.dist-info}/RECORD +39 -43
  38. lionagi/traits/__init__.py +0 -58
  39. lionagi/traits/base.py +0 -216
  40. lionagi/traits/composer.py +0 -343
  41. lionagi/traits/protocols.py +0 -495
  42. lionagi/traits/registry.py +0 -1071
  43. {lionagi-0.13.2.dist-info → lionagi-0.13.3.dist-info}/WHEEL +0 -0
  44. {lionagi-0.13.2.dist-info → lionagi-0.13.3.dist-info}/licenses/LICENSE +0 -0
@@ -2,6 +2,8 @@
2
2
  #
3
3
  # SPDX-License-Identifier: Apache-2.0
4
4
 
5
+ from lionagi.service.connections.endpoint_config import EndpointConfig
6
+
5
7
  from .endpoint import Endpoint
6
8
 
7
9
 
@@ -45,13 +47,29 @@ def match_endpoint(
45
47
  from .providers.perplexity_ import PerplexityChatEndpoint
46
48
 
47
49
  return PerplexityChatEndpoint(**kwargs)
48
- if provider == "claude_code" and (
49
- "query" in endpoint or "code" in endpoint
50
- ):
51
- from lionagi.service.connections.providers.claude_code_ import (
52
- ClaudeCodeEndpoint,
53
- )
50
+ if provider == "claude_code":
51
+ if "cli" in endpoint:
52
+ from .providers.claude_code_cli import ClaudeCodeCLIEndpoint
53
+
54
+ return ClaudeCodeCLIEndpoint(**kwargs)
55
+
56
+ if "query" in endpoint or "code" in endpoint:
57
+ from lionagi.service.connections.providers.claude_code_ import (
58
+ ClaudeCodeEndpoint,
59
+ )
60
+
61
+ return ClaudeCodeEndpoint(**kwargs)
62
+
63
+ from .providers.oai_ import OpenaiChatEndpoint
54
64
 
55
- return ClaudeCodeEndpoint(**kwargs)
65
+ config = EndpointConfig(
66
+ provider=provider,
67
+ endpoint=endpoint or "chat/completions",
68
+ name="openai_compatible_chat",
69
+ auth_type="bearer",
70
+ content_type="application/json",
71
+ method="POST",
72
+ requires_tokens=True,
73
+ )
56
74
 
57
- return None
75
+ return Endpoint(config, **kwargs)
@@ -76,6 +76,7 @@ class ClaudeCodeRequest(BaseModel):
76
76
  description="Automatically finish the conversation after the first response",
77
77
  )
78
78
  verbose_output: bool = Field(default=False, exclude=True)
79
+ cli_display_theme: Literal["light", "dark"] = "light"
79
80
 
80
81
  # ------------------------ validators & helpers --------------------------
81
82
  @field_validator("permission_mode", mode="before")
@@ -141,8 +142,7 @@ class ClaudeCodeRequest(BaseModel):
141
142
  # ------------------------ CLI helpers -----------------------------------
142
143
  def as_cmd_args(self) -> list[str]:
143
144
  """Build argument list for the *Node* `claude` CLI."""
144
- full_prompt = f"Human User: {self.prompt}\n\nAssistant:"
145
- args: list[str] = ["-p", full_prompt, "--output-format", "stream-json"]
145
+ args: list[str] = ["-p", self.prompt, "--output-format", "stream-json"]
146
146
  if self.allowed_tools:
147
147
  args.append("--allowedTools")
148
148
  for tool in self.allowed_tools:
@@ -356,9 +356,8 @@ class ClaudeCodeEndpoint(Endpoint):
356
356
 
357
357
  # 1. stream the Claude Code response
358
358
  async for chunk in self._stream_claude_code(**payload):
359
-
360
359
  if request.verbose_output:
361
- _display_message(chunk)
360
+ _display_message(chunk, theme=request.cli_display_theme)
362
361
 
363
362
  if isinstance(chunk, cc_types.SystemMessage):
364
363
  system = chunk
@@ -391,7 +390,7 @@ class ClaudeCodeEndpoint(Endpoint):
391
390
  display_str=True,
392
391
  format_curly=True,
393
392
  max_panel_width=100,
394
- theme="light",
393
+ theme=request.cli_display_theme,
395
394
  )
396
395
 
397
396
  responses.append(chunk)
@@ -400,7 +399,7 @@ class ClaudeCodeEndpoint(Endpoint):
400
399
  return self._parse_claude_code_response(responses)
401
400
 
402
401
 
403
- def _display_message(chunk):
402
+ def _display_message(chunk, theme):
404
403
  if isinstance(
405
404
  chunk,
406
405
  cc_types.SystemMessage
@@ -415,7 +414,7 @@ def _display_message(chunk):
415
414
  md=True,
416
415
  display_str=True,
417
416
  max_panel_width=100,
418
- theme="light",
417
+ theme=theme,
419
418
  )
420
419
  else:
421
420
  as_readable(
@@ -423,7 +422,7 @@ def _display_message(chunk):
423
422
  format_curly=True,
424
423
  display_str=True,
425
424
  max_panel_width=100,
426
- theme="light",
425
+ theme=theme,
427
426
  )
428
427
 
429
428
  if isinstance(chunk, cc_types.ResultMessage):
@@ -434,7 +433,7 @@ def _display_message(chunk):
434
433
  display_str=True,
435
434
  format_curly=True,
436
435
  max_panel_width=100,
437
- theme="light",
436
+ theme=theme,
438
437
  )
439
438
 
440
439
 
@@ -0,0 +1,414 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import codecs
5
+ import contextlib
6
+ import dataclasses
7
+ import json
8
+ import logging
9
+ import shutil
10
+ from collections.abc import AsyncIterator, Callable
11
+ from datetime import datetime
12
+ from functools import partial
13
+ from textwrap import shorten
14
+ from typing import Any
15
+
16
+ from json_repair import repair_json
17
+ from pydantic import BaseModel
18
+
19
+ from lionagi.libs.schema.as_readable import as_readable
20
+ from lionagi.service.connections.endpoint import Endpoint, EndpointConfig
21
+ from lionagi.utils import to_dict
22
+
23
+ from .claude_code_ import ClaudeCodeRequest
24
+
25
+ CLAUDE = shutil.which("claude") or "claude"
26
+ if not shutil.which(CLAUDE):
27
+ raise RuntimeError(
28
+ "Claude CLI binary not found (npm i -g @anthropic-ai/claude-code)"
29
+ )
30
+ logging.basicConfig(level=logging.INFO)
31
+ log = logging.getLogger("claude-cli")
32
+
33
+
34
+ @dataclasses.dataclass
35
+ class ClaudeChunk:
36
+ """Low-level wrapper around every NDJSON object coming from the CLI."""
37
+
38
+ raw: dict[str, Any]
39
+ type: str
40
+ # convenience views
41
+ thinking: str | None = None
42
+ text: str | None = None
43
+ tool_use: dict[str, Any] | None = None
44
+ tool_result: dict[str, Any] | None = None
45
+
46
+
47
+ @dataclasses.dataclass
48
+ class ClaudeSession:
49
+ """Aggregated view of a whole CLI conversation."""
50
+
51
+ session_id: str | None = None
52
+ model: str | None = None
53
+
54
+ # chronological log
55
+ chunks: list[ClaudeChunk] = dataclasses.field(default_factory=list)
56
+
57
+ # materialised views
58
+ thinking_log: list[str] = dataclasses.field(default_factory=list)
59
+ messages: list[dict[str, Any]] = dataclasses.field(default_factory=list)
60
+ tool_uses: list[dict[str, Any]] = dataclasses.field(default_factory=list)
61
+ tool_results: list[dict[str, Any]] = dataclasses.field(
62
+ default_factory=list
63
+ )
64
+
65
+ # final summary
66
+ result: str = ""
67
+ usage: dict[str, Any] = dataclasses.field(default_factory=dict)
68
+ total_cost_usd: float | None = None
69
+ num_turns: int | None = None
70
+ duration_ms: int | None = None
71
+ duration_api_ms: int | None = None
72
+ is_error: bool = False
73
+
74
+
75
+ # --------------------------------------------------------------------------- helpers
76
+
77
+
78
+ async def ndjson_from_cli(request: ClaudeCodeRequest):
79
+ """
80
+ Yields each JSON object emitted by the *claude-code* CLI.
81
+
82
+ • Robust against UTF‑8 splits across chunks (incremental decoder).
83
+ • Robust against braces inside strings (uses json.JSONDecoder.raw_decode)
84
+ • Falls back to `json_repair.repair_json` when necessary.
85
+ """
86
+ workspace = request.cwd()
87
+ workspace.mkdir(parents=True, exist_ok=True)
88
+
89
+ proc = await asyncio.create_subprocess_exec(
90
+ CLAUDE,
91
+ *request.as_cmd_args(),
92
+ cwd=str(workspace),
93
+ stdout=asyncio.subprocess.PIPE,
94
+ stderr=asyncio.subprocess.PIPE,
95
+ )
96
+
97
+ decoder = codecs.getincrementaldecoder("utf-8")()
98
+ json_decoder = json.JSONDecoder()
99
+ buffer: str = "" # text buffer that may hold >1 JSON objects
100
+
101
+ try:
102
+ while True:
103
+ chunk = await proc.stdout.read(4096)
104
+ if not chunk:
105
+ break
106
+
107
+ # 1) decode *incrementally* so we never split multibyte chars
108
+ buffer += decoder.decode(chunk)
109
+
110
+ # 2) try to peel off as many complete JSON objs as possible
111
+ while buffer:
112
+ buffer = buffer.lstrip() # remove leading spaces/newlines
113
+ if not buffer:
114
+ break
115
+ try:
116
+ obj, idx = json_decoder.raw_decode(buffer)
117
+ yield obj
118
+ buffer = buffer[idx:] # keep remainder for next round
119
+ except json.JSONDecodeError:
120
+ # incomplete → need more bytes
121
+ break
122
+
123
+ # 3) flush any tail bytes in the incremental decoder
124
+ buffer += decoder.decode(b"", final=True)
125
+ buffer = buffer.strip()
126
+ if buffer:
127
+ try:
128
+ obj, idx = json_decoder.raw_decode(buffer)
129
+ yield obj
130
+ except json.JSONDecodeError:
131
+ try:
132
+ fixed = repair_json(buffer)
133
+ yield json.loads(fixed)
134
+ log.warning(
135
+ "Repaired malformed JSON fragment at stream end"
136
+ )
137
+ except Exception:
138
+ log.error(
139
+ "Skipped unrecoverable JSON tail: %.120s…", buffer
140
+ )
141
+
142
+ # 4) propagate non‑zero exit code
143
+ if await proc.wait() != 0:
144
+ err = (await proc.stderr.read()).decode().strip()
145
+ raise RuntimeError(err or "CLI exited non‑zero")
146
+
147
+ finally:
148
+ with contextlib.suppress(ProcessLookupError):
149
+ proc.terminate()
150
+ await proc.wait()
151
+
152
+
153
+ # --------------------------------------------------------------------------- SSE route
154
+ async def stream_events(request: ClaudeCodeRequest):
155
+ async for obj in ndjson_from_cli(request):
156
+ yield obj
157
+ yield {"type": "done"}
158
+
159
+
160
+ print_readable = partial(as_readable, md=True, display_str=True)
161
+
162
+
163
+ def _pp_system(sys_obj: dict[str, Any], theme) -> None:
164
+ txt = (
165
+ f"◼️ **Claude Code Session** \n"
166
+ f"- id: `{sys_obj.get('session_id', '?')}` \n"
167
+ f"- model: `{sys_obj.get('model', '?')}` \n"
168
+ f"- tools: {', '.join(sys_obj.get('tools', [])[:8])}"
169
+ + ("…" if len(sys_obj.get("tools", [])) > 8 else "")
170
+ )
171
+ print_readable(txt, border=False, theme=theme)
172
+
173
+
174
+ def _pp_thinking(thought: str, theme) -> None:
175
+ text = f"""
176
+ 🧠 Thinking:
177
+ {thought}
178
+ """
179
+ print_readable(text, border=True, theme=theme)
180
+
181
+
182
+ def _pp_assistant_text(text: str, theme) -> None:
183
+ txt = f"""
184
+ > 🗣️ Claude:
185
+ {text}
186
+ """
187
+ print_readable(txt, theme=theme)
188
+
189
+
190
+ def _pp_tool_use(tu: dict[str, Any], theme) -> None:
191
+ preview = shorten(str(tu["input"]).replace("\n", " "), 130)
192
+ body = f"- 🔧 Tool Use — {tu['name']}({tu['id']}) - input: {preview}"
193
+ print_readable(body, border=False, panel=False, theme=theme)
194
+
195
+
196
+ def _pp_tool_result(tr: dict[str, Any], theme) -> None:
197
+ body_preview = shorten(str(tr["content"]).replace("\n", " "), 130)
198
+ status = "ERR" if tr.get("is_error") else "OK"
199
+ body = (
200
+ f"- 📄 Tool Result({tr['tool_use_id']}) - {status}\n\n"
201
+ f"\tcontent: {body_preview}"
202
+ )
203
+ print_readable(body, border=False, panel=False, theme=theme)
204
+
205
+
206
+ def _pp_final(sess: ClaudeSession, theme) -> None:
207
+ usage = sess.usage or {}
208
+ txt = (
209
+ f"### ✅ Session complete - {datetime.utcnow().isoformat(timespec='seconds')} UTC\n"
210
+ f"**Result:**\n\n{sess.result or ''}\n\n"
211
+ f"- cost: **${sess.total_cost_usd:.4f}** \n"
212
+ f"- turns: **{sess.num_turns}** \n"
213
+ f"- duration: **{sess.duration_ms} ms** (API {sess.duration_api_ms} ms) \n"
214
+ f"- tokens in/out: {usage.get('input_tokens', 0)}/{usage.get('output_tokens', 0)}"
215
+ )
216
+ print_readable(txt, theme=theme)
217
+
218
+
219
+ # --------------------------------------------------------------------------- internal utils
220
+
221
+
222
+ async def _maybe_await(func, *args, **kw):
223
+ """Call func which may be sync or async."""
224
+ res = func(*args, **kw) if func else None
225
+ if asyncio.iscoroutine(res):
226
+ await res
227
+
228
+
229
+ # --------------------------------------------------------------------------- main parser
230
+
231
+
232
+ async def stream_claude_code_cli( # noqa: C901 (complexity from branching is fine here)
233
+ request: ClaudeCodeRequest,
234
+ session: ClaudeSession = ClaudeSession(),
235
+ *,
236
+ on_system: Callable[[dict[str, Any]], None] | None = None,
237
+ on_thinking: Callable[[str], None] | None = None,
238
+ on_text: Callable[[str], None] | None = None,
239
+ on_tool_use: Callable[[dict[str, Any]], None] | None = None,
240
+ on_tool_result: Callable[[dict[str, Any]], None] | None = None,
241
+ on_final: Callable[[ClaudeSession], None] | None = None,
242
+ ) -> AsyncIterator[ClaudeChunk | dict | ClaudeSession]:
243
+ """
244
+ Consume the ND‑JSON stream produced by ndjson_from_cli()
245
+ and return a fully‑populated ClaudeSession.
246
+
247
+ If callbacks are omitted a default pretty‑print is emitted.
248
+ """
249
+ stream = ndjson_from_cli(request)
250
+ theme = request.cli_display_theme or "light"
251
+
252
+ async for obj in stream:
253
+ typ = obj.get("type", "unknown")
254
+ chunk = ClaudeChunk(raw=obj, type=typ)
255
+ session.chunks.append(chunk)
256
+
257
+ # ------------------------ SYSTEM -----------------------------------
258
+ if typ == "system":
259
+ data = obj
260
+ session.session_id = data.get("session_id", session.session_id)
261
+ session.model = data.get("model", session.model)
262
+ await _maybe_await(on_system, data)
263
+ if request.verbose_output and on_system is None:
264
+ _pp_system(data, theme)
265
+ yield data
266
+
267
+ # ------------------------ ASSISTANT --------------------------------
268
+ elif typ == "assistant":
269
+ msg = obj["message"]
270
+ session.messages.append(msg)
271
+
272
+ for blk in msg.get("content", []):
273
+ btype = blk.get("type")
274
+ if btype == "thinking":
275
+ thought = blk.get("thinking", "").strip()
276
+ chunk.thinking = thought
277
+ session.thinking_log.append(thought)
278
+ await _maybe_await(on_thinking, thought)
279
+ if request.verbose_output and on_thinking is None:
280
+ _pp_thinking(thought, theme)
281
+
282
+ elif btype == "text":
283
+ text = blk.get("text", "")
284
+ chunk.text = text
285
+ await _maybe_await(on_text, text)
286
+ if request.verbose_output and on_text is None:
287
+ _pp_assistant_text(text, theme)
288
+
289
+ elif btype == "tool_use":
290
+ tu = {
291
+ "id": blk["id"],
292
+ "name": blk["name"],
293
+ "input": blk["input"],
294
+ }
295
+ chunk.tool_use = tu
296
+ session.tool_uses.append(tu)
297
+ await _maybe_await(on_tool_use, tu)
298
+ if request.verbose_output and on_tool_use is None:
299
+ _pp_tool_use(tu, theme)
300
+
301
+ elif btype == "tool_result":
302
+ tr = {
303
+ "tool_use_id": blk["tool_use_id"],
304
+ "content": blk["content"],
305
+ "is_error": blk.get("is_error", False),
306
+ }
307
+ chunk.tool_result = tr
308
+ session.tool_results.append(tr)
309
+ await _maybe_await(on_tool_result, tr)
310
+ if request.verbose_output and on_tool_result is None:
311
+ _pp_tool_result(tr, theme)
312
+ yield chunk
313
+
314
+ # ------------------------ USER (tool_result containers) ------------
315
+ elif typ == "user":
316
+ msg = obj["message"]
317
+ session.messages.append(msg)
318
+ for blk in msg.get("content", []):
319
+ if blk.get("type") == "tool_result":
320
+ tr = {
321
+ "tool_use_id": blk["tool_use_id"],
322
+ "content": blk["content"],
323
+ "is_error": blk.get("is_error", False),
324
+ }
325
+ chunk.tool_result = tr
326
+ session.tool_results.append(tr)
327
+ await _maybe_await(on_tool_result, tr)
328
+ if request.verbose_output and on_tool_result is None:
329
+ _pp_tool_result(tr, theme)
330
+ yield chunk
331
+
332
+ # ------------------------ RESULT -----------------------------------
333
+ elif typ == "result":
334
+ session.result = obj.get("result", "").strip()
335
+ session.usage = obj.get("usage", {})
336
+ session.total_cost_usd = obj.get("total_cost_usd")
337
+ session.num_turns = obj.get("num_turns")
338
+ session.duration_ms = obj.get("duration_ms")
339
+ session.duration_api_ms = obj.get("duration_api_ms")
340
+ session.is_error = obj.get("is_error", False)
341
+
342
+ # ------------------------ DONE -------------------------------------
343
+ elif typ == "done":
344
+ break
345
+
346
+ # final pretty print
347
+ await _maybe_await(on_final, session)
348
+ if request.verbose_output and on_final is None:
349
+ _pp_final(session, theme)
350
+
351
+ yield session
352
+
353
+
354
+ ENDPOINT_CONFIG = EndpointConfig(
355
+ name="claude_code_cli",
356
+ provider="claude_code",
357
+ base_url="internal",
358
+ endpoint="query_cli",
359
+ api_key="dummy",
360
+ request_options=ClaudeCodeRequest,
361
+ timeout=12000,  # 200 minutes (12000 seconds)
362
+ )
363
+
364
+
365
+ class ClaudeCodeCLIEndpoint(Endpoint):
366
+ def __init__(self, config: EndpointConfig = ENDPOINT_CONFIG, **kwargs):
367
+ super().__init__(config=config, **kwargs)
368
+
369
+ def create_payload(self, request: dict | BaseModel, **kwargs):
370
+ req_dict = {**self.config.kwargs, **to_dict(request), **kwargs}
371
+ messages = req_dict.pop("messages")
372
+ req_obj = ClaudeCodeRequest.create(messages=messages, **req_dict)
373
+ return {"request": req_obj}, {}
374
+
375
+ async def stream(self, request: dict | BaseModel, **kwargs):
376
+ payload, _ = self.create_payload(request, **kwargs)["request"]
377
+ async for chunk in stream_claude_code_cli(payload):
378
+ yield chunk
379
+
380
+ async def _call(
381
+ self,
382
+ payload: dict,
383
+ headers: dict, # type: ignore[unused-argument]
384
+ **kwargs,
385
+ ):
386
+ responses = []
387
+ request: ClaudeCodeRequest = payload["request"]
388
+ session: ClaudeSession = ClaudeSession()
389
+ system: dict = None
390
+
391
+ # 1. stream the Claude Code response
392
+ async for chunk in stream_claude_code_cli(request, session, **kwargs):
393
+ if isinstance(chunk, dict):
394
+ system = chunk
395
+ responses.append(chunk)
396
+
397
+ if request.auto_finish and not isinstance(
398
+ responses[-1], ClaudeSession
399
+ ):
400
+ req2 = request.model_copy(deep=True)
401
+ req2.max_turns = 1
402
+ req2.continue_conversation = True
403
+ if system:
404
+ req2.resume = system.get("session_id") if system else None
405
+
406
+ async for chunk in stream_claude_code_cli(req2, session, **kwargs):
407
+ responses.append(chunk)
408
+ if isinstance(chunk, ClaudeSession):
409
+ break
410
+ print(
411
+ f"Session {session.session_id} finished with {len(responses)} chunks"
412
+ )
413
+
414
+ return to_dict(session, recursive=True)
@@ -25,7 +25,7 @@ OPENAI_CHAT_ENDPOINT_CONFIG = EndpointConfig(
25
25
  provider="openai",
26
26
  base_url="https://api.openai.com/v1",
27
27
  endpoint="chat/completions",
28
- kwargs={"model": "gpt-4o"},
28
+ kwargs={"model": "gpt-4.1-nano"},
29
29
  api_key=settings.OPENAI_API_KEY or "dummy-key-for-testing",
30
30
  auth_type="bearer",
31
31
  content_type="application/json",
@@ -9,7 +9,6 @@ from .imodel import iModel
9
9
 
10
10
 
11
11
  class iModelManager(Manager):
12
-
13
12
  def __init__(self, *args: iModel, **kwargs):
14
13
  super().__init__()
15
14
 
@@ -18,7 +18,6 @@ __all__ = (
18
18
 
19
19
 
20
20
  class RateLimitedAPIProcessor(Processor):
21
-
22
21
  event_type = APICalling
23
22
 
24
23
  def __init__(
@@ -127,7 +126,6 @@ class RateLimitedAPIProcessor(Processor):
127
126
 
128
127
 
129
128
  class RateLimitedAPIExecutor(Executor):
130
-
131
129
  processor_type = RateLimitedAPIProcessor
132
130
 
133
131
  def __init__(
@@ -104,10 +104,8 @@ def get_image_pricing(model: str) -> dict:
104
104
 
105
105
 
106
106
  class TokenCalculator:
107
-
108
107
  @staticmethod
109
108
  def calculate_message_tokens(messages: list[dict], /, **kwargs) -> int:
110
-
111
109
  model = kwargs.get("model", "gpt-4o")
112
110
  tokenizer = tiktoken.get_encoding(get_encoding_name(model)).encode
113
111
 
@@ -149,7 +147,6 @@ class TokenCalculator:
149
147
  return_tokens: bool = False,
150
148
  return_decoded: bool = False,
151
149
  ) -> int | list[int]:
152
-
153
150
  if not s_:
154
151
  return 0
155
152
 
lionagi/session/branch.py CHANGED
@@ -185,7 +185,6 @@ class Branch(Element, Communicatable, Relational):
185
185
  use_lion_system_message,
186
186
  ]
187
187
  ):
188
-
189
188
  if use_lion_system_message:
190
189
  system = f"Developer Prompt: {str(system)}" if system else ""
191
190
  system = (LION_SYSTEM_MESSAGE + "\n\n" + system).strip()
@@ -647,7 +646,6 @@ class Branch(Element, Communicatable, Relational):
647
646
  """
648
647
  meta = {}
649
648
  if "clone_from" in self.metadata:
650
-
651
649
  # Provide some reference info about the source from which we cloned
652
650
  meta["clone_from"] = {
653
651
  "id": str(self.metadata["clone_from"].id),
@@ -86,7 +86,6 @@ class Session(Node, Communicatable, Relational):
86
86
  tools: Tool | Callable | list = None,
87
87
  **kwargs, # additional branch parameters
88
88
  ) -> Branch:
89
-
90
89
  kwargs["system"] = system
91
90
  kwargs["system_sender"] = system_sender
92
91
  kwargs["system_datetime"] = system_datetime
lionagi/settings.py CHANGED
@@ -56,7 +56,6 @@ LOG_CONFIG = {
56
56
 
57
57
 
58
58
  class Settings:
59
-
60
59
  class Config:
61
60
  TIMEZONE: timezone = timezone.utc
62
61
  LOG: dict = LOG_CONFIG