lionagi 0.13.1__py3-none-any.whl → 0.13.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lionagi/fields/action.py +0 -1
- lionagi/fields/reason.py +0 -1
- lionagi/libs/file/save.py +1 -1
- lionagi/libs/schema/as_readable.py +184 -16
- lionagi/libs/schema/extract_docstring.py +1 -2
- lionagi/libs/token_transform/synthlang_/base.py +0 -2
- lionagi/libs/validate/string_similarity.py +1 -2
- lionagi/models/hashable_model.py +0 -1
- lionagi/models/schema_model.py +0 -1
- lionagi/operations/ReAct/utils.py +0 -1
- lionagi/operations/_act/act.py +0 -1
- lionagi/operations/interpret/interpret.py +1 -4
- lionagi/operations/manager.py +0 -1
- lionagi/operations/plan/plan.py +0 -1
- lionagi/operations/select/utils.py +0 -2
- lionagi/protocols/forms/flow.py +3 -1
- lionagi/protocols/generic/pile.py +1 -2
- lionagi/protocols/generic/processor.py +0 -1
- lionagi/protocols/graph/graph.py +1 -3
- lionagi/protocols/mail/package.py +0 -1
- lionagi/protocols/messages/assistant_response.py +0 -2
- lionagi/protocols/messages/message.py +0 -1
- lionagi/service/connections/endpoint_config.py +6 -0
- lionagi/service/connections/match_endpoint.py +26 -8
- lionagi/service/connections/providers/claude_code_.py +195 -22
- lionagi/service/connections/providers/claude_code_cli.py +414 -0
- lionagi/service/connections/providers/oai_.py +1 -1
- lionagi/service/manager.py +0 -1
- lionagi/service/rate_limited_processor.py +0 -2
- lionagi/service/token_calculator.py +0 -3
- lionagi/session/branch.py +0 -2
- lionagi/session/session.py +0 -1
- lionagi/settings.py +0 -1
- lionagi/utils.py +6 -9
- lionagi/version.py +1 -1
- {lionagi-0.13.1.dist-info → lionagi-0.13.3.dist-info}/METADATA +8 -3
- {lionagi-0.13.1.dist-info → lionagi-0.13.3.dist-info}/RECORD +39 -43
- lionagi/traits/__init__.py +0 -58
- lionagi/traits/base.py +0 -216
- lionagi/traits/composer.py +0 -343
- lionagi/traits/protocols.py +0 -495
- lionagi/traits/registry.py +0 -1071
- {lionagi-0.13.1.dist-info → lionagi-0.13.3.dist-info}/WHEEL +0 -0
- {lionagi-0.13.1.dist-info → lionagi-0.13.3.dist-info}/licenses/LICENSE +0 -0
lionagi/service/connections/providers/claude_code_.py
@@ -10,11 +10,13 @@ from typing import Any, Literal
 
 from claude_code_sdk import ClaudeCodeOptions
 from claude_code_sdk import query as sdk_query
+from claude_code_sdk import types as cc_types
 from pydantic import BaseModel, Field, field_validator, model_validator
 
+from lionagi.libs.schema.as_readable import as_readable
 from lionagi.service.connections.endpoint import Endpoint
 from lionagi.service.connections.endpoint_config import EndpointConfig
-from lionagi.utils import to_dict
+from lionagi.utils import to_dict, to_list
 
 # --------------------------------------------------------------------------- constants
 ClaudePermission = Literal[
@@ -67,6 +69,15 @@ class ClaudeCodeRequest(BaseModel):
     permission_prompt_tool_name: str | None = None
     disallowed_tools: list[str] = Field(default_factory=list)
 
+    # -- internal use --------------------------------------------------------
+    auto_finish: bool = Field(
+        default=True,
+        exclude=True,
+        description="Automatically finish the conversation after the first response",
+    )
+    verbose_output: bool = Field(default=False, exclude=True)
+    cli_display_theme: Literal["light", "dark"] = "light"
+
     # ------------------------ validators & helpers --------------------------
     @field_validator("permission_mode", mode="before")
     def _norm_perm(cls, v):
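The three new fields are internal toggles rather than SDK options; `auto_finish` and `verbose_output` are declared with `exclude=True`, so they stay out of the serialized payload. A minimal sketch of setting them when building a request, assuming the import path follows the file listing above and that `prompt` is the only other required field:

```python
from lionagi.service.connections.providers.claude_code_ import ClaudeCodeRequest

# Hypothetical values, for illustration only.
req = ClaudeCodeRequest(
    prompt="Summarize the changes in this repository",
    auto_finish=False,       # skip the extra "final result" query (see the stream hunk below)
    verbose_output=True,     # pretty-print streamed chunks via as_readable
    cli_display_theme="dark",
)

# exclude=True keeps the internal toggles out of the serialized request.
assert "auto_finish" not in req.model_dump()
assert "verbose_output" not in req.model_dump()
```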
@@ -131,8 +142,7 @@ class ClaudeCodeRequest(BaseModel):
     # ------------------------ CLI helpers -----------------------------------
     def as_cmd_args(self) -> list[str]:
         """Build argument list for the *Node* `claude` CLI."""
-
-        args: list[str] = ["-p", full_prompt, "--output-format", "stream-json"]
+        args: list[str] = ["-p", self.prompt, "--output-format", "stream-json"]
         if self.allowed_tools:
             args.append("--allowedTools")
             for tool in self.allowed_tools:
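The removed line referenced a `full_prompt` name that is not defined anywhere in the method; the argument builder now reads the validated `prompt` field. A rough sketch of the result, using a hypothetical prompt, ignoring any flags the rest of the method may append, and assuming `prompt` alone is enough to construct the model:

```python
from lionagi.service.connections.providers.claude_code_ import ClaudeCodeRequest

req = ClaudeCodeRequest(prompt="fix the failing test")
args = req.as_cmd_args()

# The list starts with the fixed prefix built on the first line of the method.
assert args[:4] == ["-p", "fix the failing test", "--output-format", "stream-json"]
```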
@@ -182,27 +192,46 @@ class ClaudeCodeRequest(BaseModel):
         if not messages:
             raise ValueError("messages may not be empty")
 
-        prompt =
-        if isinstance(prompt, (dict, list)):
-            prompt = json.dumps(prompt)
+        prompt = ""
 
-        if resume
+        # 1. if resume or continue_conversation, use the last message
+        if resume or continue_conversation:
             continue_conversation = True
-
+            prompt = messages[-1]["content"]
+            if isinstance(prompt, (dict, list)):
+                prompt = json.dumps(prompt)
+
+        # 2. else, use entire messages except system message
+        else:
+            prompts = []
+            continue_conversation = False
+            for message in messages:
+                if message["role"] != "system":
+                    content = message["content"]
+                    prompts.append(
+                        json.dumps(content)
+                        if isinstance(content, (dict, list))
+                        else content
+                    )
+
+            prompt = "\n".join(prompts)
+
+        # 3. assemble the request data
         data: dict[str, Any] = dict(
             prompt=prompt,
             resume=resume,
             continue_conversation=bool(continue_conversation),
         )
 
+        # 4. extract system prompt if available
         if (messages[0]["role"] == "system") and (
             resume or continue_conversation
         ):
             data["system_prompt"] = messages[0]["content"]
-
-
-
-
+        if kwargs.get("append_system_prompt"):
+            data["append_system_prompt"] = str(
+                kwargs.get("append_system_prompt")
+            )
 
         data.update(kwargs)
         return cls.model_validate(data, strict=False)
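For a fresh (non-resumed) conversation, the prompt is now assembled from every non-system message, with dict or list content JSON-encoded before joining. A standalone sketch of those assembly rules, using illustrative messages rather than the library's own API:

```python
import json

messages = [
    {"role": "system", "content": "You are a careful coding agent."},
    {"role": "user", "content": "List the public modules."},
    {"role": "assistant", "content": {"modules": ["fields", "operations"]}},
]

# Same rules as the "else" branch above: skip system messages,
# JSON-encode structured content, join with newlines.
prompts = []
for message in messages:
    if message["role"] != "system":
        content = message["content"]
        prompts.append(
            json.dumps(content) if isinstance(content, (dict, list)) else content
        )

prompt = "\n".join(prompts)
print(prompt)
# List the public modules.
# {"modules": ["fields", "operations"]}
```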
@@ -238,7 +267,7 @@ class ClaudeCodeEndpoint(Endpoint):
         )
 
     async def stream(self, request: dict | BaseModel, **kwargs):
-        payload = self.create_payload(request, **kwargs)["request"]
+        payload, _ = self.create_payload(request, **kwargs)["request"]
         async for chunk in self._stream_claude_code(payload):
             yield chunk
 
@@ -258,6 +287,7 @@ class ClaudeCodeEndpoint(Endpoint):
             "session_id": None,
             "model": "claude-code",
             "result": "",
+            "tool_uses": [],
             "tool_results": [],
             "is_error": False,
             "num_turns": None,
@@ -269,17 +299,31 @@ class ClaudeCodeEndpoint(Endpoint):
             },
         }
 
-        from claude_code_sdk import types
-
         for response in responses:
-            if isinstance(response,
+            if isinstance(response, cc_types.SystemMessage):
                 results["session_id"] = response.data.get("session_id")
                 results["model"] = response.data.get("model", "claude-code")
-            if isinstance(
-
-
+            if isinstance(
+                response, cc_types.AssistantMessage | cc_types.UserMessage
+            ):
+                for block in to_list(
+                    response.content,
+                    flatten=True,
+                    flatten_tuple_set=True,
+                    dropna=True,
+                ):
+                    if isinstance(block, cc_types.TextBlock):
                         results["result"] += block.text.strip() + "\n"
-
+
+                    if isinstance(block, cc_types.ToolUseBlock):
+                        entry = {
+                            "id": block.id,
+                            "name": block.name,
+                            "input": block.input,
+                        }
+                        results["tool_uses"].append(entry)
+
+                    if isinstance(block, cc_types.ToolResultBlock):
                         results["tool_results"].append(
                             {
                                 "tool_use_id": block.tool_use_id,
@@ -287,8 +331,10 @@ class ClaudeCodeEndpoint(Endpoint):
                                 "is_error": block.is_error,
                             }
                         )
-
-
+
+            if isinstance(response, cc_types.ResultMessage):
+                if response.result:
+                    results["result"] = str(response.result).strip()
                 results["usage"] = response.usage
                 results["is_error"] = response.is_error
                 results["total_cost_usd"] = response.total_cost_usd
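Taken together, these parsing changes collect tool calls into a new `tool_uses` list and let a trailing `ResultMessage` override the accumulated text. A sketch of the dict shape that `_parse_claude_code_response` now returns, with purely illustrative values and only the keys visible in the hunks above:

```python
parsed = {
    "session_id": "sess_0123",        # from the SystemMessage
    "model": "claude-code",
    "result": "Final answer text",    # ResultMessage.result wins when present
    "tool_uses": [                    # new in 0.13.3: one entry per ToolUseBlock
        {"id": "toolu_01", "name": "Read", "input": {"file_path": "README.md"}},
    ],
    "tool_results": [
        # one entry per ToolResultBlock; fields between tool_use_id and
        # is_error fall outside the hunks shown above
        {"tool_use_id": "toolu_01", "is_error": False},
    ],
    "is_error": False,
    "num_turns": 2,                   # illustrative
    "usage": {},                      # ResultMessage.usage, shape not shown here
    "total_cost_usd": 0.0123,
}
```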
@@ -305,7 +351,134 @@ class ClaudeCodeEndpoint(Endpoint):
         **kwargs,
     ):
         responses = []
+        request: ClaudeCodeRequest = payload["request"]
+        system: cc_types.SystemMessage = None
+
+        # 1. stream the Claude Code response
         async for chunk in self._stream_claude_code(**payload):
+            if request.verbose_output:
+                _display_message(chunk, theme=request.cli_display_theme)
+
+            if isinstance(chunk, cc_types.SystemMessage):
+                system = chunk
             responses.append(chunk)
 
+        # 2. If the last response is not a ResultMessage and auto_finish is True,
+        # we need to query Claude Code again to get the final result message.
+        if request.auto_finish and not isinstance(
+            responses[-1], cc_types.ResultMessage
+        ):
+            options = request.as_claude_options()
+            options.continue_conversation = True
+            options.max_turns = 1
+            if system:
+                options.resume = (
+                    system.data.get("session_id", None) if system else None
+                )
+
+            async for chunk in sdk_query(
+                prompt="Please provide a the final result message only",
+                options=options,
+            ):
+                if isinstance(chunk, cc_types.ResultMessage):
+                    if request.verbose_output:
+                        str_ = _verbose_output(chunk)
+                        if str_:
+                            as_readable(
+                                str_,
+                                md=True,
+                                display_str=True,
+                                format_curly=True,
+                                max_panel_width=100,
+                                theme=request.cli_display_theme,
+                            )
+
+                    responses.append(chunk)
+
+        # 3. Parse the responses into a clean format
         return self._parse_claude_code_response(responses)
+
+
+def _display_message(chunk, theme):
+    if isinstance(
+        chunk,
+        cc_types.SystemMessage
+        | cc_types.AssistantMessage
+        | cc_types.UserMessage,
+    ):
+        str_ = _verbose_output(chunk)
+        if str_:
+            if str_.startswith("Claude:"):
+                as_readable(
+                    str_,
+                    md=True,
+                    display_str=True,
+                    max_panel_width=100,
+                    theme=theme,
+                )
+            else:
+                as_readable(
+                    str_,
+                    format_curly=True,
+                    display_str=True,
+                    max_panel_width=100,
+                    theme=theme,
+                )
+
+    if isinstance(chunk, cc_types.ResultMessage):
+        str_ = _verbose_output(chunk)
+        as_readable(
+            str_,
+            md=True,
+            display_str=True,
+            format_curly=True,
+            max_panel_width=100,
+            theme=theme,
+        )
+
+
+def _verbose_output(res: cc_types.Message) -> str:
+    str_ = ""
+    if isinstance(res, cc_types.SystemMessage):
+        str_ = f"Claude Code Session Started: {res.data.get('session_id', 'unknown')}"
+        str_ += f"\nModel: {res.data.get('model', 'claude-code')}\n---"
+        return str_
+
+    if isinstance(res, cc_types.AssistantMessage | cc_types.UserMessage):
+        for block in to_list(
+            res.content, flatten=True, flatten_tuple_set=True, dropna=True
+        ):
+            if isinstance(block, cc_types.TextBlock):
+                text = (
+                    block.text.strip() if isinstance(block.text, str) else ""
+                )
+                str_ += f"Claude:\n{text}"
+
+            if isinstance(block, cc_types.ToolUseBlock):
+                input = (
+                    json.dumps(block.input, indent=2)
+                    if isinstance(block.input, dict)
+                    else str(block.input)
+                )
+                input = input[:200] + "..." if len(input) > 200 else input
+                str_ += (
+                    f"Tool Use: {block.name} - {block.id}\n - Input: {input}"
+                )
+
+            if isinstance(block, cc_types.ToolResultBlock):
+                content = str(block.content)
+                content = (
+                    content[:200] + "..." if len(content) > 200 else content
+                )
+                str_ += (
+                    f"Tool Result: {block.tool_use_id}\n - Content: {content}"
+                )
+        return str_
+
+    if isinstance(res, cc_types.ResultMessage):
+        str_ += f"Session Completion - {res.session_id}"
+        str_ += f"\nResult: {res.result or 'No result'}"
+        str_ += f"\n- Cost: ${res.total_cost_usd:.4f} USD"
+        str_ += f"\n- Duration: {res.duration_ms} ms (API: {res.duration_api_ms} ms)"
+        str_ += f"\n- Turns: {res.num_turns}"
+        return str_