lite_agent-0.1.0-py3-none-any.whl → lite_agent-0.3.0-py3-none-any.whl

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.

Potentially problematic release: this version of lite-agent might be problematic.

lite_agent/stream_handlers/litellm.py ADDED
@@ -0,0 +1,106 @@
+ from collections.abc import AsyncGenerator
+ from pathlib import Path
+
+ import aiofiles
+ import litellm
+ from aiofiles.threadpool.text import AsyncTextIOWrapper
+ from litellm.types.utils import Delta, ModelResponseStream, StreamingChoices
+
+ from lite_agent.loggers import logger
+ from lite_agent.processors import StreamChunkProcessor
+ from lite_agent.types import AgentChunk, CompletionRawChunk, ContentDeltaChunk, FinalMessageChunk, ToolCallDeltaChunk, UsageChunk
+
+
+ def ensure_record_file(record_to: Path | None) -> Path | None:
+     if not record_to:
+         return None
+     if not record_to.parent.exists():
+         logger.warning('Record directory "%s" does not exist, creating it.', record_to.parent)
+         record_to.parent.mkdir(parents=True, exist_ok=True)
+     return record_to
+
+
+ async def process_chunk(
+     processor: StreamChunkProcessor,
+     chunk: ModelResponseStream,
+     record_file: AsyncTextIOWrapper | None = None,
+ ) -> AsyncGenerator[AgentChunk, None]:
+     if record_file:
+         await record_file.write(chunk.model_dump_json() + "\n")
+         await record_file.flush()
+     yield CompletionRawChunk(type="completion_raw", raw=chunk)
+     usage_chunk = await handle_usage_chunk(processor, chunk)
+     if usage_chunk:
+         yield usage_chunk
+         return
+     if not chunk.choices:
+         return
+     choice = chunk.choices[0]
+     delta = choice.delta
+     for result in await handle_content_and_tool_calls(processor, chunk, choice, delta):
+         yield result
+     if choice.finish_reason:
+         current_message = processor.current_message
+         yield FinalMessageChunk(type="final_message", message=current_message, finish_reason=choice.finish_reason)
+
+
+ async def handle_usage_chunk(processor: StreamChunkProcessor, chunk: ModelResponseStream) -> UsageChunk | None:
+     usage = processor.handle_usage_info(chunk)
+     if usage:
+         return UsageChunk(type="usage", usage=usage)
+     return None
+
+
+ async def handle_content_and_tool_calls(
+     processor: StreamChunkProcessor,
+     chunk: ModelResponseStream,
+     choice: StreamingChoices,
+     delta: Delta,
+ ) -> list[AgentChunk]:
+     results: list[AgentChunk] = []
+     if not processor.is_initialized:
+         processor.initialize_message(chunk, choice)
+     if delta.content:
+         results.append(ContentDeltaChunk(type="content_delta", delta=delta.content))
+         processor.update_content(delta.content)
+     if delta.tool_calls is not None:
+         processor.update_tool_calls(delta.tool_calls)
+     if delta.tool_calls and processor.current_message.tool_calls:
+         results.extend(
+             [
+                 ToolCallDeltaChunk(
+                     type="tool_call_delta",
+                     tool_call_id=processor.current_message.tool_calls[-1].id,
+                     name=processor.current_message.tool_calls[-1].function.name,
+                     arguments_delta=tool_call.function.arguments or "",
+                 )
+                 for tool_call in delta.tool_calls
+                 if tool_call.function.arguments
+             ],
+         )
+     return results
+
+
+ async def litellm_stream_handler(
+     resp: litellm.CustomStreamWrapper,
+     record_to: Path | None = None,
+ ) -> AsyncGenerator[AgentChunk, None]:
+     """
+     Optimized chunk handler
+     """
+     processor = StreamChunkProcessor()
+     record_file: AsyncTextIOWrapper | None = None
+     record_path = ensure_record_file(record_to)
+     if record_path:
+         record_file = await aiofiles.open(record_path, "w", encoding="utf-8")
+     try:
+         async for chunk in resp:  # type: ignore
+             if not isinstance(chunk, ModelResponseStream):
+                 logger.warning("unexpected chunk type: %s", type(chunk))
+                 logger.warning("chunk content: %s", chunk)
+                 continue
+             async for result in process_chunk(processor, chunk, record_file):
+                 yield result
+     finally:
+         if record_file:
+             await record_file.close()
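For orientation, a minimal consumption sketch of the handler added above. This is a hedged example, not package documentation: the module path `lite_agent.stream_handlers.litellm` is inferred from the wheel RECORD, and the model name and prompt are placeholders.

```python
# Hypothetical usage sketch; litellm.acompletion(..., stream=True) returns the
# CustomStreamWrapper this handler expects.
import asyncio
from pathlib import Path

import litellm

from lite_agent.stream_handlers.litellm import litellm_stream_handler  # assumed path


async def main() -> None:
    resp = await litellm.acompletion(
        model="gpt-4.1",  # placeholder model
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
    )
    # record_to mirrors every raw chunk to a JSONL file via ensure_record_file
    async for chunk in litellm_stream_handler(resp, record_to=Path("chunks.jsonl")):
        if chunk.type == "content_delta":
            print(chunk.delta, end="", flush=True)


asyncio.run(main())
```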
lite_agent/templates/handoffs_source_instructions.xml.j2 ADDED
@@ -0,0 +1,10 @@
+ <HandoffsGuide>
+ You are a parent agent that can assign tasks to sub-agents.
+
+ You can transfer conversations to other agents for specific tasks.
+ If you need to assign tasks to multiple agents, you should break down the tasks and assign them one by one.
+ You need to wait for one sub-agent to finish before assigning the task to the next sub-agent.
+ {% if extra_instructions %}
+ {{ extra_instructions }}
+ {% endif %}
+ </HandoffsGuide>
lite_agent/templates/handoffs_target_instructions.xml.j2 ADDED
@@ -0,0 +1,9 @@
+ <TransferToParentGuide>
+ You are a sub-agent that is assigned a specific task by your parent agent.
+
+ Everything you output is intended for your parent agent to read.
+ When you finish your task, you should call `transfer_to_parent` to transfer control back to the parent agent.
+ {% if extra_instructions %}
+ {{ extra_instructions }}
+ {% endif %}
+ </TransferToParentGuide>
lite_agent/templates/wait_for_user_instructions.xml.j2 ADDED
@@ -0,0 +1,6 @@
+ <WaitForUserGuide>
+ When you have completed your assigned task or need more information from the user, you must call the `wait_for_user` function.
+ {% if extra_instructions %}
+ {{ extra_instructions }}
+ {% endif %}
+ </WaitForUserGuide>
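All three templates added above share the same optional `extra_instructions` hook. A minimal rendering sketch, assuming Jinja2 is available at runtime (it is not listed in the wheel's Requires-Dist, so both the import and the packaged-file lookup are assumptions):

```python
# Hedged sketch: load a packaged template and render it with optional extras.
from importlib.resources import files

from jinja2 import Template  # assumed dependency

template_text = (
    files("lite_agent") / "templates" / "handoffs_source_instructions.xml.j2"
).read_text(encoding="utf-8")

# The {% if extra_instructions %} guard drops the block when nothing is passed.
print(Template(template_text).render(extra_instructions="Prefer concise replies."))
```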
lite_agent/types/__init__.py ADDED
@@ -0,0 +1,75 @@
+ # Export all types from submodules
+ from .chunks import (
+     AgentChunk,
+     AgentChunkType,
+     CompletionRawChunk,
+     ContentDeltaChunk,
+     FinalMessageChunk,
+     ToolCallChunk,
+     ToolCallDeltaChunk,
+     ToolCallResultChunk,
+     UsageChunk,
+ )
+ from .messages import (
+     AgentAssistantMessage,
+     AgentFunctionCallOutput,
+     AgentFunctionToolCallMessage,
+     AgentMessage,
+     AgentSystemMessage,
+     AgentUserMessage,
+     AssistantMessage,
+     AssistantMessageDict,
+     FlexibleRunnerMessage,
+     FunctionCallDict,
+     FunctionCallOutputDict,
+     Message,
+     MessageDict,
+     ResponseInputImage,
+     ResponseInputText,
+     RunnerMessage,
+     RunnerMessages,
+     SystemMessageDict,
+     UserInput,
+     UserMessageContentItemImageURL,
+     UserMessageContentItemImageURLImageURL,
+     UserMessageContentItemText,
+     UserMessageDict,
+ )
+ from .tool_calls import ToolCall, ToolCallFunction
+
+ __all__ = [
+     "AgentAssistantMessage",
+     "AgentChunk",
+     "AgentChunkType",
+     "AgentFunctionCallOutput",
+     "AgentFunctionToolCallMessage",
+     "AgentMessage",
+     "AgentSystemMessage",
+     "AgentUserMessage",
+     "AssistantMessage",
+     "AssistantMessageDict",
+     "CompletionRawChunk",
+     "ContentDeltaChunk",
+     "FinalMessageChunk",
+     "FlexibleRunnerMessage",
+     "FunctionCallDict",
+     "FunctionCallOutputDict",
+     "Message",
+     "MessageDict",
+     "ResponseInputImage",
+     "ResponseInputText",
+     "RunnerMessage",
+     "RunnerMessages",
+     "SystemMessageDict",
+     "ToolCall",
+     "ToolCallChunk",
+     "ToolCallDeltaChunk",
+     "ToolCallFunction",
+     "ToolCallResultChunk",
+     "UsageChunk",
+     "UserInput",
+     "UserMessageContentItemImageURL",
+     "UserMessageContentItemImageURLImageURL",
+     "UserMessageContentItemText",
+     "UserMessageDict",
+ ]
lite_agent/types/chunks.py ADDED
@@ -0,0 +1,89 @@
+ from typing import Literal
+
+ from litellm import Usage
+ from litellm.types.utils import ModelResponseStream
+ from pydantic import BaseModel
+
+ from .messages import AssistantMessage
+
+
+ class CompletionRawChunk(BaseModel):
+     """
+     Define the type of chunk
+     """
+
+     type: Literal["completion_raw"]
+     raw: ModelResponseStream
+
+
+ class UsageChunk(BaseModel):
+     """
+     Define the type of usage info chunk
+     """
+
+     type: Literal["usage"]
+     usage: Usage
+
+
+ class FinalMessageChunk(BaseModel):
+     """
+     Define the type of final message chunk
+     """
+
+     type: Literal["final_message"]
+     message: AssistantMessage
+     finish_reason: str | None = None  # Literal["stop", "tool_calls"]
+
+
+ class ToolCallChunk(BaseModel):
+     """
+     Define the type of tool call chunk
+     """
+
+     type: Literal["tool_call"]
+     name: str
+     arguments: str
+
+
+ class ToolCallResultChunk(BaseModel):
+     """
+     Define the type of tool call result chunk
+     """
+
+     type: Literal["tool_call_result"]
+     tool_call_id: str
+     name: str
+     content: str
+
+
+ class ContentDeltaChunk(BaseModel):
+     """
+     Define the type of message chunk
+     """
+
+     type: Literal["content_delta"]
+     delta: str
+
+
+ class ToolCallDeltaChunk(BaseModel):
+     """
+     Define the type of tool call delta chunk
+     """
+
+     type: Literal["tool_call_delta"]
+     tool_call_id: str
+     name: str
+     arguments_delta: str
+
+
+ AgentChunk = CompletionRawChunk | UsageChunk | FinalMessageChunk | ToolCallChunk | ToolCallResultChunk | ContentDeltaChunk | ToolCallDeltaChunk
+
+ AgentChunkType = Literal[
+     "completion_raw",
+     "usage",
+     "final_message",
+     "tool_call",
+     "tool_call_result",
+     "content_delta",
+     "tool_call_delta",
+ ]
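The `type` Literal on each model makes `AgentChunk` a tagged union, so consumers can dispatch on `chunk.type` rather than on `isinstance`. A minimal sketch (hedged; the print targets are arbitrary, and `match` is available since the package's Python 3.10 floor):

```python
from lite_agent.types import AgentChunk


def handle(chunk: AgentChunk) -> None:
    # Dispatch on the discriminator field, mirroring AgentChunkType.
    match chunk.type:
        case "content_delta":
            print(chunk.delta, end="", flush=True)
        case "tool_call_delta":
            print(f"\n[{chunk.name}] {chunk.arguments_delta}", end="")
        case "final_message":
            print(f"\n(finish_reason={chunk.finish_reason})")
        case _:
            pass  # completion_raw, usage, tool_call, tool_call_result
```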
lite_agent/types/messages.py ADDED
@@ -0,0 +1,135 @@
+ from collections.abc import Sequence
+ from typing import Any, Literal, NotRequired, TypedDict
+
+ from pydantic import BaseModel
+
+ from .tool_calls import ToolCall
+
+
+ class ResponseInputImageDict(TypedDict):
+     detail: NotRequired[Literal["low", "high", "auto"]]
+     type: Literal["input_image"]
+     file_id: str | None
+     image_url: str | None
+
+
+ class ResponseInputTextDict(TypedDict):
+     text: str
+     type: Literal["input_text"]
+
+
+ # TypedDict definitions for better type hints
+ class UserMessageDict(TypedDict):
+     role: Literal["user"]
+     content: str | Sequence[ResponseInputTextDict | ResponseInputImageDict]
+
+
+ class AssistantMessageDict(TypedDict):
+     role: Literal["assistant"]
+     content: str
+
+
+ class SystemMessageDict(TypedDict):
+     role: Literal["system"]
+     content: str
+
+
+ class FunctionCallDict(TypedDict):
+     type: Literal["function_call"]
+     function_call_id: str
+     name: str
+     arguments: str
+     content: str
+
+
+ class FunctionCallOutputDict(TypedDict):
+     type: Literal["function_call_output"]
+     call_id: str
+     output: str
+
+
+ # Union type for all supported message dictionary formats
+ MessageDict = UserMessageDict | AssistantMessageDict | SystemMessageDict | FunctionCallDict | FunctionCallOutputDict
+
+
+ # Response API format input types
+ class ResponseInputText(BaseModel):
+     text: str
+     type: Literal["input_text"]
+
+
+ class ResponseInputImage(BaseModel):
+     detail: Literal["low", "high", "auto"] = "auto"
+     type: Literal["input_image"]
+     file_id: str | None = None
+     image_url: str | None = None
+
+
+ # Compatibility types for old completion API format
+ class UserMessageContentItemText(BaseModel):
+     type: Literal["text"]
+     text: str
+
+
+ class UserMessageContentItemImageURLImageURL(BaseModel):
+     url: str
+
+
+ class UserMessageContentItemImageURL(BaseModel):
+     type: Literal["image_url"]
+     image_url: UserMessageContentItemImageURLImageURL
+
+
+ # Legacy types - keeping for compatibility
+ class AssistantMessage(BaseModel):
+     id: str
+     index: int
+     role: Literal["assistant"] = "assistant"
+     content: str = ""
+     tool_calls: list[ToolCall] | None = None
+
+
+ class Message(BaseModel):
+     role: str
+     content: str
+
+
+ class AgentUserMessage(BaseModel):
+     role: Literal["user"]
+     content: str | Sequence[ResponseInputText | ResponseInputImage | UserMessageContentItemText | UserMessageContentItemImageURL]
+
+
+ class AgentAssistantMessage(BaseModel):
+     role: Literal["assistant"]
+     content: str
+
+
+ class AgentSystemMessage(BaseModel):
+     role: Literal["system"]
+     content: str
+
+
+ class AgentFunctionToolCallMessage(BaseModel):
+     arguments: str
+     type: Literal["function_call"]
+     function_call_id: str
+     name: str
+     content: str
+
+
+ class AgentFunctionCallOutput(BaseModel):
+     call_id: str
+     output: str
+     type: Literal["function_call_output"]
+
+
+ RunnerMessage = AgentUserMessage | AgentAssistantMessage | AgentSystemMessage | AgentFunctionToolCallMessage | AgentFunctionCallOutput
+ AgentMessage = RunnerMessage | AgentSystemMessage
+
+ # Enhanced type definitions for better type hints
+ # Supports BaseModel instances, TypedDict, and plain dict
+ FlexibleRunnerMessage = RunnerMessage | MessageDict | dict[str, Any]
+ RunnerMessages = Sequence[FlexibleRunnerMessage]
+
+ # Type alias for user input - supports string, single message, or sequence of messages
+ UserInput = str | FlexibleRunnerMessage | RunnerMessages
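`FlexibleRunnerMessage` is what lets runner input mix shapes. A small sketch of the three accepted forms (hedged; the message contents and call id are placeholders):

```python
from lite_agent.types import AgentUserMessage, RunnerMessages

messages: RunnerMessages = [
    AgentUserMessage(role="user", content="Hi"),  # pydantic BaseModel instance
    {"role": "assistant", "content": "Hello! How can I help?"},  # TypedDict / plain dict
    {"type": "function_call_output", "call_id": "call_1", "output": "25°C"},
]
```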
lite_agent/types/tool_calls.py ADDED
@@ -0,0 +1,15 @@
+ from typing import Literal
+
+ from pydantic import BaseModel
+
+
+ class ToolCallFunction(BaseModel):
+     name: str
+     arguments: str | None = None
+
+
+ class ToolCall(BaseModel):
+     type: Literal["function"]
+     function: ToolCallFunction
+     id: str
+     index: int
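`arguments` is optional because argument text arrives in streamed fragments (see `ToolCallDeltaChunk` above). A hedged sketch of stitching fragments back into one `ToolCall`; the id and tool name are placeholders:

```python
from lite_agent.types import ToolCall, ToolCallFunction

call = ToolCall(
    type="function",
    id="call_1",  # placeholder id
    index=0,
    function=ToolCallFunction(name="get_weather"),  # hypothetical tool name
)

# Argument JSON typically arrives in pieces during streaming.
for fragment in ['{"city": ', '"New York"}']:
    call.function.arguments = (call.function.arguments or "") + fragment

assert call.function.arguments == '{"city": "New York"}'
```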
lite_agent-0.3.0.dist-info/METADATA ADDED
@@ -0,0 +1,111 @@
+ Metadata-Version: 2.4
+ Name: lite-agent
+ Version: 0.3.0
+ Summary: A lightweight, extensible framework for building AI agents.
+ Author-email: Jianqi Pan <jannchie@gmail.com>
+ License: MIT
+ Keywords: AI,agent framework,assistant,chatbot,function call,openai,pydantic,rich,tooling
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: Communications :: Chat
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Requires-Python: >=3.10
+ Requires-Dist: aiofiles>=24.1.0
+ Requires-Dist: funcall>=0.7.0
+ Requires-Dist: prompt-toolkit>=3.0.51
+ Requires-Dist: rich>=14.0.0
+ Description-Content-Type: text/markdown
+
+ # LiteAgent
+
+ [![codecov](https://codecov.io/gh/Jannchie/lite-agent/graph/badge.svg?token=SJW89Z1VAZ)](https://codecov.io/gh/Jannchie/lite-agent)
+
+ ## Introduction
+
+ LiteAgent is an easy-to-learn, lightweight, and extensible AI agent framework built on top of [LiteLLM](https://github.com/BerriAI/litellm). It is designed as a minimal yet practical implementation for quickly building intelligent assistants and chatbots with robust tool-calling capabilities. The codebase is intentionally simple, making it ideal for learning, extension, and rapid prototyping.
+
+ **Key Advantages:**
+
+ - **Minimal and approachable:** The simplest agent implementation for fast learning and hacking.
+ - **Accurate and complete type hints:** All function signatures are fully type-hinted and never faked, ensuring a reliable developer experience and accurate static analysis.
+ - **Flexible parameter definition:** Supports defining tool function parameters using basic types, Pydantic models, or Python dataclasses, even in combination.
+ - **Streaming responses:** Seamless support for LiteLLM streaming output.
+ - **Custom tool functions:** Easily integrate your own Python functions (e.g., weather or temperature queries).
+ - **Rich type annotations, Pydantic-based.**
+ - **Easy to extend and test.**
+
+ ## Installation
+
+ You can install LiteAgent directly from PyPI:
+
+ ```bash
+ pip install lite-agent
+ ```
+
+ Or use [uv](https://github.com/astral-sh/uv):
+
+ ```bash
+ uv pip install lite-agent
+ ```
+
+ If you want to install from source for development:
+
+ ```bash
+ uv pip install -e .
+ # or
+ pip install -e .
+ ```
+
+ ## Quick Start
+
+ ### Code Example
+
+ See `examples/basic.py`:
+
+ ```python
+ import asyncio
+ from lite_agent.agent import Agent
+ from lite_agent.runner import Runner
+
+ async def get_weather(city: str) -> str:
+     await asyncio.sleep(1)
+     return f"The weather in {city} is sunny with a few clouds."
+
+ async def main():
+     agent = Agent(
+         model="gpt-4.1",
+         name="Weather Assistant",
+         instructions="You are a helpful weather assistant.",
+         tools=[get_weather],
+     )
+     runner = Runner(agent)
+     resp = await runner.run_until_complete("What's the weather in New York?")
+     for chunk in resp:
+         print(chunk)
+
+ if __name__ == "__main__":
+     asyncio.run(main())
+ ```
+
+ For the full dependency list, see `pyproject.toml`.
+
+ ## Testing
+
+ ```bash
+ pytest
+ ```
+
+ ## License
+
+ MIT License
+
+ ## Author
+
+ Jianqi Pan ([jannchie@gmail.com](mailto:jannchie@gmail.com))
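The "flexible parameter definition" bullet in the README above can be made concrete with a sketch like the following. `WeatherQuery` and `get_forecast` are hypothetical, the `Agent` wiring simply mirrors the Quick Start example, and the tool-introspection behavior is assumed to come from the `funcall` dependency:

```python
from pydantic import BaseModel

from lite_agent.agent import Agent


class WeatherQuery(BaseModel):
    city: str
    unit: str = "celsius"


async def get_forecast(query: WeatherQuery) -> str:
    """Hypothetical tool whose single argument is a Pydantic model."""
    return f"The forecast for {query.city} is sunny ({query.unit})."


agent = Agent(
    model="gpt-4.1",
    name="Forecast Assistant",
    instructions="You answer weather questions.",
    tools=[get_forecast],
)
```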
lite_agent-0.3.0.dist-info/RECORD ADDED
@@ -0,0 +1,22 @@
+ lite_agent/__init__.py,sha256=Xaex4kVGxZzg_hhO17b8_tVXf63xFbSzBOlVdhRf-Ng,338
+ lite_agent/agent.py,sha256=MsdAnM2pqh9RLTGOFuynowkS52QXUb7vHaMHQyiOpoQ,17627
+ lite_agent/client.py,sha256=e_BsXo6KUgleRFkSPSESUoIPvyLXWyJ9E1AzExYXXsk,1236
+ lite_agent/loggers.py,sha256=XkNkdqwD_nQGfhQJ-bBWT7koci_mMkNw3aBpyMhOICw,57
+ lite_agent/message_transfers.py,sha256=nT7-tID20RK2yoN-rDiEE6sSclluSlhYSkayCzmPwk8,3984
+ lite_agent/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lite_agent/rich_helpers.py,sha256=6HdjMwR2U7P5ieZ5KHQNUsQbW-apG67GzQ_nvJm774E,15583
+ lite_agent/runner.py,sha256=LWhtmxrYxL4sFi3ZKeWWRymK3tGCvWfIh1-EcWf8P0g,27822
+ lite_agent/processors/__init__.py,sha256=X78GKL_IWwW2lg8w4DD6GOFWLzAR2wTfKxHlvOkcuUQ,114
+ lite_agent/processors/stream_chunk_processor.py,sha256=nMA_cW7FDpXwJvm4F8vFwBXmHHsSELQFcoNEjH3xvn8,4751
+ lite_agent/stream_handlers/__init__.py,sha256=2GSiG0VUgcQlFMl6JkGAqikXMII1a43Hr-J5NIct6dk,115
+ lite_agent/stream_handlers/litellm.py,sha256=NNMAl8Bvoc2xe-qWKtfqvJQA2yr3sz1IUU90rQ_9iBw,3976
+ lite_agent/templates/handoffs_source_instructions.xml.j2,sha256=2XsXQlBzk38qbxGrfyt8y2b0KlZmsV_1xavLufcdkHc,428
+ lite_agent/templates/handoffs_target_instructions.xml.j2,sha256=gSbWVYYcovPKbGpFc0kqGSJ5Y5UC3fOHyUmZfcrDgSE,356
+ lite_agent/templates/wait_for_user_instructions.xml.j2,sha256=wXbcYD5Q1FaCGVBm3Hz_Cp7nnoK7KzloP0ao-jYMwPk,231
+ lite_agent/types/__init__.py,sha256=8l2RL-55sRHQW-sTmtKkKzCQGLrENaJT7Cgy5iA5xCo,1767
+ lite_agent/types/chunks.py,sha256=Ro5BtrrdsYGkKrEekIhs9vIrBM7HljtgOkHherH8B3k,1697
+ lite_agent/types/messages.py,sha256=A66YVl2IYMMTlnEdGlbCXqMztSSMSjS9F2yyebBlKR0,3364
+ lite_agent/types/tool_calls.py,sha256=Xnut8-2-Ld9vgA2GKJY6BbFlBaAv_n4W7vo7Jx21A-E,260
+ lite_agent-0.3.0.dist-info/METADATA,sha256=l4SLUuFQlcrlr-CHOIaWqQ3WOyYnPVIsHjjxKjR_g4E,3455
+ lite_agent-0.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ lite_agent-0.3.0.dist-info/RECORD,,
lite_agent/__main__.py DELETED
@@ -1,110 +0,0 @@
- import asyncio
- import logging
-
- from prompt_toolkit import PromptSession
- from prompt_toolkit.validation import Validator
- from rich.console import Console
- from rich.logging import RichHandler
-
- from open_agents.agent import Agent
- from open_agents.loggers import logger
- from open_agents.runner import Runner
- from open_agents.types import AgentChunk, ContentDeltaChunk
-
- logging.basicConfig(level=logging.WARNING, handlers=[RichHandler()], format="%(message)s")
- logger.setLevel(logging.INFO)
-
-
- # --- Tool functions (English) ---
- async def get_whether(city: str) -> str:
-     """Get the weather for a city."""
-     await asyncio.sleep(1)  # simulate network latency
-     return f"The weather in {city} is sunny with a few clouds."
-
-
- async def get_temperature(city: str) -> str:
-     """Get the temperature for a city."""
-     await asyncio.sleep(1)  # simulate network latency
-     return f"The temperature in {city} is 25°C."
-
-
- class RichChannel:
-     def __init__(self) -> None:
-         self.console = Console()
-         self.map = {
-             "final_message": self.handle_final_message,
-             "tool_call": self.handle_tool_call,
-             "tool_call_result": self.handle_tool_call_result,
-             "tool_call_delta": self.handle_tool_call_delta,
-             "content_delta": self.handle_content_delta,
-             "usage": self.handle_usage,
-         }
-         self.new_turn = True
-
-     def handle(self, chunk: AgentChunk):
-         handler = self.map[chunk["type"]]
-         handler(chunk)
-
-     def handle_final_message(self, _chunk: AgentChunk):
-         print()
-         self.new_turn = True
-
-     def handle_tool_call(self, chunk: AgentChunk):
-         name = chunk.get("name", "<unknown>")
-         arguments = chunk.get("arguments", "")
-         self.console.print(f"🛠️ [green]{name}[/green]([yellow]{arguments}[/yellow])")
-
-     def handle_tool_call_result(self, chunk: AgentChunk):
-         name = chunk.get("name", "<unknown>")
-         content = chunk.get("content", "")
-         self.console.print(f"🛠️ [green]{name}[/green] → [yellow]{content}[/yellow]")
-
-     def handle_tool_call_delta(self, chunk: AgentChunk): ...
-     def handle_content_delta(self, chunk: ContentDeltaChunk):
-         if self.new_turn:
-             self.console.print("🤖 ", end="")
-             self.new_turn = False
-         print(chunk["delta"], end="", flush=True)
-
-     def handle_usage(self, chunk: AgentChunk):
-         if False:
-             usage = chunk["usage"]
-             self.console.print(f"In: {usage.prompt_tokens}, Out: {usage.completion_tokens}, Total: {usage.total_tokens}")
-
-
- async def main():
-     agent = Agent(
-         model="gpt-4.1",
-         name="Weather Assistant",
-         instructions="You are a helpful weather assistant. Before using tools, briefly explain what you are going to do. Provide friendly and informative responses.",
-         tools=[get_whether, get_temperature],
-     )
-     session = PromptSession()
-     rich_channel = RichChannel()
-     runner = Runner(agent)
-     not_empty_validator = Validator.from_callable(
-         lambda text: bool(text.strip()),
-         error_message="Input cannot be empty.",
-         move_cursor_to_end=True,
-     )
-     while True:
-         try:
-             user_input = await session.prompt_async(
-                 "👤 ",
-                 default="",
-                 complete_while_typing=True,
-                 validator=not_empty_validator,
-                 validate_while_typing=False,
-             )
-             if user_input.lower() in {"exit", "quit"}:
-                 break
-             response = runner.run_stream(user_input)
-             async for chunk in response:
-                 rich_channel.handle(chunk)
-
-         except (EOFError, KeyboardInterrupt):
-             break
-
-
- if __name__ == "__main__":
-     asyncio.run(main())