pagent 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pagent/__init__.py ADDED
@@ -0,0 +1,17 @@
1
+ from .agent import Agent, AgentStats
2
+ from .llm import LLM, RunResult
3
+ from .session import Session
4
+ from .tool import FunctionTool, to_openai_tools, tool
5
+
6
+ __all__ = [
7
+ "Agent",
8
+ "AgentStats",
9
+ "FunctionTool",
10
+ "LLM",
11
+ "RunResult",
12
+ "Session",
13
+ "to_openai_tools",
14
+ "tool",
15
+ ]
16
+
17
+ __version__ = "0.1.0"
pagent/agent.py ADDED
@@ -0,0 +1,91 @@
1
+ from .llm import RunResult
2
+ from .tool import to_openai_tools
3
+
4
+
5
class AgentStats:
    """Accumulates turn counts and token usage across one agent's model calls."""

    def __init__(self):
        self.usage = None           # raw usage object from the most recent turn
        self.prompt_tokens = 0      # running totals across all turns
        self.completion_tokens = 0
        self.total_tokens = 0
        self.turns = 0              # number of model invocations recorded

    def add_usage(self, usage):
        """Record one model turn; fold token counts in when usage is present.

        The turn counter advances even when ``usage`` is None (e.g. the
        provider returned no usage block), so ``turns`` tracks invocations.
        """
        self.usage = usage
        self.turns += 1
        if not usage:
            return
        self.prompt_tokens += usage.prompt_tokens
        self.completion_tokens += usage.completion_tokens
        self.total_tokens += usage.total_tokens

    def __str__(self):
        pairs = (
            ("turns", self.turns),
            ("prompt_tokens", self.prompt_tokens),
            ("completion_tokens", self.completion_tokens),
            ("total_tokens", self.total_tokens),
        )
        return ", ".join(f"{label}={count}" for label, count in pairs)
28
+
29
+
30
class Agent:
    """Multi-turn tool-calling loop over an LLM and a Session.

    Each ``run()`` appends the user message, then alternates model calls and
    tool executions until the model stops requesting tools or ``max_turns``
    model calls have been made. NOTE(review): if the budget is exhausted while
    tool calls are still pending, the tool results are appended to the session
    but never shown to the model in this run — by design, the last RunResult
    is returned as-is.
    """

    def __init__(self, llm, session, tools=None, max_turns=8):
        """Wire an LLM, a Session and optional FunctionTools together.

        Raises:
            ValueError: on duplicate tool names or ``max_turns < 1``.
        """
        self.llm = llm
        self.session = session
        self.tools = tools or []
        self.tool_schemas = to_openai_tools(self.tools)
        names = [entry.name for entry in self.tools]
        if len(names) != len(set(names)):
            raise ValueError(f"duplicate tool names: {names}")
        self.tool_map = {entry.name: entry for entry in self.tools}
        if max_turns < 1:
            raise ValueError("max_turns must be >= 1")
        self.max_turns = max_turns
        self.stats = AgentStats()

    def _execute_tool_calls(self, tool_calls):
        """Run each requested tool, appending one role="tool" reply per call.

        Unknown tool names produce an error string for the model rather than
        raising, so a hallucinated tool name doesn't abort the run.
        """
        for call in tool_calls:
            spec = call["function"]
            name = spec["name"]
            handler = self.tool_map.get(name)
            if handler is not None:
                content = handler.call(spec["arguments"])
            else:
                content = f"error: unknown tool {name!r}; available: {sorted(self.tool_map)}"
            self.session += {
                "role": "tool",
                "content": content,
                "tool_call_id": call["id"],
            }

    def reset(self):
        """Clear conversation history and start a fresh statistics record."""
        self.session.reset()
        self.stats = AgentStats()

    async def run(self, user_input):
        """Drive the tool loop for one user message; return the last RunResult."""
        self.session += {"role": "user", "content": user_input}
        result = RunResult(content="")

        for _ in range(self.max_turns):
            result = await self.llm.invoke(
                self.session.messages, tools=self.tool_schemas
            )
            reply = {"role": "assistant", "content": result.content}
            if result.has_tool_calls:
                # Chat Completions convention: content is null (not "") when
                # the assistant message carries tool calls.
                reply["content"] = result.content or None
                reply["tool_calls"] = result.tool_calls
            self.session += reply
            self.stats.add_usage(result.usage)

            if not result.has_tool_calls:
                return result

            self._execute_tool_calls(result.tool_calls)

        return result
pagent/llm.py ADDED
@@ -0,0 +1,67 @@
1
+ import os
2
+ from dataclasses import dataclass, field
3
+
4
+
5
+ @dataclass
6
+ class RunResult:
7
+ content: str
8
+ tool_calls: list = field(default_factory=list)
9
+ usage: object | None = None
10
+
11
+ @property
12
+ def has_tool_calls(self):
13
+ return len(self.tool_calls) > 0
14
+
15
+
16
class LLM:
    """Stateless wrapper: forwards to the model; no history (caller builds ``messages``)."""

    API_KEY_ENV_VAR = "OPENAI_API_KEY"
    # Must include the /v1 prefix: AsyncOpenAI joins bare endpoint paths such
    # as "/chat/completions" onto base_url, so a base of
    # "https://api.openai.com" (without /v1) yields 404s. The SDK's own
    # default base_url is "https://api.openai.com/v1".
    BASE_URL = "https://api.openai.com/v1"

    def __init__(self, model_id, base_url=None, apikey=None, request_kwargs=None):
        """Create an async client bound to one model.

        Args:
            model_id: Model identifier to send on every request.
            base_url: Optional override for ``BASE_URL`` (include ``/v1``).
            apikey: Optional explicit key; falls back to ``get_api_key()``.
                If neither is set, requests go out with an empty key.
            request_kwargs: Extra keys merged into every
                ``chat.completions.create`` call (e.g. ``temperature``).
        """
        from openai import AsyncOpenAI

        resolved_base_url = (base_url or self.BASE_URL).strip()
        resolved_apikey = (apikey or self.get_api_key() or "").strip()

        self.base_url = resolved_base_url
        self.apikey = resolved_apikey
        self.client = AsyncOpenAI(api_key=self.apikey, base_url=self.base_url)
        self.model_id = model_id
        self.request_kwargs = request_kwargs or {}

    def get_api_key(self):
        """Return the API key from ``API_KEY_ENV_VAR``, or None when unset."""
        return os.getenv(self.API_KEY_ENV_VAR)

    async def invoke(self, messages, tools=None):
        """Make a single non-streaming chat-completions call.

        Args:
            messages: Full Chat Completions message list (caller-owned history).
            tools: Optional OpenAI ``tools`` payload; omitted when falsy.

        Returns:
            RunResult with the reply text, normalized tool-call dicts, and
            the provider's usage object.
        """
        kwargs = {
            "model": self.model_id,
            "messages": messages,
            "stream": False,
            **self.request_kwargs,
        }
        if tools:
            kwargs["tools"] = tools

        response = await self.client.chat.completions.create(**kwargs)
        # Defensive: some gateways return an empty choices list.
        if not response.choices:
            return RunResult(content="", tool_calls=[], usage=response.usage)
        message = response.choices[0].message

        content = message.content or ""
        tool_calls = []
        if message.tool_calls:
            # Normalize SDK objects into plain dicts so the session stays
            # JSON-serializable and provider-agnostic.
            for tool_call in message.tool_calls:
                tool_calls.append(
                    {
                        "id": tool_call.id,
                        "type": tool_call.type,
                        "function": {
                            "name": tool_call.function.name,
                            "arguments": tool_call.function.arguments,
                        },
                    }
                )

        return RunResult(content=content, tool_calls=tool_calls, usage=response.usage)
pagent/py.typed ADDED
File without changes
pagent/session.py ADDED
@@ -0,0 +1,27 @@
1
class Session:
    """Conversation buffer of Chat Completions-shaped message dicts.

    Append with ``session += {"role": "user", "content": "..."}`` or
    ``session += [msg, ...]``; ``reset()`` restores just the system prompt.
    """

    def __init__(self, system_prompt="You are a helpful assistant."):
        self.system_prompt = system_prompt
        self.messages = []
        # A falsy prompt ("" or None) means "no system message at all".
        if system_prompt:
            self.messages.append({"role": "system", "content": system_prompt})

    def __iadd__(self, other):
        # Strings are iterable, so reject them explicitly before treating
        # ``other`` as a sequence of message dicts.
        if isinstance(other, (str, bytes, bytearray)):
            raise TypeError("session += expects message dict(s), not str/bytes")
        batch = [other] if isinstance(other, dict) else other
        # Shallow-copy each message so later caller mutations don't leak in.
        self.messages.extend(dict(message) for message in batch)
        return self

    def reset(self):
        """Drop all history, keeping only the system prompt (if any)."""
        self.messages.clear()
        if self.system_prompt:
            self.messages.append({"role": "system", "content": self.system_prompt})
pagent/tool.py ADDED
@@ -0,0 +1,131 @@
1
+ from __future__ import annotations
2
+
3
+ import inspect
4
+ import json
5
+ from functools import reduce
6
+ from typing import Any, Union, get_args, get_origin, get_type_hints
7
+
8
+ from docstring_parser import parse
9
+
10
+
11
class FunctionTool:
    """A callable tool plus its OpenAI function-calling schema."""

    def __init__(self, name, description, parameters, func=None):
        self.name = name
        self.description = description
        self.parameters = parameters  # JSON Schema describing the arguments object
        self.func = func              # bound Python callable (None = schema-only)

    def to_dict(self):
        """Return the OpenAI ``tools`` entry for this function."""
        return {
            "type": "function",
            "function": {
                "name": self.name,
                "description": self.description,
                "parameters": self.parameters,
            },
        }

    def call(self, arguments=None):
        """Invoke the bound function with JSON-string or dict arguments.

        Malformed arguments are returned as an error *string* (not raised) so
        the agent loop can feed the problem back to the model.

        Returns:
            ``str(func(**arguments))`` or an error message.

        Raises:
            ValueError: if no function is bound to this tool.
        """
        if self.func is None:
            raise ValueError(f"tool {self.name} has no bound function")

        if arguments is None:
            payload = {}
        elif isinstance(arguments, str):
            if not arguments.strip():
                payload = {}
            else:
                try:
                    payload = json.loads(arguments)
                except json.JSONDecodeError as e:
                    return f"Invalid JSON in tool arguments: {e}"
        else:
            payload = arguments

        # Guard valid-but-non-object JSON (e.g. "[1, 2]" or "3"): **payload
        # requires a mapping, and the TypeError would otherwise crash the
        # agent loop instead of informing the model.
        if not isinstance(payload, dict):
            return (
                f"Invalid tool arguments: expected a JSON object, "
                f"got {type(payload).__name__}"
            )

        return str(self.func(**payload))
46
+
47
+
48
+ def unwrap_optional(type_hint):
49
+ origin = get_origin(type_hint)
50
+ if origin is Union:
51
+ args = get_args(type_hint)
52
+ if type(None) in args:
53
+ non_none_args = [arg for arg in args if arg is not type(None)]
54
+ if len(non_none_args) == 1:
55
+ return True, non_none_args[0]
56
+ return True, reduce(lambda x, y: x | y, non_none_args)
57
+ return False, type_hint
58
+
59
+
60
def type_to_schema(type_hint):
    """Map a Python type hint to a JSON Schema fragment.

    ``list[X]`` recurses into its element type; ``dict[...]`` becomes a bare
    object; the four scalar builtins map to their JSON types; anything else
    (including un-parameterized ``list``) falls back to ``{"type": "string"}``.
    """
    origin = get_origin(type_hint)
    if origin is list:
        type_args = get_args(type_hint)
        element = type_args[0] if type_args else Any
        return {"type": "array", "items": type_to_schema(element)}
    if origin is dict:
        return {"type": "object"}
    # Identity checks (``is``) keep bool distinct from int.
    scalar_table = (
        (str, "string"),
        (int, "integer"),
        (float, "number"),
        (bool, "boolean"),
    )
    for scalar, json_type in scalar_table:
        if type_hint is scalar:
            return {"type": json_type}
    return {"type": "string"}
77
+
78
+
79
def extract_function_schema(func, name_override=None, description_override=None):
    """Build ``(name, description, JSON Schema)`` for ``func``'s parameters.

    Descriptions come from the parsed docstring (via docstring_parser);
    types come from annotations. Parameters named self/cls/context and
    ``*args``/``**kwargs`` are excluded from the schema.
    """
    func_name = name_override or func.__name__
    sig = inspect.signature(func)
    docstring = parse(func.__doc__ or "")
    description = description_override or docstring.short_description
    param_docs = {param.arg_name: param.description for param in docstring.params}
    type_hints = get_type_hints(func)

    properties = {}
    required = []
    for param_name, param in sig.parameters.items():
        if param_name in ("self", "cls", "context"):
            continue
        # *args/**kwargs cannot be expressed as named JSON-object properties;
        # without this skip they leak into the schema as bogus required
        # string fields.
        if param.kind in (
            inspect.Parameter.VAR_POSITIONAL,
            inspect.Parameter.VAR_KEYWORD,
        ):
            continue

        param_type = type_hints.get(param_name, Any)
        param_desc = param_docs.get(param_name)
        is_optional, base_type = unwrap_optional(param_type)
        schema = type_to_schema(base_type)
        if param_desc:
            schema["description"] = param_desc
        properties[param_name] = schema

        # Required iff no default AND not Optional[...]: an Optional hint is
        # treated as "the model may omit it" even without a default value.
        # ``is``: Parameter.empty is a sentinel, compare by identity.
        if param.default is inspect.Parameter.empty and not is_optional:
            required.append(param_name)

    json_schema = {
        "type": "object",
        "properties": properties,
        "required": required,
        "additionalProperties": False,
    }
    return func_name, description, json_schema
111
+
112
+
113
def tool(name=None, description=None):
    """Decorator factory turning a plain function into a FunctionTool.

    Name and description default to the function's own name and docstring
    summary; the parameter schema is derived from its type hints.
    """

    def decorator(func):
        resolved_name, resolved_description, parameters = extract_function_schema(
            func, name_override=name, description_override=description
        )
        return FunctionTool(
            name=resolved_name,
            description=resolved_description or "",
            parameters=parameters,
            func=func,
        )

    return decorator
128
+
129
+
130
def to_openai_tools(tools):
    """Convert FunctionTool instances into the OpenAI ``tools`` payload list."""
    return [entry.to_dict() for entry in tools]
@@ -0,0 +1,160 @@
1
+ Metadata-Version: 2.3
2
+ Name: pagent
3
+ Version: 0.1.0
4
+ Summary: Minimal OpenAI Chat Completions agent (session + tools).
5
+ Author: gongyulei
6
+ Author-email: gongyulei <gongyulei@stu.xmu.edu.cn>
7
+ Requires-Dist: docstring-parser>=0.18.0
8
+ Requires-Dist: openai>=2.31.0
9
+ Requires-Python: >=3.11
10
+ Description-Content-Type: text/markdown
11
+
12
+ # pagent
13
+
14
+ 基于 **OpenAI 兼容 Chat Completions** 的轻量 **async** agent 核心:
15
+
16
+ - `Session`:对话缓冲 `messages`,用 `session += {...}` 追加消息,`reset()` 清空
17
+ - `LLM`:对 `AsyncOpenAI` 的薄封装,只负责单次 `invoke`;历史由 `Session` 组装
18
+ - `@tool()` / `FunctionTool`:把 Python 函数变成 function-calling 的 schema
19
+ - `Agent`:多轮循环,直到模型不再调用工具或达到 `max_turns`
20
+
21
+ ## 安装
22
+
23
+ ```bash
24
+ pip install pagent
25
+ ```
26
+
27
+ 从源码目录:
28
+
29
+ ```bash
30
+ cd pagent
31
+ uv sync
32
+ pip install -e .
33
+ ```
34
+
35
+ ## 环境变量(API Key)
36
+
37
+ 默认 `LLM` 会读 **`OPENAI_API_KEY`**(与 OpenAI 官方一致)。
38
+
39
+ 在终端里(当前 shell 有效):
40
+
41
+ ```bash
42
+ export OPENAI_API_KEY="sk-..." # macOS / Linux
43
+ # Windows (cmd): set OPENAI_API_KEY=sk-...
44
+ # Windows (PowerShell): $env:OPENAI_API_KEY="sk-..."
45
+ ```
46
+
47
+ 长期生效可写进 `~/.zshrc` / `~/.bashrc`,或用 [direnv](https://direnv.net)、`.env` + 你自己的加载方式(本库不读 `.env` 文件)。
48
+
49
+ 若既没设置环境变量、调用时也没传 `apikey=`,请求会带着空 key 发出,一般会认证失败——请至少满足其一。
50
+
51
+ ## 完整示例:工具调用 + 查看用量
52
+
53
+ 下面假设已设置 `OPENAI_API_KEY`,模型名按你账号可用模型修改(示例用 `gpt-4o-mini`)。
54
+
55
+ 将以下内容保存为 `demo.py` 后执行:`python demo.py`。
56
+
57
+ ```python
58
+ import asyncio
59
+ import os
60
+
61
+ from pagent import Agent, LLM, Session, tool
62
+
63
+
64
+ @tool()
65
+ def get_weather(city: str) -> str:
66
+ """Return a short fake weather line for the city.
67
+
68
+ Args:
69
+ city: City name in English or Chinese.
70
+ """
71
+ return f"It's sunny in {city} today."
72
+
73
+
74
+ async def main() -> None:
75
+ if not os.getenv("OPENAI_API_KEY"):
76
+ raise SystemExit("请先设置环境变量 OPENAI_API_KEY")
77
+
78
+ llm = LLM("gpt-4o-mini")
79
+ session = Session("You are a concise assistant. Use tools when needed.")
80
+ agent = Agent(llm=llm, session=session, tools=[get_weather], max_turns=8)
81
+
82
+ result = await agent.run("厦门今天天气怎样?用工具查。")
83
+ print("--- reply ---")
84
+ print(result.content)
85
+ print("--- stats ---")
86
+ print(agent.stats)
87
+
88
+
89
+ if __name__ == "__main__":
90
+ asyncio.run(main())
91
+ ```
92
+
93
+ `agent.stats` 上会累计 `turns`、token 用量等;`result` 为最后一轮模型返回(`result.usage` 等为该轮信息,具体以 SDK 为准)。
94
+
95
+ ## 接入自己的供应商(任意 OpenAI 兼容网关)
96
+
97
+ 思路:**同一个 `LLM` 类**,换 `base_url`、`model_id`,以及 key 的来源。
98
+
99
+ ### 方式一:显式传参(推荐,最直观)
100
+
101
+ 把网关文档里的 **Base URL**(通常带 `/v1`)、**模型 id**、**API Key** 填进去即可:
102
+
103
+ ```python
104
+ import os
105
+
106
+ from pagent import LLM
107
+
108
+ llm = LLM(
109
+ "your-model-id-on-that-host",
110
+ base_url="https://your-gateway.example.com/v1",
111
+ apikey=os.environ["MY_LLM_API_KEY"],
112
+ )
113
+ ```
114
+
115
+ 也可以继续用环境变量存 key,变量名你自己定,只要在代码里 `os.environ["..."]` 或 `os.getenv("...")` 读出来传给 `apikey` 即可。
116
+
117
+ ### 方式二:子类固定「自家」默认地址和 key 环境变量
118
+
119
+ 适合团队内封装:把 `BASE_URL`、`API_KEY_ENV_VAR` 写在子类上,`get_api_key()` 会自动读对应环境变量。
120
+
121
+ ```python
122
+ import os
123
+
124
+ from pagent import LLM
125
+
126
+
127
+ class AcmeChat(LLM):
128
+ """示例:公司统一网关。"""
129
+
130
+ API_KEY_ENV_VAR = "ACME_LLM_API_KEY"
131
+ BASE_URL = "https://llm.acme.internal/v1"
132
+
133
+ def __init__(self, model_id="acme-default", **kwargs):
134
+ super().__init__(model_id, **kwargs)
135
+
136
+
137
+ # 使用前: export ACME_LLM_API_KEY=...
138
+ llm = AcmeChat("acme-default")
139
+ ```
140
+
141
+ `kwargs` 仍可传 `base_url` / `apikey` / `request_kwargs`,用于临时覆盖默认(与基类 `LLM.__init__` 行为一致)。
142
+
143
+ ### `request_kwargs`:额外请求参数
144
+
145
+ 若网关要求温度、顶层 `extra_body` 等,可挂在构造函数的第四项:
146
+
147
+ ```python
148
+ LLM(
149
+ "some-model",
150
+ base_url="https://.../v1",
151
+ apikey=os.environ["MY_KEY"],
152
+ request_kwargs={"temperature": 0.2},
153
+ )
154
+ ```
155
+
156
+ 这些键会并入每次 `chat.completions.create(...)`(本库固定 `stream=False`)。
157
+
158
+ ---
159
+
160
+ **说明**:只要对方实现的是 **OpenAI Chat Completions** 兼容接口(路径、字段与官方相近),上述方式即可;若对方 API 形状完全不同,需要在网关侧做适配,或自行改写 `LLM.invoke`,那已超出本库的默认假设。
@@ -0,0 +1,9 @@
1
+ pagent/__init__.py,sha256=w8oh3XXsGAuZ9hmgoyG9xPHLS5DpTg8tl4RTcTA4x5k,319
2
+ pagent/agent.py,sha256=nXDfVGbObBj-o8gLg4iuuEtl015JRD8TtZTX-Crb1jI,3003
3
+ pagent/llm.py,sha256=hmfzWeoknf4UI7VBPKVwSJP6E_UCpL_Of3NXIcunEWc,2146
4
+ pagent/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
5
+ pagent/session.py,sha256=yquxM2yb76kQRu8JbiMOkPQVlWdfcCl3QdsPrPLlesk,993
6
+ pagent/tool.py,sha256=Mm6dUTy9YG1nsrBVZsIW7lkzDCDP42Q1Q74UNY4AJQs,3957
7
+ pagent-0.1.0.dist-info/WHEEL,sha256=lh7MMMfiuFQLQaR9J7pNBODdWf-aa5UOeuuDAol3xps,79
8
+ pagent-0.1.0.dist-info/METADATA,sha256=dHA-pbQXnMejLiEwtdYAH198Hhq4O82vbsCObGf_13I,4740
9
+ pagent-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: uv 0.8.20
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any