agenthub-python 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agenthub/__init__.py +20 -0
- agenthub/auto_client.py +96 -0
- agenthub/base_client.py +185 -0
- agenthub/claude4_5/__init__.py +18 -0
- agenthub/claude4_5/client.py +315 -0
- agenthub/gemini3/__init__.py +18 -0
- agenthub/gemini3/client.py +231 -0
- agenthub/tracer.py +722 -0
- agenthub/types.py +134 -0
- agenthub_python-0.1.0.dist-info/METADATA +9 -0
- agenthub_python-0.1.0.dist-info/RECORD +12 -0
- agenthub_python-0.1.0.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,231 @@
|
|
|
1
|
+
# Copyright 2025 Prism Shadow. and/or its affiliates
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
import os
|
|
16
|
+
from typing import AsyncIterator
|
|
17
|
+
|
|
18
|
+
from google import genai
|
|
19
|
+
from google.genai import types
|
|
20
|
+
|
|
21
|
+
from ..base_client import LLMClient
|
|
22
|
+
from ..types import (
|
|
23
|
+
FinishReason,
|
|
24
|
+
PartialContentItem,
|
|
25
|
+
PartialUniEvent,
|
|
26
|
+
ThinkingLevel,
|
|
27
|
+
ToolChoice,
|
|
28
|
+
UniConfig,
|
|
29
|
+
UniEvent,
|
|
30
|
+
UniMessage,
|
|
31
|
+
UsageMetadata,
|
|
32
|
+
)
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class Gemini3Client(LLMClient):
    """Gemini 3-specific LLM client implementation."""

    def __init__(self, model: str, api_key: str | None = None):
        """Initialize the Gemini 3 client.

        Args:
            model: Gemini model identifier used for all generation calls.
            api_key: Explicit API key. When omitted, falls back to the
                GEMINI_API_KEY then GOOGLE_API_KEY environment variables;
                if neither is set, the SDK's own credential discovery is used.
        """
        self._model = model
        resolved_key = api_key or os.getenv("GEMINI_API_KEY") or os.getenv("GOOGLE_API_KEY")
        if resolved_key:
            self._client = genai.Client(api_key=resolved_key)
        else:
            # No key anywhere: let the SDK resolve credentials itself.
            self._client = genai.Client()
        self._history: list[UniMessage] = []
|
|
45
|
+
def _detect_mime_type(self, url: str) -> str | None:
|
|
46
|
+
"""Detect MIME type from URL extension."""
|
|
47
|
+
import mimetypes
|
|
48
|
+
|
|
49
|
+
mime_type, _ = mimetypes.guess_type(url)
|
|
50
|
+
return mime_type
|
|
51
|
+
|
|
52
|
+
def _convert_thinking_level(self, thinking_level: ThinkingLevel) -> types.ThinkingLevel | None:
|
|
53
|
+
"""Convert ThinkingLevel enum to Gemini's ThinkingLevel."""
|
|
54
|
+
mapping = {
|
|
55
|
+
ThinkingLevel.NONE: types.ThinkingLevel.MINIMAL,
|
|
56
|
+
ThinkingLevel.LOW: types.ThinkingLevel.LOW,
|
|
57
|
+
ThinkingLevel.MEDIUM: types.ThinkingLevel.MEDIUM,
|
|
58
|
+
ThinkingLevel.HIGH: types.ThinkingLevel.HIGH,
|
|
59
|
+
}
|
|
60
|
+
return mapping.get(thinking_level)
|
|
61
|
+
|
|
62
|
+
def _convert_tool_choice(self, tool_choice: ToolChoice) -> types.FunctionCallingConfig:
    """Convert a universal ToolChoice into Gemini's function-calling config.

    Args:
        tool_choice: Either a list of allowed function names, or one of
            the string modes "none", "auto", "required".

    Returns:
        A FunctionCallingConfig matching the requested behavior.

    Raises:
        ValueError: If tool_choice is neither a list nor a recognized mode.
    """
    if isinstance(tool_choice, list):
        # Restrict the model to exactly the caller-provided function names.
        return types.FunctionCallingConfig(mode="ANY", allowed_function_names=tool_choice)
    if tool_choice == "none":
        return types.FunctionCallingConfig(mode="NONE")
    if tool_choice == "auto":
        return types.FunctionCallingConfig(mode="AUTO")
    if tool_choice == "required":
        # Gemini's "ANY" mode forces a function call, matching "required".
        return types.FunctionCallingConfig(mode="ANY")
    # Previously this fell through and implicitly returned None, violating
    # the declared return type and surfacing later as a confusing SDK error.
    raise ValueError(f"Unsupported tool_choice: {tool_choice!r}")
|
|
73
|
+
def transform_uni_config_to_model_config(self, config: UniConfig) -> types.GenerateContentConfig | None:
    """Build a Gemini GenerateContentConfig from a universal config dict.

    Args:
        config: Universal configuration dict.

    Returns:
        A Gemini GenerateContentConfig object, or None when no settings
        were supplied.
    """
    params = {}

    # Direct one-to-one scalar settings.
    system_prompt = config.get("system_prompt")
    if system_prompt is not None:
        params["system_instruction"] = system_prompt

    max_tokens = config.get("max_tokens")
    if max_tokens is not None:
        params["max_output_tokens"] = max_tokens

    temperature = config.get("temperature")
    if temperature is not None:
        params["temperature"] = temperature

    # Thinking settings are bundled into a single ThinkingConfig.
    thinking_summary = config.get("thinking_summary")
    thinking_level = config.get("thinking_level")
    if not (thinking_summary is None and thinking_level is None):
        params["thinking_config"] = types.ThinkingConfig(
            include_thoughts=thinking_summary,
            thinking_level=self._convert_thinking_level(thinking_level),
        )

    # Tools and the tool-choice policy.
    tools = config.get("tools")
    if tools is not None:
        params["tools"] = [types.Tool(function_declarations=tools)]
    tool_choice = config.get("tool_choice")
    if tool_choice is not None:
        params["tool_config"] = types.ToolConfig(
            function_calling_config=self._convert_tool_choice(tool_choice)
        )

    if not params:
        return None
    return types.GenerateContentConfig(**params)
|
|
111
|
+
def transform_uni_message_to_model_input(self, messages: list[UniMessage]) -> list[types.Content]:
|
|
112
|
+
"""
|
|
113
|
+
Transform universal message format to Gemini's Content format.
|
|
114
|
+
|
|
115
|
+
Args:
|
|
116
|
+
messages: List of universal message dictionaries
|
|
117
|
+
|
|
118
|
+
Returns:
|
|
119
|
+
List of Gemini Content objects
|
|
120
|
+
"""
|
|
121
|
+
mapping = {"user": "user", "assistant": "model"}
|
|
122
|
+
contents = []
|
|
123
|
+
for msg in messages:
|
|
124
|
+
parts = []
|
|
125
|
+
for item in msg["content_items"]:
|
|
126
|
+
if item["type"] == "text":
|
|
127
|
+
parts.append(types.Part(text=item["text"], thought_signature=item.get("signature")))
|
|
128
|
+
elif item["type"] == "image_url":
|
|
129
|
+
# TODO: support base64 encoded images
|
|
130
|
+
url_value = item["image_url"]
|
|
131
|
+
mime_type = self._detect_mime_type(url_value)
|
|
132
|
+
parts.append(types.Part.from_uri(file_uri=url_value, mime_type=mime_type))
|
|
133
|
+
elif item["type"] == "thinking":
|
|
134
|
+
parts.append(
|
|
135
|
+
types.Part(text=item["thinking"], thought=True, thought_signature=item.get("signature"))
|
|
136
|
+
)
|
|
137
|
+
elif item["type"] == "tool_call":
|
|
138
|
+
function_call = types.FunctionCall(name=item["name"], args=item["argument"])
|
|
139
|
+
parts.append(types.Part(function_call=function_call, thought_signature=item.get("signature")))
|
|
140
|
+
elif item["type"] == "tool_result":
|
|
141
|
+
if "tool_call_id" not in item:
|
|
142
|
+
raise ValueError("tool_call_id is required for tool result.")
|
|
143
|
+
|
|
144
|
+
parts.append(
|
|
145
|
+
types.Part.from_function_response(
|
|
146
|
+
name=item["tool_call_id"], response={"result": item["result"]}
|
|
147
|
+
)
|
|
148
|
+
)
|
|
149
|
+
else:
|
|
150
|
+
raise ValueError(f"Unknown item: {item}")
|
|
151
|
+
|
|
152
|
+
contents.append(types.Content(role=mapping[msg["role"]], parts=parts))
|
|
153
|
+
|
|
154
|
+
return contents
|
|
155
|
+
|
|
156
|
+
def transform_model_output_to_uni_event(self, model_output: types.GenerateContentResponse) -> PartialUniEvent:
    """
    Transform Gemini model output to universal event format.

    Args:
        model_output: Gemini response chunk

    Returns:
        Universal event dictionary

    Raises:
        ValueError: If a response part is neither a function call, a
            thought, nor text.
    """
    content_items: list[PartialContentItem] = []
    usage_metadata: UsageMetadata | None = None
    finish_reason: FinishReason | None = None

    candidate = model_output.candidates[0]
    # Streaming can emit usage- or finish-only chunks whose `content`
    # (or its `parts`) is None; iterating those directly would raise
    # AttributeError, so fall back to an empty part list.
    parts = candidate.content.parts if candidate.content and candidate.content.parts else []
    for part in parts:
        if part.function_call is not None:
            content_items.append(
                {
                    "type": "tool_call",
                    "name": part.function_call.name,
                    "argument": part.function_call.args,
                    # Gemini has no separate call id; the function name
                    # doubles as the id (mirrored in tool_result handling).
                    "tool_call_id": part.function_call.name,
                    "signature": part.thought_signature,
                }
            )
        elif part.text is not None and part.thought:
            content_items.append({"type": "thinking", "thinking": part.text, "signature": part.thought_signature})
        elif part.text is not None:
            content_items.append({"type": "text", "text": part.text, "signature": part.thought_signature})
        else:
            raise ValueError(f"Unknown output: {part}")

    if model_output.usage_metadata:
        usage_metadata = {
            "prompt_tokens": model_output.usage_metadata.prompt_token_count,
            "thoughts_tokens": model_output.usage_metadata.thoughts_token_count,
            "response_tokens": model_output.usage_metadata.candidates_token_count,
        }

    if candidate.finish_reason:
        # Anything other than a clean stop / token-limit stop is surfaced
        # as "unknown" rather than raising.
        stop_reason_mapping = {
            types.FinishReason.STOP: "stop",
            types.FinishReason.MAX_TOKENS: "length",
        }
        finish_reason = stop_reason_mapping.get(candidate.finish_reason, "unknown")

    return {
        "role": "assistant",
        "event": "delta",
        "content_items": content_items,
        "usage_metadata": usage_metadata,
        "finish_reason": finish_reason,
    }
|
|
211
|
+
async def streaming_response(
    self,
    messages: list[UniMessage],
    config: UniConfig,
) -> AsyncIterator[UniEvent]:
    """Stream a Gemini generation as universal events.

    Args:
        messages: Conversation history in universal message format.
        config: Universal generation configuration.

    Yields:
        Universal delta events, with the transient "event" key removed.
    """
    model_config = self.transform_uni_config_to_model_config(config)
    model_contents = self.transform_uni_message_to_model_input(messages)

    stream = await self._client.aio.models.generate_content_stream(
        model=self._model, contents=model_contents, config=model_config
    )
    async for chunk in stream:
        uni_event = self.transform_model_output_to_uni_event(chunk)
        # Only delta events are forwarded to the caller.
        if uni_event["event"] != "delta":
            continue
        uni_event.pop("event")
        yield uni_event
|