agenthub-python 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,9 @@
1
+ Metadata-Version: 2.3
2
+ Name: agenthub-python
3
+ Version: 0.1.0
4
+ Summary: AgentHub package
5
+ Requires-Dist: google-genai>=1.5.0
6
+ Requires-Dist: httpx[socks]
7
+ Requires-Dist: anthropic>=0.40.0
8
+ Requires-Dist: flask>=3.0.0
9
+ Requires-Python: >=3.11
@@ -0,0 +1,20 @@
1
+ # Copyright 2025 Prism Shadow. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .auto_client import AutoLLMClient
16
+ from .tracer import Tracer
17
+ from .types import ThinkingLevel
18
+
19
+
20
+ __all__ = ["AutoLLMClient", "ThinkingLevel", "Tracer"]
@@ -0,0 +1,96 @@
1
+ # Copyright 2025 Prism Shadow. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Any, AsyncIterator
16
+
17
+ from .base_client import LLMClient
18
+ from .types import UniConfig, UniEvent, UniMessage
19
+
20
+
21
class AutoLLMClient(LLMClient):
    """
    Auto-routing LLM client that dispatches to appropriate model-specific client.

    This client is stateful - it knows the model name at initialization and maintains
    conversation history for that specific model.
    """

    def __init__(self, model: str, api_key: str | None = None):
        """
        Initialize AutoLLMClient with a specific model.

        Args:
            model: Model identifier (determines which client to use)
            api_key: Optional API key
        """
        self._client = self._create_client_for_model(model, api_key)

    def _create_client_for_model(self, model: str, api_key: str | None = None) -> LLMClient:
        """Instantiate the concrete client that matches the model identifier."""
        # Lower-case once and use guard-clause routing; imports are kept local
        # so only the selected backend's SDK is actually loaded.
        lowered = model.lower()

        if "gemini-3" in lowered:  # e.g., gemini-3-flash-preview
            from .gemini3 import Gemini3Client

            return Gemini3Client(model=model, api_key=api_key)

        if "claude" in lowered and "4-5" in lowered:  # e.g., claude-sonnet-4-5
            from .claude4_5 import Claude4_5Client

            return Claude4_5Client(model=model, api_key=api_key)

        if "gpt-5.2" in lowered:  # e.g., gpt-5.2
            raise NotImplementedError("GPT models not yet implemented.")

        raise ValueError(f"{model} is not supported.")

    def transform_uni_config_to_model_config(self, config: UniConfig) -> Any:
        """Forward config conversion to the wrapped client."""
        return self._client.transform_uni_config_to_model_config(config)

    def transform_uni_message_to_model_input(self, messages: list[UniMessage]) -> Any:
        """Forward message conversion to the wrapped client."""
        return self._client.transform_uni_message_to_model_input(messages)

    def transform_model_output_to_uni_event(self, model_output: Any) -> UniEvent:
        """Forward output conversion to the wrapped client."""
        return self._client.transform_model_output_to_uni_event(model_output)

    async def streaming_response(
        self,
        messages: list[UniMessage],
        config: UniConfig,
    ) -> AsyncIterator[UniEvent]:
        """Relay the wrapped client's stateless streaming events unchanged."""
        stream = self._client.streaming_response(messages=messages, config=config)
        async for event in stream:
            yield event

    async def streaming_response_stateful(
        self,
        message: UniMessage,
        config: UniConfig,
    ) -> AsyncIterator[UniEvent]:
        """Relay the wrapped client's stateful streaming events unchanged."""
        stream = self._client.streaming_response_stateful(message=message, config=config)
        async for event in stream:
            yield event

    def clear_history(self) -> None:
        """Reset conversation history held by the wrapped client."""
        self._client.clear_history()

    def get_history(self) -> list[UniMessage]:
        """Return conversation history held by the wrapped client."""
        return self._client.get_history()
@@ -0,0 +1,185 @@
1
+ # Copyright 2025 Prism Shadow. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from abc import ABC, abstractmethod
16
+ from typing import Any, AsyncIterator
17
+
18
+ from .types import ContentItem, FinishReason, UniConfig, UniEvent, UniMessage, UsageMetadata
19
+
20
+
21
class LLMClient(ABC):
    """
    Abstract base class for LLM clients.

    All model-specific clients must inherit from this class and implement
    the required abstract methods for complete SDK abstraction.
    """

    # Class-level fallback kept for backward compatibility with subclasses that
    # neither call super().__init__() nor assign their own ``_history``.
    # NOTE(review): as a bare class attribute this single list would be shared
    # across all such instances; ``__init__`` below gives each instance its own
    # list so conversations never leak between clients.
    _history: list[UniMessage] = []

    def __init__(self) -> None:
        """Create a client with an empty, per-instance conversation history."""
        self._history: list[UniMessage] = []

    @abstractmethod
    def transform_uni_config_to_model_config(self, config: UniConfig) -> Any:
        """
        Transform universal configuration to model-specific configuration.

        Args:
            config: Universal configuration dict

        Returns:
            Model-specific configuration object
        """
        pass

    @abstractmethod
    def transform_uni_message_to_model_input(self, messages: list[UniMessage]) -> Any:
        """
        Transform universal message format to model-specific input format.

        Args:
            messages: List of universal message dictionaries

        Returns:
            Model-specific input format (e.g., Gemini's Content list, OpenAI's messages array)
        """
        pass

    @abstractmethod
    def transform_model_output_to_uni_event(self, model_output: Any) -> UniEvent:
        """
        Transform model output to universal event format.

        Args:
            model_output: Model-specific output object (streaming chunk)

        Returns:
            Universal event dictionary
        """
        pass

    def concat_uni_events_to_uni_message(self, events: list[UniEvent]) -> UniMessage:
        """
        Concatenate a stream of universal events into a single universal message.

        This is a concrete method implemented in the base class that can be reused
        by all model clients. It accumulates events and builds a complete message.
        Consecutive "text" (or "thinking") fragments are merged into one item;
        empty fragments are dropped; all other item types are appended verbatim.

        Args:
            events: List of universal events from streaming response

        Returns:
            Complete universal message dictionary
        """
        content_items: list[ContentItem] = []
        usage_metadata: UsageMetadata | None = None
        finish_reason: FinishReason | None = None

        for event in events:
            # Merge content_items from all events
            for item in event["content_items"]:
                if item["type"] == "text":
                    if content_items and content_items[-1]["type"] == "text":
                        content_items[-1]["text"] += item["text"]
                        if "signature" in item:  # signature may appear at the last item
                            content_items[-1]["signature"] = item["signature"]
                    elif item["text"]:  # omit empty text items
                        content_items.append(item.copy())
                elif item["type"] == "thinking":
                    if content_items and content_items[-1]["type"] == "thinking":
                        content_items[-1]["thinking"] += item["thinking"]
                        if "signature" in item:  # signature may appear at the last item
                            content_items[-1]["signature"] = item["signature"]
                    elif item["thinking"]:  # omit empty thinking items
                        content_items.append(item.copy())
                else:
                    content_items.append(item.copy())

            usage_metadata = event.get("usage_metadata")  # usage_metadata is taken from the last event
            finish_reason = event.get("finish_reason")  # finish_reason is taken from the last event

        return {
            "role": "assistant",
            "content_items": content_items,
            "usage_metadata": usage_metadata,
            "finish_reason": finish_reason,
        }

    @abstractmethod
    async def streaming_response(
        self,
        messages: list[UniMessage],
        config: UniConfig,
    ) -> AsyncIterator[UniEvent]:
        """
        Generate content in streaming mode (stateless).

        This method should use transform_uni_config_to_model_config and
        transform_uni_message_to_model_input to prepare the request, then
        transform_model_output_to_uni_event to convert each chunk.

        Args:
            messages: List of universal message dictionaries containing conversation history
            config: Universal configuration dict

        Yields:
            Universal events from the streaming response
        """
        pass

    async def streaming_response_stateful(
        self,
        message: UniMessage,
        config: UniConfig,
    ) -> AsyncIterator[UniEvent]:
        """
        Generate content in streaming mode (stateful).

        Appends `message` to the internal history, streams events from
        streaming_response over the full history, then folds the streamed
        events into a single assistant message via
        concat_uni_events_to_uni_message and appends it to the history.

        Args:
            message: Latest universal message dictionary to add to conversation
            config: Universal configuration dict

        Yields:
            Universal events from the streaming response
        """
        # Add user message to history
        self._history.append(message)

        # Collect all events for history
        events = []
        async for event in self.streaming_response(messages=self._history, config=config):
            events.append(event)
            yield event

        # Convert events to message and add to history
        if events:
            assistant_message = self.concat_uni_events_to_uni_message(events)
            self._history.append(assistant_message)

        # Save history to file if trace_id is specified
        if config.get("trace_id"):
            # Local import avoids a circular import at module load time.
            from .tracer import Tracer

            tracer = Tracer()
            tracer.save_history(self._history, config["trace_id"], config)

    def clear_history(self) -> None:
        """Clear the message history."""
        self._history.clear()

    def get_history(self) -> list[UniMessage]:
        """Get a shallow copy of the current message history."""
        return self._history.copy()
@@ -0,0 +1,18 @@
1
+ # Copyright 2025 Prism Shadow. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .client import Claude4_5Client
16
+
17
+
18
+ __all__ = ["Claude4_5Client"]
@@ -0,0 +1,315 @@
1
+ # Copyright 2025 Prism Shadow. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import os
17
+ from typing import Any, AsyncIterator
18
+
19
+ from anthropic import AsyncAnthropic
20
+ from anthropic.types import MessageParam, MessageStreamEvent
21
+
22
+ from ..base_client import LLMClient
23
+ from ..types import (
24
+ FinishReason,
25
+ PartialContentItem,
26
+ PartialUniEvent,
27
+ ThinkingLevel,
28
+ ToolChoice,
29
+ UniConfig,
30
+ UniEvent,
31
+ UniMessage,
32
+ UsageMetadata,
33
+ )
34
+
35
+
36
class Claude4_5Client(LLMClient):
    """Claude 4.5-specific LLM client implementation."""

    def __init__(self, model: str, api_key: str | None = None):
        """Initialize Claude 4.5 client with model and API key.

        Args:
            model: Claude model identifier passed through to the API.
            api_key: Optional key; falls back to the ANTHROPIC_API_KEY env var.
        """
        self._model = model
        api_key = api_key or os.getenv("ANTHROPIC_API_KEY")
        self._client = AsyncAnthropic(api_key=api_key)
        self._history: list[UniMessage] = []

    def _convert_thinking_level_to_budget(self, thinking_level: ThinkingLevel) -> dict[str, Any]:
        """Convert ThinkingLevel enum to Claude's thinking config (budget_tokens).

        Raises:
            ValueError: If the thinking level is not a known ThinkingLevel member.
        """
        mapping = {
            ThinkingLevel.NONE: {"type": "disabled"},
            ThinkingLevel.LOW: {"type": "enabled", "budget_tokens": 1024},
            ThinkingLevel.MEDIUM: {"type": "enabled", "budget_tokens": 4096},
            ThinkingLevel.HIGH: {"type": "enabled", "budget_tokens": 16384},
        }
        # Fail loudly instead of returning None (which would produce an
        # invalid `thinking` field in the request config).
        if thinking_level not in mapping:
            raise ValueError(f"Unsupported thinking level: {thinking_level!r}")
        return mapping[thinking_level]

    def _convert_tool_choice(self, tool_choice: ToolChoice) -> dict[str, Any]:
        """Convert ToolChoice to Claude's tool_choice format.

        Raises:
            ValueError: If more than one named tool is given, or the value
                is not one of "none" / "auto" / "required" / [tool_name].
        """
        if isinstance(tool_choice, list):
            if len(tool_choice) > 1:
                raise ValueError("Claude supports only one tool choice.")

            return {"type": "any", "name": tool_choice[0]}
        elif tool_choice == "none":
            return {"type": "none"}
        elif tool_choice == "auto":
            return {"type": "auto"}
        elif tool_choice == "required":
            return {"type": "any"}
        # Previously this fell through and returned None implicitly; raise so
        # a bad value surfaces at config-build time rather than at the API.
        raise ValueError(f"Unsupported tool_choice: {tool_choice!r}")

    def transform_uni_config_to_model_config(self, config: UniConfig) -> dict[str, Any]:
        """
        Transform universal configuration to Claude-specific configuration.

        Args:
            config: Universal configuration dict

        Returns:
            Claude configuration dictionary
        """
        claude_config = {"model": self._model}

        # Add max_tokens (required for Claude)
        if config.get("max_tokens") is not None:
            claude_config["max_tokens"] = config["max_tokens"]
        else:
            claude_config["max_tokens"] = 32768  # Claude requires max_tokens to be specified

        # Add temperature
        if config.get("temperature") is not None:
            claude_config["temperature"] = config["temperature"]

        # Add system prompt
        if config.get("system_prompt") is not None:
            claude_config["system"] = config["system_prompt"]

        # Convert thinking configuration
        # NOTE: Claude always provides thinking summary
        if config.get("thinking_level") is not None:
            claude_config["temperature"] = 1.0  # `temperature` may only be set to 1 when thinking is enabled
            claude_config["thinking"] = self._convert_thinking_level_to_budget(config["thinking_level"])

        # Convert tools to Claude's tool schema
        if config.get("tools") is not None:
            claude_tools = []
            for tool in config["tools"]:
                claude_tool = {}
                for key, value in tool.items():
                    # Claude names the schema field "input_schema" where the
                    # universal schema uses "parameters".
                    claude_tool[key.replace("parameters", "input_schema")] = value

                claude_tools.append(claude_tool)

            claude_config["tools"] = claude_tools

        # Convert tool_choice
        if config.get("tool_choice") is not None:
            claude_config["tool_choice"] = self._convert_tool_choice(config["tool_choice"])

        return claude_config

    def transform_uni_message_to_model_input(self, messages: list[UniMessage]) -> list[MessageParam]:
        """
        Transform universal message format to Claude's MessageParam format.

        Args:
            messages: List of universal message dictionaries

        Returns:
            List of Claude MessageParam objects

        Raises:
            ValueError: On an unknown content item type, or a tool_result
                item missing its tool_call_id.
        """
        claude_messages: list[MessageParam] = []

        for msg in messages:
            content_blocks = []
            for item in msg["content_items"]:
                if item["type"] == "text":
                    content_blocks.append({"type": "text", "text": item["text"]})
                elif item["type"] == "image_url":
                    # TODO: support base64 encoded images
                    content_blocks.append({"type": "image", "source": {"type": "url", "url": item["image_url"]}})
                elif item["type"] == "thinking":
                    content_blocks.append(
                        {"type": "thinking", "thinking": item["thinking"], "signature": item["signature"]}
                    )
                elif item["type"] == "tool_call":
                    content_blocks.append(
                        {
                            "type": "tool_use",
                            "id": item["tool_call_id"],
                            "name": item["name"],
                            "input": item["argument"],
                        }
                    )
                elif item["type"] == "tool_result":
                    if "tool_call_id" not in item:
                        raise ValueError("tool_call_id is required for tool result.")

                    content_blocks.append(
                        {"type": "tool_result", "content": item["result"], "tool_use_id": item["tool_call_id"]}
                    )
                else:
                    raise ValueError(f"Unknown item: {item}")

            claude_messages.append({"role": msg["role"], "content": content_blocks})

        return claude_messages

    def transform_model_output_to_uni_event(self, model_output: MessageStreamEvent) -> PartialUniEvent:
        """
        Transform Claude model output to universal event format.

        NOTE: Claude always has only one content item per event.

        Args:
            model_output: Claude streaming event

        Returns:
            Universal event dictionary

        Raises:
            ValueError: On an unrecognized Claude stream event type.
        """
        event_type = None
        content_items: list[PartialContentItem] = []
        usage_metadata: UsageMetadata | None = None
        finish_reason: FinishReason | None = None

        claude_event_type = model_output.type
        if claude_event_type == "content_block_start":
            event_type = "start"
            block = model_output.content_block
            if block.type == "tool_use":
                # Tool calls arrive as a start block (name/id) followed by
                # input_json deltas; mark it partial so the stream loop can
                # accumulate the JSON argument.
                content_items.append(
                    {"type": "partial_tool_call", "name": block.name, "argument": "", "tool_call_id": block.id}
                )

        elif claude_event_type == "content_block_delta":
            event_type = "delta"
            delta = model_output.delta
            if delta.type == "thinking_delta":
                content_items.append({"type": "thinking", "thinking": delta.thinking})
            elif delta.type == "text_delta":
                content_items.append({"type": "text", "text": delta.text})
            elif delta.type == "input_json_delta":
                content_items.append({"type": "partial_tool_call", "argument": delta.partial_json})
            elif delta.type == "signature_delta":
                content_items.append({"type": "thinking", "thinking": "", "signature": delta.signature})

        elif claude_event_type == "content_block_stop":
            event_type = "stop"

        elif claude_event_type == "message_start":
            event_type = "start"
            message = model_output.message
            if getattr(message, "usage", None):
                # Only the prompt side is known at message start.
                usage_metadata = {
                    "prompt_tokens": message.usage.input_tokens,
                    "thoughts_tokens": None,
                    "response_tokens": None,
                }

        elif claude_event_type == "message_delta":
            event_type = "stop"
            delta = model_output.delta
            if getattr(delta, "stop_reason", None):
                stop_reason_mapping = {
                    "end_turn": "stop",
                    "max_tokens": "length",
                    "stop_sequence": "stop",
                    "tool_use": "stop",
                }
                finish_reason = stop_reason_mapping.get(delta.stop_reason, "unknown")

            if getattr(model_output, "usage", None):
                usage_metadata = {
                    "prompt_tokens": None,
                    "thoughts_tokens": None,
                    "response_tokens": model_output.usage.output_tokens,
                }

        elif claude_event_type == "message_stop":
            event_type = "stop"

        elif claude_event_type in ["text", "thinking", "signature", "input_json"]:
            # SDK convenience events that duplicate the delta events above.
            event_type = "unused"

        else:
            raise ValueError(f"Unknown output: {model_output}")

        return {
            "role": "assistant",
            "event": event_type,
            "content_items": content_items,
            "usage_metadata": usage_metadata,
            "finish_reason": finish_reason,
        }

    async def streaming_response(
        self,
        messages: list[UniMessage],
        config: UniConfig,
    ) -> AsyncIterator[UniEvent]:
        """Stream generate using Claude SDK with unified conversion methods.

        Tool-call JSON fragments and usage counters are accumulated across
        partial events and emitted as complete events at block/message stop.
        """
        # Use unified config conversion
        claude_config = self.transform_uni_config_to_model_config(config)

        # Use unified message conversion
        claude_messages = self.transform_uni_message_to_model_input(messages)

        # Stream generate
        partial_tool_call = {}
        partial_usage = {}
        async with self._client.messages.stream(**claude_config, messages=claude_messages) as stream:
            async for event in stream:
                event = self.transform_model_output_to_uni_event(event)
                if event["event"] == "start":
                    if event["content_items"] and event["content_items"][0]["type"] == "partial_tool_call":
                        partial_tool_call["name"] = event["content_items"][0]["name"]
                        partial_tool_call["argument"] = ""
                        partial_tool_call["tool_call_id"] = event["content_items"][0]["tool_call_id"]

                    if event["usage_metadata"] is not None:
                        partial_usage["prompt_tokens"] = event["usage_metadata"]["prompt_tokens"]

                elif event["event"] == "delta":
                    # Guard against empty content_items (an unrecognized delta
                    # type produces a "delta" event with no items, which would
                    # otherwise raise IndexError here).
                    items = event["content_items"]
                    if items and items[0]["type"] == "partial_tool_call":
                        partial_tool_call["argument"] += items[0]["argument"]
                    else:
                        event.pop("event")
                        yield event

                elif event["event"] == "stop":
                    if "name" in partial_tool_call and "argument" in partial_tool_call:
                        # Emit the fully-assembled tool call once its block ends.
                        yield {
                            "role": "assistant",
                            "content_items": [
                                {
                                    "type": "tool_call",
                                    "name": partial_tool_call["name"],
                                    "argument": json.loads(partial_tool_call["argument"]),
                                    "tool_call_id": partial_tool_call["tool_call_id"],
                                }
                            ],
                        }
                        partial_tool_call = {}

                    if "prompt_tokens" in partial_usage and event["usage_metadata"] is not None:
                        # Combine prompt tokens (from message_start) with
                        # response tokens (from message_delta) into one event.
                        yield {
                            "role": "assistant",
                            "content_items": [],
                            "usage_metadata": {
                                "prompt_tokens": partial_usage["prompt_tokens"],
                                "thoughts_tokens": None,
                                "response_tokens": event["usage_metadata"]["response_tokens"],
                            },
                            "finish_reason": event["finish_reason"],
                        }
                        partial_usage = {}
@@ -0,0 +1,18 @@
1
+ # Copyright 2025 Prism Shadow. and/or its affiliates
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from .client import Gemini3Client
16
+
17
+
18
+ __all__ = ["Gemini3Client"]