agent-starter-pack 0.5.3-py3-none-any.whl → 0.6.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. {agent_starter_pack-0.5.3.dist-info → agent_starter_pack-0.6.1.dist-info}/METADATA +1 -1
  2. {agent_starter_pack-0.5.3.dist-info → agent_starter_pack-0.6.1.dist-info}/RECORD +59 -33
  3. agents/adk_base/notebooks/adk_app_testing.ipynb +1 -1
  4. agents/adk_gemini_fullstack/README.md +148 -0
  5. agents/adk_gemini_fullstack/app/agent.py +363 -0
  6. src/frontends/streamlit_adk/frontend/style/app_markdown.py → agents/adk_gemini_fullstack/app/config.py +19 -23
  7. agents/adk_gemini_fullstack/notebooks/adk_app_testing.ipynb +353 -0
  8. agents/adk_gemini_fullstack/notebooks/evaluating_adk_agent.ipynb +1528 -0
  9. agents/adk_gemini_fullstack/template/.templateconfig.yaml +37 -0
  10. agents/adk_gemini_fullstack/tests/integration/test_agent.py +58 -0
  11. agents/agentic_rag/notebooks/adk_app_testing.ipynb +1 -1
  12. src/base_template/Makefile +21 -2
  13. src/base_template/README.md +8 -3
  14. src/base_template/pyproject.toml +1 -4
  15. src/cli/commands/create.py +17 -10
  16. src/cli/utils/template.py +13 -10
  17. src/deployment_targets/cloud_run/app/server.py +7 -1
  18. src/deployment_targets/cloud_run/tests/integration/test_server_e2e.py +1 -1
  19. src/deployment_targets/cloud_run/tests/load_test/.results/.placeholder +321 -0
  20. src/frontends/adk_gemini_fullstack/frontend/components.json +21 -0
  21. src/frontends/adk_gemini_fullstack/frontend/eslint.config.js +28 -0
  22. src/frontends/adk_gemini_fullstack/frontend/index.html +12 -0
  23. src/frontends/adk_gemini_fullstack/frontend/package-lock.json +5829 -0
  24. src/frontends/adk_gemini_fullstack/frontend/package.json +46 -0
  25. src/frontends/adk_gemini_fullstack/frontend/public/vite.svg +1 -0
  26. src/frontends/adk_gemini_fullstack/frontend/src/App.tsx +565 -0
  27. src/frontends/adk_gemini_fullstack/frontend/src/components/ActivityTimeline.tsx +244 -0
  28. src/frontends/adk_gemini_fullstack/frontend/src/components/ChatMessagesView.tsx +419 -0
  29. src/frontends/adk_gemini_fullstack/frontend/src/components/InputForm.tsx +60 -0
  30. src/frontends/adk_gemini_fullstack/frontend/src/components/WelcomeScreen.tsx +56 -0
  31. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/badge.tsx +46 -0
  32. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/button.tsx +59 -0
  33. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/card.tsx +92 -0
  34. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/input.tsx +21 -0
  35. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/scroll-area.tsx +56 -0
  36. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/select.tsx +183 -0
  37. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/tabs.tsx +64 -0
  38. src/frontends/adk_gemini_fullstack/frontend/src/components/ui/textarea.tsx +18 -0
  39. src/frontends/adk_gemini_fullstack/frontend/src/global.css +154 -0
  40. src/frontends/adk_gemini_fullstack/frontend/src/main.tsx +13 -0
  41. src/frontends/adk_gemini_fullstack/frontend/src/utils.ts +7 -0
  42. src/frontends/adk_gemini_fullstack/frontend/src/vite-env.d.ts +1 -0
  43. src/frontends/adk_gemini_fullstack/frontend/tsconfig.json +28 -0
  44. src/frontends/adk_gemini_fullstack/frontend/tsconfig.node.json +24 -0
  45. src/frontends/adk_gemini_fullstack/frontend/vite.config.ts +37 -0
  46. src/resources/locks/uv-adk_base-agent_engine.lock +24 -24
  47. src/resources/locks/uv-adk_base-cloud_run.lock +24 -24
  48. src/resources/locks/uv-adk_gemini_fullstack-agent_engine.lock +3217 -0
  49. src/resources/locks/uv-adk_gemini_fullstack-cloud_run.lock +3513 -0
  50. src/resources/locks/uv-agentic_rag-agent_engine.lock +88 -85
  51. src/resources/locks/uv-agentic_rag-cloud_run.lock +124 -119
  52. src/resources/locks/uv-crewai_coding_crew-agent_engine.lock +94 -91
  53. src/resources/locks/uv-crewai_coding_crew-cloud_run.lock +130 -125
  54. src/resources/locks/uv-langgraph_base_react-agent_engine.lock +91 -88
  55. src/resources/locks/uv-langgraph_base_react-cloud_run.lock +130 -125
  56. src/resources/locks/uv-live_api-cloud_run.lock +121 -116
  57. src/frontends/streamlit_adk/frontend/side_bar.py +0 -214
  58. src/frontends/streamlit_adk/frontend/streamlit_app.py +0 -314
  59. src/frontends/streamlit_adk/frontend/utils/chat_utils.py +0 -84
  60. src/frontends/streamlit_adk/frontend/utils/local_chat_history.py +0 -110
  61. src/frontends/streamlit_adk/frontend/utils/message_editing.py +0 -61
  62. src/frontends/streamlit_adk/frontend/utils/multimodal_utils.py +0 -223
  63. src/frontends/streamlit_adk/frontend/utils/stream_handler.py +0 -311
  64. src/frontends/streamlit_adk/frontend/utils/title_summary.py +0 -129
  65. {agent_starter_pack-0.5.3.dist-info → agent_starter_pack-0.6.1.dist-info}/WHEEL +0 -0
  66. {agent_starter_pack-0.5.3.dist-info → agent_starter_pack-0.6.1.dist-info}/entry_points.txt +0 -0
  67. {agent_starter_pack-0.5.3.dist-info → agent_starter_pack-0.6.1.dist-info}/licenses/LICENSE +0 -0
src/frontends/streamlit_adk/frontend/utils/stream_handler.py
@@ -1,311 +0,0 @@
- # Copyright 2025 Google LLC
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # mypy: disable-error-code="unreachable"
- import importlib
- import json
- from collections.abc import Generator
- from typing import Any
- from urllib.parse import urljoin
-
- import google.auth
- import google.auth.transport.requests
- import google.oauth2.id_token
- import requests
- import streamlit as st
- import vertexai
- from google.adk.events.event import Event
- from google.auth.exceptions import DefaultCredentialsError
- from vertexai import agent_engines
-
- from frontend.utils.multimodal_utils import format_content
-
- st.cache_resource.clear()
-
-
- @st.cache_resource
- def get_remote_agent(remote_agent_engine_id: str) -> Any:
-     """Get cached remote agent instance."""
-     # Extract location and engine ID from the full resource ID.
-     parts = remote_agent_engine_id.split("/")
-     project_id = parts[1]
-     location = parts[3]
-     vertexai.init(project=project_id, location=location)
-     return agent_engines.AgentEngine(remote_agent_engine_id)
-
-
- @st.cache_resource
- def get_remote_url_config(url: str, authenticate_request: bool) -> dict[str, Any]:
-     """Get cached remote URL agent configuration."""
-     stream_url = urljoin(url, "stream_messages")
-     creds, _ = google.auth.default()
-     id_token = None
-     if authenticate_request:
-         auth_req = google.auth.transport.requests.Request()
-         try:
-             id_token = google.oauth2.id_token.fetch_id_token(auth_req, stream_url)
-         except DefaultCredentialsError:
-             creds.refresh(auth_req)
-             id_token = creds.id_token
-     return {
-         "url": stream_url,
-         "authenticate_request": authenticate_request,
-         "creds": creds,
-         "id_token": id_token,
-     }
-
-
- @st.cache_resource()
- def get_local_agent(agent_callable_path: str) -> Any:
-     """Get cached local agent instance."""
-     module_path, class_name = agent_callable_path.rsplit(".", 1)
-     module = importlib.import_module(module_path)
-     agent = getattr(module, class_name)()
-     agent.set_up()
-     return agent
-
-
- class Client:
-     """A client for streaming events from a server."""
-
-     def __init__(
-         self,
-         agent_callable_path: str | None = None,
-         remote_agent_engine_id: str | None = None,
-         url: str | None = None,
-         authenticate_request: bool = False,
-     ) -> None:
-         """Initialize the Client with appropriate configuration.
-
-         Args:
-             agent_callable_path: Path to local agent class
-             remote_agent_engine_id: ID of remote Agent engine
-             url: URL for remote service
-             authenticate_request: Whether to authenticate requests to remote URL
-         """
-         if url:
-             remote_config = get_remote_url_config(url, authenticate_request)
-             self.url = remote_config["url"]
-             self.authenticate_request = remote_config["authenticate_request"]
-             self.creds = remote_config["creds"]
-             self.id_token = remote_config["id_token"]
-             self.agent = None
-         elif remote_agent_engine_id:
-             self.agent = get_remote_agent(remote_agent_engine_id)
-             self.url = None
-         else:
-             self.url = None
-             if agent_callable_path is None:
-                 raise ValueError("agent_callable_path cannot be None")
-             self.agent = get_local_agent(agent_callable_path)
-
-     def log_feedback(self, feedback_dict: dict[str, Any], invocation_id: str) -> None:
-         """Log user feedback for a specific run."""
-         score = feedback_dict["score"]
-         if score == "😞":
-             score = 0.0
-         elif score == "🙁":
-             score = 0.25
-         elif score == "😐":
-             score = 0.5
-         elif score == "🙂":
-             score = 0.75
-         elif score == "😀":
-             score = 1.0
-         feedback_dict["score"] = score
-         feedback_dict["invocation_id"] = invocation_id
-         feedback_dict["log_type"] = "feedback"
-         # Ensure text field is not None
-         if feedback_dict.get("text") is None:
-             feedback_dict["text"] = ""
-         feedback_dict.pop("type")
-         url = urljoin(self.url, "feedback")
-         headers = {
-             "Content-Type": "application/json",
-         }
-         if self.url:
-             url = urljoin(self.url, "feedback")
-             headers = {
-                 "Content-Type": "application/json",
-             }
-             if self.authenticate_request:
-                 headers["Authorization"] = f"Bearer {self.id_token}"
-             requests.post(
-                 url, data=json.dumps(feedback_dict), headers=headers, timeout=10
-             )
-         elif self.agent is not None:
-             self.agent.register_feedback(feedback=feedback_dict)
-         else:
-             raise ValueError("No agent or URL configured for feedback logging")
-
-     def stream_messages(
-         self, data: dict[str, Any]
-     ) -> Generator[dict[str, Any], None, None]:
-         """Stream events from the server, yielding parsed event data."""
-         if self.url:
-             headers = {
-                 "Content-Type": "application/json",
-                 "Accept": "text/event-stream",
-             }
-             if self.authenticate_request:
-                 headers["Authorization"] = f"Bearer {self.id_token}"
-             with requests.post(
-                 self.url, json=data, headers=headers, stream=True, timeout=60
-             ) as response:
-                 for line in response.iter_lines():
-                     if line:
-                         try:
-                             event = json.loads(line.decode("utf-8"))
-                             yield event
-                         except json.JSONDecodeError:
-                             print(f"Failed to parse event: {line.decode('utf-8')}")
-         elif self.agent is not None:
-             yield from self.agent.stream_query(**data)
-
-
- class StreamHandler:
-     """Handles streaming updates to a Streamlit interface."""
-
-     def __init__(self, st: Any, initial_text: str = "") -> None:
-         """Initialize the StreamHandler with Streamlit context and initial text."""
-         self.st = st
-         self.tool_expander = st.expander("Tool Calls:", expanded=False)
-         self.container = st.empty()
-         self.text = initial_text
-         self.tools_logs = initial_text
-
-     def new_token(self, token: str) -> None:
-         """Add a new token to the main text display."""
-         self.text += token
-         self.container.markdown(format_content(self.text), unsafe_allow_html=True)
-
-     def new_status(self, status_update: str) -> None:
-         """Add a new status update to the tool calls expander."""
-         self.tools_logs += status_update
-         self.tool_expander.markdown(status_update)
-
-
- class EventProcessor:
-     """Processes events from the stream and updates the UI accordingly."""
-
-     def __init__(self, st: Any, client: Client, stream_handler: StreamHandler) -> None:
-         """Initialize the EventProcessor with Streamlit context, client, and stream handler."""
-         self.st = st
-         self.client = client
-         self.stream_handler = stream_handler
-         self.final_content = ""
-         self.tool_calls: list[dict[str, Any]] = []
-         self.additional_kwargs: dict[str, Any] = {}
-
-     def process_events(self) -> None:
-         """Process events from the stream, handling each event type appropriately."""
-         messages = self.st.session_state.user_chats[
-             self.st.session_state["session_id"]
-         ]["messages"]
-
-         session = self.st.session_state["session_id"]
-         stream = self.client.stream_messages(
-             data={
-                 "message": messages[-1]["content"],
-                 "events": messages[:-1],
-                 "user_id": self.st.session_state["user_id"],
-                 "session_id": self.st.session_state["session_id"],
-             }
-         )
-
-         for message in stream:
-             event = Event.model_validate(message)
-
-             # Skip processing if event has no content or parts
-             if not event.content or not event.content.parts:
-                 continue
-
-             # Process each part in the event content
-             for part in event.content.parts:
-                 # Case 1: Process function/tool calls
-                 if part.function_call:
-                     # Extract tool call information
-                     tool_call = {
-                         "name": part.function_call.name,
-                         "args": part.function_call.args,
-                         "id": part.function_call.id,
-                     }
-
-                     # Track tool calls and update UI
-                     self.tool_calls.append(event.model_dump())
-                     tool_message = (
-                         f"\n\nCalling tool: `{tool_call['name']}` "
-                         f"with args: `{tool_call['args']}`"
-                     )
-                     self.stream_handler.new_status(tool_message)
-
-                     # Add to conversation history
-                     self.st.session_state.user_chats[session]["messages"].append(
-                         event.model_dump(mode="json")
-                     )
-                 # Case 2: Process function/tool responses
-                 elif part.function_response:
-                     # Extract response content
-                     content = str(part.function_response.response)
-
-                     # Track responses and update UI
-                     self.tool_calls.append(event.model_dump())
-                     response_message = f"\n\nTool response: `{content}`"
-                     self.stream_handler.new_status(response_message)
-
-                     # Add to conversation history
-                     self.st.session_state.user_chats[session]["messages"].append(
-                         event.model_dump(mode="json")
-                     )
-                 # Case 3: Process text responses
-                 elif part.text:
-                     if event.is_final_response():
-                         # Store the final response in conversation history
-                         self.st.session_state.user_chats[session]["messages"].append(
-                             event.model_dump(mode="json")
-                         )
-                         # Save the invocation ID - it might be used by the feedback functionality
-                         self.st.session_state["invocation_id"] = event.invocation_id
-                     else:
-                         # For streaming chunks, accumulate text and update UI
-                         self.final_content += part.text
-                         self.stream_handler.new_token(part.text)
-
-
- def get_chain_response(st: Any, client: Client, stream_handler: StreamHandler) -> None:
-     """Process the chain response update the Streamlit UI.
-
-     This function initiates the event processing for a chain of operations,
-     involving an AI model's response generation and potential tool calls.
-     It creates an EventProcessor instance and starts the event processing loop.
-
-     Args:
-         st (Any): The Streamlit app instance, used for accessing session state
-             and updating the UI.
-         client (Client): An instance of the Client class used to stream events
-             from the server.
-         stream_handler (StreamHandler): An instance of the StreamHandler class
-             used to update the Streamlit UI with streaming content.
-
-     Returns:
-         None
-
-     Side effects:
-         - Updates the Streamlit UI with streaming tokens and tool call information.
-         - Modifies the session state to include the final AI message and run ID.
-         - Handles various events like chain starts/ends, tool calls, and model outputs.
-     """
-     processor = EventProcessor(st, client, stream_handler)
-     processor.process_events()
src/frontends/streamlit_adk/frontend/utils/title_summary.py
@@ -1,129 +0,0 @@
- # Copyright 2025 Google LLC
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # mypy: disable-error-code="assignment"
- import os
- from typing import Any
-
- import google.auth
- from google import genai
- from google.adk.events.event import Event
- from google.genai.types import Content, GenerateContentConfig, HttpOptions, Part
-
- SYS_INSTRUCTION = """Given a list of messages between a human and AI, come up with a short and relevant title for the conversation. Use up to 10 words. The title needs to be concise.
- Examples:
- **Input:**
- ```
- Human: hi, what is the best italian dish?
- AI: That's a tough one! Italy has so many amazing dishes, it's hard to pick just one "best." To help me give you a great suggestion, tell me a little more about what you're looking for.
- ```
- **Output:** Best italian dish
-
- **Input:**
-
- ```
- Human: How to fix a broken laptop screen?
- AI: Fixing a broken laptop screen can be tricky and often requires professional help. However, there are a few things you can try at home before resorting to a repair shop.
- ```
-
- **Output:** Fixing a broken laptop screen
-
- **Input:**
-
- ```
- Human: Can you write me a poem about the beach?
- AI: As the sun dips down below the horizon
- And the waves gently kiss the shore,
- I sit here and watch the ocean
- And feel its power evermore.
- ```
-
- **Output:** Poem about the beach
-
- **Input:**
-
- ```
- Human: What's the best way to learn to code?
- AI: There are many ways to learn to code, and the best method for you will depend on your learning style and goals.
- ```
-
- **Output:** How to learn to code
-
- If there's not enough context in the conversation to create a meaningful title, create a generic title like "New Conversation", or "A simple greeting".
-
- """
-
-
- class TitleGenerator:
-     """Generates concise titles for conversations using Gemini model."""
-
-     def __init__(self) -> None:
-         _, project_id = google.auth.default()
-
-         self.client = genai.Client(
-             http_options=HttpOptions(api_version="v1"),
-             vertexai=True,
-             project=project_id,
-             location=os.getenv("LOCATION", "us-central1"),
-         )
-
-     def summarize(self, events: list[Event]) -> str:
-         """Generates a title based on a list of conversation events."""
-         contents = []
-         # Extract text content from each event and add it to the contents list
-         for event in events:
-             if event.get("content") and event["content"].get("parts"):
-                 text_parts = [
-                     part.get("text", "")
-                     for part in event["content"]["parts"]
-                     if part.get("text")
-                 ]
-                 text_content = "\n".join(text_parts)
-                 if text_content.strip():
-                     contents.append(
-                         Content(
-                             role=event["content"].get("role", "user"),
-                             parts=[Part.from_text(text=text_content)],
-                         )
-                     )
-         contents.append(
-             Content(
-                 role="user",
-                 parts=[
-                     Part.from_text(text="End of conversation - Create one single title")
-                 ],
-             )
-         )
-         response = self.client.models.generate_content(
-             model="gemini-2.0-flash-001",
-             contents=contents,
-             config=GenerateContentConfig(
-                 system_instruction=SYS_INSTRUCTION,
-                 max_output_tokens=10,
-                 temperature=0,
-             ),
-         ).text
-         return response
-
-
- class DummySummarizer:
-     """A simple summarizer that returns a fixed string."""
-
-     def __init__(self) -> None:
-         """Initialize the dummy summarizer."""
-         pass
-
-     def summarize(self, **kwargs: Any) -> str:
-         """Return a simple summary string regardless of input."""
-         return "Conversation"