universal-mcp-applications 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
Potentially problematic release.
This version of universal-mcp-applications might be problematic.
- universal_mcp/applications/airtable/app.py +1 -0
- universal_mcp/applications/apollo/app.py +1 -0
- universal_mcp/applications/aws_s3/app.py +3 -4
- universal_mcp/applications/bill/app.py +3 -3
- universal_mcp/applications/box/app.py +2 -6
- universal_mcp/applications/braze/app.py +2 -6
- universal_mcp/applications/cal_com_v2/app.py +22 -64
- universal_mcp/applications/confluence/app.py +1 -0
- universal_mcp/applications/contentful/app.py +8 -19
- universal_mcp/applications/digitalocean/app.py +9 -27
- universal_mcp/applications/{domain-checker → domain_checker}/app.py +2 -1
- universal_mcp/applications/elevenlabs/app.py +98 -3188
- universal_mcp/applications/falai/app.py +1 -0
- universal_mcp/applications/file_system/__init__.py +1 -0
- universal_mcp/applications/file_system/app.py +96 -0
- universal_mcp/applications/fireflies/app.py +4 -3
- universal_mcp/applications/fpl/app.py +1 -0
- universal_mcp/applications/fpl/utils/fixtures.py +1 -1
- universal_mcp/applications/fpl/utils/helper.py +1 -1
- universal_mcp/applications/fpl/utils/position_utils.py +0 -1
- universal_mcp/applications/{ghost-content → ghost_content}/app.py +2 -1
- universal_mcp/applications/github/app.py +3 -1
- universal_mcp/applications/google_calendar/app.py +2 -1
- universal_mcp/applications/google_docs/app.py +1 -1
- universal_mcp/applications/google_drive/app.py +3 -68
- universal_mcp/applications/google_gemini/app.py +138 -618
- universal_mcp/applications/google_mail/app.py +2 -1
- universal_mcp/applications/{google-searchconsole → google_searchconsole}/app.py +1 -1
- universal_mcp/applications/google_sheet/app.py +2 -1
- universal_mcp/applications/google_sheet/helper.py +156 -116
- universal_mcp/applications/hashnode/app.py +1 -0
- universal_mcp/applications/{http-tools → http_tools}/app.py +2 -1
- universal_mcp/applications/hubspot/app.py +4 -1
- universal_mcp/applications/jira/app.py +7 -18
- universal_mcp/applications/markitdown/app.py +2 -3
- universal_mcp/applications/ms_teams/app.py +1 -1
- universal_mcp/applications/openai/app.py +2 -3
- universal_mcp/applications/outlook/app.py +1 -3
- universal_mcp/applications/pipedrive/app.py +2 -6
- universal_mcp/applications/reddit/app.py +1 -0
- universal_mcp/applications/replicate/app.py +3 -3
- universal_mcp/applications/resend/app.py +1 -2
- universal_mcp/applications/rocketlane/app.py +1 -0
- universal_mcp/applications/semrush/app.py +1 -1
- universal_mcp/applications/sentry/README.md +20 -20
- universal_mcp/applications/sentry/app.py +40 -40
- universal_mcp/applications/serpapi/app.py +2 -2
- universal_mcp/applications/sharepoint/app.py +1 -0
- universal_mcp/applications/shopify/app.py +1 -0
- universal_mcp/applications/slack/app.py +3 -3
- universal_mcp/applications/trello/app.py +9 -27
- universal_mcp/applications/twilio/__init__.py +1 -0
- universal_mcp/applications/{twillo → twilio}/app.py +2 -2
- universal_mcp/applications/twitter/README.md +1 -1
- universal_mcp/applications/twitter/api_segments/dm_conversations_api.py +2 -2
- universal_mcp/applications/twitter/api_segments/lists_api.py +1 -1
- universal_mcp/applications/unipile/app.py +5 -1
- universal_mcp/applications/whatsapp/app.py +18 -17
- universal_mcp/applications/whatsapp/audio.py +110 -0
- universal_mcp/applications/whatsapp/whatsapp.py +398 -0
- universal_mcp/applications/whatsapp_business/app.py +1 -1
- universal_mcp/applications/youtube/app.py +195 -191
- universal_mcp/applications/zenquotes/app.py +1 -1
- {universal_mcp_applications-0.1.2.dist-info → universal_mcp_applications-0.1.4.dist-info}/METADATA +4 -2
- {universal_mcp_applications-0.1.2.dist-info → universal_mcp_applications-0.1.4.dist-info}/RECORD +76 -75
- universal_mcp/applications/google-ads/__init__.py +0 -1
- universal_mcp/applications/google-ads/app.py +0 -23
- universal_mcp/applications/twillo/README.md +0 -0
- universal_mcp/applications/twillo/__init__.py +0 -1
- /universal_mcp/applications/{domain-checker → domain_checker}/README.md +0 -0
- /universal_mcp/applications/{domain-checker → domain_checker}/__init__.py +0 -0
- /universal_mcp/applications/{ghost-content → ghost_content}/README.md +0 -0
- /universal_mcp/applications/{ghost-content → ghost_content}/__init__.py +0 -0
- /universal_mcp/applications/{google-searchconsole → google_searchconsole}/README.md +0 -0
- /universal_mcp/applications/{google-searchconsole → google_searchconsole}/__init__.py +0 -0
- /universal_mcp/applications/{http-tools → http_tools}/README.md +0 -0
- /universal_mcp/applications/{http-tools → http_tools}/__init__.py +0 -0
- /universal_mcp/applications/{google-ads → twilio}/README.md +0 -0
- {universal_mcp_applications-0.1.2.dist-info → universal_mcp_applications-0.1.4.dist-info}/WHEEL +0 -0
- {universal_mcp_applications-0.1.2.dist-info → universal_mcp_applications-0.1.4.dist-info}/licenses/LICENSE +0 -0
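The directory renames in this release (domain-checker → domain_checker, ghost-content → ghost_content, google-searchconsole → google_searchconsole, http-tools → http_tools, plus the misspelled twillo → twilio) replace hyphens with underscores. A plausible motivation, sketched below: hyphenated directory names are not valid Python identifiers, so those packages could not be imported with a plain import statement. Only the module paths come from this diff; the class name DomainCheckerApp is hypothetical:

    # 0.1.2: "domain-checker" is not a valid Python identifier, so a plain
    # import statement cannot name it:
    #     from universal_mcp.applications.domain-checker.app import ...  # SyntaxError
    # 0.1.4: the underscored name imports normally
    # (DomainCheckerApp is a hypothetical class name, not confirmed by this diff):
    from universal_mcp.applications.domain_checker.app import DomainCheckerApp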

universal_mcp/applications/google_gemini/app.py
(Where the diff viewer truncated a line's content, only its "-" marker remains.)

@@ -1,663 +1,183 @@
-import
-
+import uuid
+import wave
+from typing import Annotated  # Added Literal for type hinting
+
+from google import genai
+from google.genai import types
+from loguru import logger
+from PIL import Image

-import httpx
 from universal_mcp.applications.application import APIApplication
+from universal_mcp.applications.file_system.app import FileSystemApp
 from universal_mcp.integrations import Integration

-logger = logging.getLogger(__name__)
-if not logger.handlers:
-    logging.basicConfig(
-        level=logging.DEBUG,
-        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
-    )
-

 class GoogleGeminiApp(APIApplication):
     def __init__(self, integration: Integration = None, **kwargs) -> None:
-        super().__init__(name="
-        self.
-
-
-
-
-
-
-
-
-
-        return {}
-
-    def _add_api_key_param(self, params: dict[str, Any] | None) -> dict[str, Any]:
-        """Helper to add the API key as a 'key' query parameter."""
-        actual_params = params.copy() if params else {}
-        if "key" not in actual_params and self.integration:
-            try:
-                credentials = self.integration.get_credentials()
-                if not isinstance(credentials, dict):
-                    logger.warning(
-                        f"Integration credentials for {self.name} are not a dictionary. Cannot retrieve API key."
-                    )
-                    return actual_params  # or raise error
-
-                api_key = (
-                    credentials.get("api_key")
-                    or credentials.get("API_KEY")
-                    or credentials.get("apiKey")
-                )
-                if api_key:
-                    actual_params["key"] = api_key
-                    logger.debug("Added API key as query parameter.")
-                else:
-                    logger.warning(
-                        f"API key not found in integration credentials for {self.name} using keys: api_key, API_KEY, apiKey."
-                    )
-            except Exception as e:
-                logger.error(
-                    f"Error retrieving API key from integration for {self.name}: {e}"
-                )
-        elif not self.integration:
-            logger.warning(
-                f"No integration provided for {self.name}. API key cannot be added automatically."
-            )
-        return actual_params
-
-    def _get(self, url: str, params: dict[str, Any] | None = None) -> httpx.Response:
-        """
-        Make a GET request, ensuring the API key is added as a query parameter.
-        """
-        actual_params = self._add_api_key_param(params)
-        logger.debug(f"Making GET request to {url} with params: {actual_params}")
-        return super()._get(url, params=actual_params)
-
-    def _post(
-        self, url: str, data: dict[str, Any], params: dict[str, Any] | None = None
-    ) -> httpx.Response:
-        """
-        Make a POST request, ensuring the API key is added as a query parameter
-        and content_type is explicitly set to application/json.
-        """
-        actual_params = self._add_api_key_param(params)
-        logger.debug(
-            f"Making POST request to {url} with params: {actual_params} and data: {data}"
-        )
-        # Explicitly set content_type for clarity and robustness
-        return super()._post(
-            url, data=data, params=actual_params, content_type="application/json"
+        super().__init__(name="google_gemini", integration=integration, **kwargs)
+        self._genai_client = None
+
+    @property
+    def genai_client(self) -> genai.Client:
+        if self._genai_client is not None:
+            return self._genai_client
+        credentials = self.integration.get_credentials()
+        api_key = (
+            credentials.get("api_key")
+            or credentials.get("API_KEY")
+            or credentials.get("apiKey")
         )
+        if not api_key:
+            raise ValueError("API key not found in integration credentials")
+        self._genai_client = genai.Client(api_key=api_key)
+        return self._genai_client

-    def
-
-
-        ""
-
-
-        return super()._delete(url, params=actual_params)
-
-    def fetch_model(self) -> dict[str, Any]:
-        """
-        Retrieves the configuration details of current model via a GET request.
-
-        Returns:
-            dict[str, Any]: model
-
-        Tags:
-            Models, important
-        """
-        url = f"{self.base_url}/v1beta/models/gemini-2.0-flash"
-        query_params = {}
-        response = self._get(url, params=query_params)
-        response.raise_for_status()
-        return response.json()
-
-    def fetch_models(self, pageSize=None, pageToken=None) -> dict[str, Any]:
-        """
-        Retrieves a paginated list of available models, supporting page size and token parameters for result navigation.
-
-        Args:
-            pageSize (string): The `pageSize` parameter specifies the maximum number of items to include in each page of the response for the GET operation at the `/v1beta/models` path. Example: '5'.
-            pageToken (string): Used in GET requests to specify the page token for fetching the next page of results. Example: 'Chxtb2RlbHMvZ2VtaW5pLTEuNS1wcm8tbGF0ZXN0'.
-
-        Returns:
-            dict[str, Any]: models
-
-        Tags:
-            Models, important
-        """
-        url = f"{self.base_url}/v1beta/models"
-        query_params = {
-            k: v
-            for k, v in [("pageSize", pageSize), ("pageToken", pageToken)]
-            if v is not None
-        }
-        response = self._get(url, params=query_params)
-        response.raise_for_status()
-        return response.json()
-
-    def text_only_input(self, query: str) -> dict[str, Any]:
-        """
-        Generates content using the Gemini 1.5 Flash model via POST request,
-        taking a simple string query.
+    async def generate_text(
+        self,
+        prompt: Annotated[str, "The prompt to generate text from"],
+        model: str = "gemini-2.5-flash",
+    ) -> str:
+        """Generates text using the Google Gemini model.

         Args:
-
-
+            prompt (str): The prompt to generate text from.
+            model (str, optional): The Gemini model to use for text generation. Defaults to "gemini-2.5-flash".

         Returns:
-
+            str: The generated text response from the Gemini model.

         Raises:
-            ValueError: If the
-
+            ValueError: If the API key is not found in the integration credentials.
+            Exception: If the underlying client or API call fails.

+        Example:
+            response = app.generate_text("Tell me a joke.")
+
         Tags:
             important
         """
-
-
-
-        contents_payload = [{"parts": [{"text": query}]}]
-
-        request_body = {
-            "contents": contents_payload,
-        }
-        model_name = "gemini-2.0-flash"
-
-        url = f"{self.base_url}/v1beta/models/{model_name}:generateContent"
-
-        query_params = {}
-
-        logger.info(
-            f'Calling Gemini API for model: {model_name} with query: "{query[:70]}{"..." if len(query) > 70 else ""}"'
-        )
-
-        response = self._post(url, data=request_body, params=query_params)
-        response.raise_for_status()
-        data = response.json()
-        try:
-            extracted_text = data["candidates"][0]["content"]["parts"][0]["text"]
-            return extracted_text
-        except (KeyError, IndexError, TypeError):
-            return data
-
-    def generate_atext_stream(self, query: str) -> dict[str, Any]:
-        """
-        Generates a streaming response from the Gemini 1.5 Flash model for multimodal input content.
-
-        Args:
-            query (str): The text prompt for the model.
-
-        Returns:
-            Any: generate a text stream
-
-        Tags:
-            Text Generation
-        """
-        if not query or not isinstance(query, str):
-            raise ValueError("Query must be a non-empty string.")
-
-        contents_payload = [{"parts": [{"text": query}]}]
-
-        request_body = {
-            "contents": contents_payload,
-        }
-        model_name = "gemini-2.0-flash"
-        url = f"{self.base_url}/v1beta/models/{model_name}:streamGenerateContent"
-        query_params = {}
-
-        response = self._post(url, data=request_body, params=query_params)
-        response.raise_for_status()
-        data = response.json()
-        try:
-            extracted_text = data["candidates"][0]["content"]["parts"][0]["text"]
-            return extracted_text
-        except (KeyError, IndexError, TypeError):
-            return data
+        response = self.genai_client.generate_content(prompt, model=model)
+        return response.text

-    def
-        self,
-
+    async def generate_image(
+        self,
+        prompt: Annotated[str, "The prompt to generate image from"],
+        image: Annotated[str, "The reference image path"] | None = None,
+        model: str = "gemini-2.5-flash-image-preview",
+    ) -> list:
         """
-
-
+        Generates an image using the Google Gemini model and returns a list of results.
+        Each result is a dict with either 'text' or 'image_bytes' (raw image data).

         Args:
-
-
-            If None, the 'file' field will be omitted from the request if the API supports that,
-            or it might result in an error if the 'file' field is mandatory.
+            prompt (str): The prompt to generate image from.
+            model (str, optional): The Gemini model to use for image generation. Defaults to "gemini-2.5-flash-image-preview".

         Returns:
-
-
-
+            list: A list of dicts, each containing either 'text' or 'image_bytes'.
+
         Tags:
-
+            important
         """
-
-
-
-
-
-
-
-
-        url = f"{self.base_url}/upload/v1beta/files"
-
-        query_params = {}
-
-        response = self._post(
-            url, data=request_body if request_body else None, params=query_params
+        # The Gemini API is synchronous, so run in a thread
+        contents = [prompt]
+        if image:
+            image = Image.open(image)
+            contents.append(image)
+        response = self.genai_client.models.generate_content(
+            model=model,
+            contents=contents,
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            {
-
-
-
-
-                "text": "Summarize the uploaded document."
-            },
-            {
-                "file_data": {
-                    "file_uri": "{{FILE_URI}}",
-                    "mime_type": "application/pdf"
-                }
-            }
-        ]
-    }
-]
-}
-            ```
-
-        Returns:
-            dict[str, Any]: prompt document
-
-        Tags:
-            Document Processing
-        """
-        request_body = {
-            "contents": contents,
-        }
-        request_body = {k: v for k, v in request_body.items() if v is not None}
-        url = f"{self.base_url}/v1beta/models/gemini-1.5-pro-latest:generateContent"
-        query_params = {}
-        response = self._post(url, data=request_body, params=query_params)
-        response.raise_for_status()
-        return response.json()
-
-    def prompt_document(
-        self, contents: list[dict[str, Any]] | None = None
-    ) -> dict[str, Any]:
-        """
-        Generates content using the Gemini model with document context.
-
-        Args:
-            contents (Optional[List[Dict[str, Any]]]): List of content parts, including text and file data.
-            Example:
-            ```json
-            [
-                {
-                    "parts": [
-                        {"text": "Summarize the uploaded document."},
-                        {"file_data": {"file_uri": "files/your_file_id", "mime_type": "application/pdf"}}
-                    ]
-                }
-            ]
-            ```
-
-        Returns:
-            dict[str, Any]: The model's response.
-        """
-        request_body = {"contents": contents}
-        request_body = {k: v for k, v in request_body.items() if v is not None}
-        if not request_body.get("contents"):  # API might require contents
-            raise ValueError(
-                "Missing required parameter 'contents' for prompt_document."
-            )
-
-        url = f"{self.base_url}/v1beta/models/gemini-:generateContent"
-        query_params = {}
-        response = self._post(url, data=request_body, params=query_params)
-        return response.json()
-
-    def text_tokens(self, query: str) -> dict[str, Any]:
-        """
-        Calculates the number of tokens and billable characters for input content using a gemini-2.0-flash.
-
-        Args:
-            query (str): The text prompt for the model.
-
-        Returns:
-            dict[str, Any]: text tokens / chat tokens / media tokens
-
-        Tags:
-            Count Tokens, important
-        """
-        if not query or not isinstance(query, str):
-            raise ValueError("Query must be a non-empty string.")
-
-        contents = [{"parts": [{"text": query}]}]
-        request_body = {
-            "contents": contents,
-        }
-        request_body = {k: v for k, v in request_body.items() if v is not None}
-        model_name = "gemini-2.0-flash"
-        url = f"{self.base_url}/v1beta/models/{model_name}:countTokens"
-        query_params = {}
-        response = self._post(url, data=request_body, params=query_params)
-        response.raise_for_status()
-        return response.json()
-
-    def fetch_tuned_models(self, page_size=None) -> dict[str, Any]:
-        """
-        Retrieves a list of tuned models at the specified page size using the GET method.
-
-        Args:
-            page_size (string): Specifies the maximum number of items to return in a single response page. Example: '10'.
-
-        Returns:
-            dict[str, Any]: fetch models Copy
-
-        Tags:
-            Fine Tunning
-        """
-        url = f"{self.base_url}/v1beta/tunedModels"
-        query_params = {k: v for k, v in [("page_size", page_size)] if v is not None}
-        response = self._get(url, params=query_params)
-        response.raise_for_status()
-        return response.json()
-
-    def create_atuned_model(
-        self, base_model=None, display_name=None, tuning_task=None
-    ) -> dict[str, Any]:
-        """
-        Creates a tuned model using the "POST" method at the "/v1beta/tunedModels" endpoint and returns a response upon successful creation.
-
-        Args:
-            base_model (string): base_model Example: 'models/gemini-1.5-flash-001-tuning'.
-            display_name (string): display_name Example: 'number generator model'.
-            tuning_task (object): tuning_task
-            Example:
-            ```json
-            {
-              "base_model": "models/gemini-1.5-flash-001-tuning",
-              "display_name": "number generator model",
-              "tuning_task": {
-                "hyperparameters": {
-                  "batch_size": 2,
-                  "epoch_count": 5,
-                  "learning_rate": 0.001
-                },
-                "training_data": {
-                  "examples": {
-                    "examples": [
-                      {
-                        "output": "2",
-                        "text_input": "1"
-                      },
-                      {
-                        "output": "4",
-                        "text_input": "3"
-                      },
-                      {
-                        "output": "-2",
-                        "text_input": "-3"
-                      },
-                      {
-                        "output": "twenty three",
-                        "text_input": "twenty two"
-                      },
-                      {
-                        "output": "two hundred one",
-                        "text_input": "two hundred"
-                      },
-                      {
-                        "output": "one hundred",
-                        "text_input": "ninety nine"
-                      },
-                      {
-                        "output": "9",
-                        "text_input": "8"
-                      },
-                      {
-                        "output": "-97",
-                        "text_input": "-98"
-                      },
-                      {
-                        "output": "1,001",
-                        "text_input": "1,000"
-                      },
-                      {
-                        "output": "10,100,001",
-                        "text_input": "10,100,000"
-                      },
-                      {
-                        "output": "fourteen",
-                        "text_input": "thirteen"
-                      },
-                      {
-                        "output": "eighty one",
-                        "text_input": "eighty"
-                      },
-                      {
-                        "output": "two",
-                        "text_input": "one"
-                      },
-                      {
-                        "output": "four",
-                        "text_input": "three"
-                      },
-                      {
-                        "output": "eight",
-                        "text_input": "seven"
-                      }
-                    ]
-                  }
-                }
-              }
-            }
-            ```
-
-        Returns:
-            dict[str, Any]: create a tuned model
-
-        Tags:
-            Fine Tunning
-        """
-        request_body = {
-            "base_model": base_model,
-            "display_name": display_name,
-            "tuning_task": tuning_task,
-        }
-        request_body = {k: v for k, v in request_body.items() if v is not None}
-        url = f"{self.base_url}/v1beta/tunedModels"
-        query_params = {}
-        response = self._post(url, data=request_body, params=query_params)
-        response.raise_for_status()
-        return response.json()
-
-    def prompt_the_tuned_model(self, tunedModel, contents=None) -> dict[str, Any]:
-        """
-        Generates content using a specified tuned model defined at path "/v1beta/{tunedModel}:generateContent" by sending a POST request.
-
-        Args:
-            tunedModel (string): tunedModel
-            contents (array): contents
-            Example:
-            ```json
-            {
-              "contents": [
-                {
-                  "parts": [
-                    {
-                      "text": "LXIII"
-                    }
-                  ]
-                }
-              ]
-            }
-            ```
-
-        Returns:
-            dict[str, Any]: prompt the tuned model
-
-        Tags:
-            Fine Tunning
-        """
-        if tunedModel is None:
-            raise ValueError("Missing required parameter 'tunedModel'")
-        request_body = {
-            "contents": contents,
-        }
-        request_body = {k: v for k, v in request_body.items() if v is not None}
-        url = f"{self.base_url}/v1beta/{tunedModel}:generateContent"
-        query_params = {}
-        response = self._post(url, data=request_body, params=query_params)
-        response.raise_for_status()
-        return response.json()
-
-    def delete_tuned_model(self, tunedModel) -> dict[str, Any]:
-        """
-        Deletes a specified tuned model and returns a success status upon removal.
-
-        Args:
-            tunedModel (string): tunedModel
-
-        Returns:
-            dict[str, Any]: delete tuned model
-
-        Tags:
-            Fine Tunning
-        """
-        if tunedModel is None:
-            raise ValueError("Missing required parameter 'tunedModel'")
-        url = f"{self.base_url}/v1beta/{tunedModel}"
-        query_params = {}
-        response = self._delete(url, params=query_params)
-        response.raise_for_status()
-        return response.json()
-
-    def generate_embeddings(
+        candidate = response.candidates[0]
+        text = ""
+        for part in candidate.content.parts:
+            if part.text is not None:
+                text += part.text
+            elif part.inline_data is not None:
+                # Return the raw image bytes
+                image_bytes = part.inline_data.data
+                upload_result = await FileSystemApp.write_file(
+                    image_bytes, f"/tmp/{uuid.uuid4()}.png"
+                )
+                logger.info(f"Upload result: {upload_result['status']}")
+                image_url = upload_result["data"]["url"]
+                logger.info(f"Image URL: {image_url}")
+                text += f""
+        logger.info(f"Text: {text}")
+        return {"text": text}
+
+    async def generate_audio(
         self,
-
-
-
-
-    ) -> dict[str, Any]:
-        """
-        Generates a text embedding vector from input text using the specified Gemini Embedding model, allowing for semantic analysis and comparison of textual content.
+        prompt: Annotated[str, "The prompt to generate audio from"],
+        model: str = "gemini-2.5-flash-preview-tts",
+    ) -> str:
+        """Generates audio using the Google Gemini model and returns the uploaded audio URL.

         Args:
-
-
+            prompt (str): The prompt to generate audio from.
+            model (str, optional): The Gemini model to use for audio generation. Defaults to "gemini-2.5-flash-preview-tts".

         Returns:
-
-
+            str: The URL of the uploaded audio file.
+
         Tags:
-
+            important
         """
-        if not query or not isinstance(query, str):
-            raise ValueError("Query must be a non-empty string.")

-
-
-        "
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Set up the wave file to save the output:
+        def wave_file(filename, pcm, channels=1, rate=24000, sample_width=2):
+            with wave.open(filename, "wb") as wf:
+                wf.setnchannels(channels)
+                wf.setsampwidth(sample_width)
+                wf.setframerate(rate)
+                wf.writeframes(pcm)
+
+        response = self.genai_client.models.generate_content(
+            model=model,
+            contents=prompt,
+            config=types.GenerateContentConfig(
+                response_modalities=["AUDIO"],
+                speech_config=types.SpeechConfig(
+                    voice_config=types.VoiceConfig(
+                        prebuilt_voice_config=types.PrebuiltVoiceConfig(
+                            voice_name="Kore",
+                        )
+                    )
+                ),
+            ),
+        )

-
-            queries (List[str]): A list of texts to generate embeddings for.
-            model_name (string): The name of the embedding model to use. Default is "gemini-embedding-exp-03-07".
+        data = response.candidates[0].content.parts[0].inline_data.data

-
-
+        file_name = "/tmp/audio.wav"
+        wave_file(file_name, data)  # Saves the file to current directory
+        # Upload the audio file directly
+        upload_result = await FileSystemApp.move_file(
+            file_name, f"/tmp/{uuid.uuid4()}.wav"
+        )
+        logger.info(f"Audio upload result: {upload_result['status']}")
+        audio_url = upload_result["data"]["url"]
+        logger.info(f"Audio URL: {audio_url}")

-
-            Embeddings
-        """
-        if not queries:
-            raise ValueError("Queries list cannot be empty.")
-        if not all(isinstance(q, str) and q for q in queries):
-            raise ValueError("All items in the queries list must be non-empty strings.")
+        return audio_url

-
-
-
-
-
-
-        }
-        )
-        request_body = {"requests": individual_requests}
+    def list_tools(self):
+        return [
+            self.generate_text,
+            self.generate_image,
+            self.generate_audio,
+        ]

-        url = f"{self.base_url}/v1beta/models/{model_name}:batchEmbedContents"
-        query_params = {}
-        response = self._post(url, data=request_body, params=query_params)
-        response.raise_for_status()
-        return response.json()

-
-
-
+async def test_google_gemini():
+    app = GoogleGeminiApp()
+    result = await app.generate_image(
+        "A beautiful women potrait with red green hair color"
+    )
+    print(result)

-        Args:
-            version (string): Specifies the API version to use for the request, allowing clients to target a specific release without modifying the URI structure. Example: 'v1beta'.

-
-
-        """
-        url = f"{self.base_url}/$discovery/rest"
-        query_params = {k: v for k, v in [("version", version)] if v is not None}
-        response = self._get(url, params=query_params)
-        response.raise_for_status()
-        return response.json()
+if __name__ == "__main__":
+    import asyncio

-
-        return [
-            self.fetch_model,
-            self.fetch_models,
-            self.text_only_input,
-            self.generate_atext_stream,
-            self.resumable_upload_request,
-            self.prompt_document,
-            self.text_tokens,
-            self.fetch_tuned_models,
-            self.create_atuned_model,
-            self.prompt_the_tuned_model,
-            self.delete_tuned_model,
-            self.generate_embeddings,
-            self.batch_embeddings,
-            self.discovery_document,
-        ]
+    asyncio.run(test_google_gemini())