universal-mcp-applications 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/applications/ahrefs/README.md +51 -0
- universal_mcp/applications/ahrefs/__init__.py +1 -0
- universal_mcp/applications/ahrefs/app.py +2291 -0
- universal_mcp/applications/airtable/README.md +22 -0
- universal_mcp/applications/airtable/__init__.py +1 -0
- universal_mcp/applications/airtable/app.py +479 -0
- universal_mcp/applications/apollo/README.md +44 -0
- universal_mcp/applications/apollo/__init__.py +1 -0
- universal_mcp/applications/apollo/app.py +1847 -0
- universal_mcp/applications/asana/README.md +199 -0
- universal_mcp/applications/asana/__init__.py +1 -0
- universal_mcp/applications/asana/app.py +9509 -0
- universal_mcp/applications/aws-s3/README.md +0 -0
- universal_mcp/applications/aws-s3/__init__.py +1 -0
- universal_mcp/applications/aws-s3/app.py +552 -0
- universal_mcp/applications/bill/README.md +0 -0
- universal_mcp/applications/bill/__init__.py +1 -0
- universal_mcp/applications/bill/app.py +8705 -0
- universal_mcp/applications/box/README.md +307 -0
- universal_mcp/applications/box/__init__.py +1 -0
- universal_mcp/applications/box/app.py +15987 -0
- universal_mcp/applications/braze/README.md +106 -0
- universal_mcp/applications/braze/__init__.py +1 -0
- universal_mcp/applications/braze/app.py +4754 -0
- universal_mcp/applications/cal-com-v2/README.md +150 -0
- universal_mcp/applications/cal-com-v2/__init__.py +1 -0
- universal_mcp/applications/cal-com-v2/app.py +5541 -0
- universal_mcp/applications/calendly/README.md +53 -0
- universal_mcp/applications/calendly/__init__.py +1 -0
- universal_mcp/applications/calendly/app.py +1436 -0
- universal_mcp/applications/canva/README.md +43 -0
- universal_mcp/applications/canva/__init__.py +1 -0
- universal_mcp/applications/canva/app.py +941 -0
- universal_mcp/applications/clickup/README.md +135 -0
- universal_mcp/applications/clickup/__init__.py +1 -0
- universal_mcp/applications/clickup/app.py +5009 -0
- universal_mcp/applications/coda/README.md +108 -0
- universal_mcp/applications/coda/__init__.py +1 -0
- universal_mcp/applications/coda/app.py +3671 -0
- universal_mcp/applications/confluence/README.md +198 -0
- universal_mcp/applications/confluence/__init__.py +1 -0
- universal_mcp/applications/confluence/app.py +6273 -0
- universal_mcp/applications/contentful/README.md +17 -0
- universal_mcp/applications/contentful/__init__.py +1 -0
- universal_mcp/applications/contentful/app.py +364 -0
- universal_mcp/applications/crustdata/README.md +25 -0
- universal_mcp/applications/crustdata/__init__.py +1 -0
- universal_mcp/applications/crustdata/app.py +586 -0
- universal_mcp/applications/dialpad/README.md +202 -0
- universal_mcp/applications/dialpad/__init__.py +1 -0
- universal_mcp/applications/dialpad/app.py +5949 -0
- universal_mcp/applications/digitalocean/README.md +463 -0
- universal_mcp/applications/digitalocean/__init__.py +1 -0
- universal_mcp/applications/digitalocean/app.py +20835 -0
- universal_mcp/applications/domain-checker/README.md +13 -0
- universal_mcp/applications/domain-checker/__init__.py +1 -0
- universal_mcp/applications/domain-checker/app.py +265 -0
- universal_mcp/applications/e2b/README.md +12 -0
- universal_mcp/applications/e2b/__init__.py +1 -0
- universal_mcp/applications/e2b/app.py +187 -0
- universal_mcp/applications/elevenlabs/README.md +88 -0
- universal_mcp/applications/elevenlabs/__init__.py +1 -0
- universal_mcp/applications/elevenlabs/app.py +3235 -0
- universal_mcp/applications/exa/README.md +15 -0
- universal_mcp/applications/exa/__init__.py +1 -0
- universal_mcp/applications/exa/app.py +221 -0
- universal_mcp/applications/falai/README.md +17 -0
- universal_mcp/applications/falai/__init__.py +1 -0
- universal_mcp/applications/falai/app.py +331 -0
- universal_mcp/applications/figma/README.md +49 -0
- universal_mcp/applications/figma/__init__.py +1 -0
- universal_mcp/applications/figma/app.py +1090 -0
- universal_mcp/applications/firecrawl/README.md +20 -0
- universal_mcp/applications/firecrawl/__init__.py +1 -0
- universal_mcp/applications/firecrawl/app.py +514 -0
- universal_mcp/applications/fireflies/README.md +25 -0
- universal_mcp/applications/fireflies/__init__.py +1 -0
- universal_mcp/applications/fireflies/app.py +506 -0
- universal_mcp/applications/fpl/README.md +23 -0
- universal_mcp/applications/fpl/__init__.py +1 -0
- universal_mcp/applications/fpl/app.py +1327 -0
- universal_mcp/applications/fpl/utils/api.py +142 -0
- universal_mcp/applications/fpl/utils/fixtures.py +629 -0
- universal_mcp/applications/fpl/utils/helper.py +982 -0
- universal_mcp/applications/fpl/utils/league_utils.py +546 -0
- universal_mcp/applications/fpl/utils/position_utils.py +68 -0
- universal_mcp/applications/ghost-content/README.md +25 -0
- universal_mcp/applications/ghost-content/__init__.py +1 -0
- universal_mcp/applications/ghost-content/app.py +654 -0
- universal_mcp/applications/github/README.md +1049 -0
- universal_mcp/applications/github/__init__.py +1 -0
- universal_mcp/applications/github/app.py +50600 -0
- universal_mcp/applications/gong/README.md +63 -0
- universal_mcp/applications/gong/__init__.py +1 -0
- universal_mcp/applications/gong/app.py +2297 -0
- universal_mcp/applications/google-ads/README.md +0 -0
- universal_mcp/applications/google-ads/__init__.py +1 -0
- universal_mcp/applications/google-ads/app.py +23 -0
- universal_mcp/applications/google-calendar/README.md +21 -0
- universal_mcp/applications/google-calendar/__init__.py +1 -0
- universal_mcp/applications/google-calendar/app.py +574 -0
- universal_mcp/applications/google-docs/README.md +25 -0
- universal_mcp/applications/google-docs/__init__.py +1 -0
- universal_mcp/applications/google-docs/app.py +760 -0
- universal_mcp/applications/google-drive/README.md +68 -0
- universal_mcp/applications/google-drive/__init__.py +1 -0
- universal_mcp/applications/google-drive/app.py +4936 -0
- universal_mcp/applications/google-gemini/README.md +25 -0
- universal_mcp/applications/google-gemini/__init__.py +1 -0
- universal_mcp/applications/google-gemini/app.py +663 -0
- universal_mcp/applications/google-mail/README.md +31 -0
- universal_mcp/applications/google-mail/__init__.py +1 -0
- universal_mcp/applications/google-mail/app.py +1354 -0
- universal_mcp/applications/google-searchconsole/README.md +21 -0
- universal_mcp/applications/google-searchconsole/__init__.py +1 -0
- universal_mcp/applications/google-searchconsole/app.py +320 -0
- universal_mcp/applications/google-sheet/README.md +36 -0
- universal_mcp/applications/google-sheet/__init__.py +1 -0
- universal_mcp/applications/google-sheet/app.py +1941 -0
- universal_mcp/applications/hashnode/README.md +20 -0
- universal_mcp/applications/hashnode/__init__.py +1 -0
- universal_mcp/applications/hashnode/app.py +455 -0
- universal_mcp/applications/heygen/README.md +44 -0
- universal_mcp/applications/heygen/__init__.py +1 -0
- universal_mcp/applications/heygen/app.py +961 -0
- universal_mcp/applications/http-tools/README.md +16 -0
- universal_mcp/applications/http-tools/__init__.py +1 -0
- universal_mcp/applications/http-tools/app.py +153 -0
- universal_mcp/applications/hubspot/README.md +239 -0
- universal_mcp/applications/hubspot/__init__.py +1 -0
- universal_mcp/applications/hubspot/app.py +416 -0
- universal_mcp/applications/jira/README.md +600 -0
- universal_mcp/applications/jira/__init__.py +1 -0
- universal_mcp/applications/jira/app.py +28804 -0
- universal_mcp/applications/klaviyo/README.md +313 -0
- universal_mcp/applications/klaviyo/__init__.py +1 -0
- universal_mcp/applications/klaviyo/app.py +11236 -0
- universal_mcp/applications/linkedin/README.md +15 -0
- universal_mcp/applications/linkedin/__init__.py +1 -0
- universal_mcp/applications/linkedin/app.py +243 -0
- universal_mcp/applications/mailchimp/README.md +281 -0
- universal_mcp/applications/mailchimp/__init__.py +1 -0
- universal_mcp/applications/mailchimp/app.py +10937 -0
- universal_mcp/applications/markitdown/README.md +12 -0
- universal_mcp/applications/markitdown/__init__.py +1 -0
- universal_mcp/applications/markitdown/app.py +63 -0
- universal_mcp/applications/miro/README.md +151 -0
- universal_mcp/applications/miro/__init__.py +1 -0
- universal_mcp/applications/miro/app.py +5429 -0
- universal_mcp/applications/ms-teams/README.md +42 -0
- universal_mcp/applications/ms-teams/__init__.py +1 -0
- universal_mcp/applications/ms-teams/app.py +1823 -0
- universal_mcp/applications/neon/README.md +74 -0
- universal_mcp/applications/neon/__init__.py +1 -0
- universal_mcp/applications/neon/app.py +2018 -0
- universal_mcp/applications/notion/README.md +30 -0
- universal_mcp/applications/notion/__init__.py +1 -0
- universal_mcp/applications/notion/app.py +527 -0
- universal_mcp/applications/openai/README.md +22 -0
- universal_mcp/applications/openai/__init__.py +1 -0
- universal_mcp/applications/openai/app.py +759 -0
- universal_mcp/applications/outlook/README.md +20 -0
- universal_mcp/applications/outlook/__init__.py +1 -0
- universal_mcp/applications/outlook/app.py +444 -0
- universal_mcp/applications/perplexity/README.md +12 -0
- universal_mcp/applications/perplexity/__init__.py +1 -0
- universal_mcp/applications/perplexity/app.py +65 -0
- universal_mcp/applications/pipedrive/README.md +284 -0
- universal_mcp/applications/pipedrive/__init__.py +1 -0
- universal_mcp/applications/pipedrive/app.py +12924 -0
- universal_mcp/applications/posthog/README.md +132 -0
- universal_mcp/applications/posthog/__init__.py +1 -0
- universal_mcp/applications/posthog/app.py +7125 -0
- universal_mcp/applications/reddit/README.md +135 -0
- universal_mcp/applications/reddit/__init__.py +1 -0
- universal_mcp/applications/reddit/app.py +4652 -0
- universal_mcp/applications/replicate/README.md +18 -0
- universal_mcp/applications/replicate/__init__.py +1 -0
- universal_mcp/applications/replicate/app.py +495 -0
- universal_mcp/applications/resend/README.md +40 -0
- universal_mcp/applications/resend/__init__.py +1 -0
- universal_mcp/applications/resend/app.py +881 -0
- universal_mcp/applications/retell/README.md +21 -0
- universal_mcp/applications/retell/__init__.py +1 -0
- universal_mcp/applications/retell/app.py +333 -0
- universal_mcp/applications/rocketlane/README.md +70 -0
- universal_mcp/applications/rocketlane/__init__.py +1 -0
- universal_mcp/applications/rocketlane/app.py +4346 -0
- universal_mcp/applications/semanticscholar/README.md +25 -0
- universal_mcp/applications/semanticscholar/__init__.py +1 -0
- universal_mcp/applications/semanticscholar/app.py +482 -0
- universal_mcp/applications/semrush/README.md +44 -0
- universal_mcp/applications/semrush/__init__.py +1 -0
- universal_mcp/applications/semrush/app.py +2081 -0
- universal_mcp/applications/sendgrid/README.md +362 -0
- universal_mcp/applications/sendgrid/__init__.py +1 -0
- universal_mcp/applications/sendgrid/app.py +9752 -0
- universal_mcp/applications/sentry/README.md +186 -0
- universal_mcp/applications/sentry/__init__.py +1 -0
- universal_mcp/applications/sentry/app.py +7471 -0
- universal_mcp/applications/serpapi/README.md +14 -0
- universal_mcp/applications/serpapi/__init__.py +1 -0
- universal_mcp/applications/serpapi/app.py +293 -0
- universal_mcp/applications/sharepoint/README.md +0 -0
- universal_mcp/applications/sharepoint/__init__.py +1 -0
- universal_mcp/applications/sharepoint/app.py +215 -0
- universal_mcp/applications/shopify/README.md +321 -0
- universal_mcp/applications/shopify/__init__.py +1 -0
- universal_mcp/applications/shopify/app.py +15392 -0
- universal_mcp/applications/shortcut/README.md +128 -0
- universal_mcp/applications/shortcut/__init__.py +1 -0
- universal_mcp/applications/shortcut/app.py +4478 -0
- universal_mcp/applications/slack/README.md +0 -0
- universal_mcp/applications/slack/__init__.py +1 -0
- universal_mcp/applications/slack/app.py +570 -0
- universal_mcp/applications/spotify/README.md +91 -0
- universal_mcp/applications/spotify/__init__.py +1 -0
- universal_mcp/applications/spotify/app.py +2526 -0
- universal_mcp/applications/supabase/README.md +87 -0
- universal_mcp/applications/supabase/__init__.py +1 -0
- universal_mcp/applications/supabase/app.py +2970 -0
- universal_mcp/applications/tavily/README.md +12 -0
- universal_mcp/applications/tavily/__init__.py +1 -0
- universal_mcp/applications/tavily/app.py +51 -0
- universal_mcp/applications/trello/README.md +266 -0
- universal_mcp/applications/trello/__init__.py +1 -0
- universal_mcp/applications/trello/app.py +10875 -0
- universal_mcp/applications/twillo/README.md +0 -0
- universal_mcp/applications/twillo/__init__.py +1 -0
- universal_mcp/applications/twillo/app.py +269 -0
- universal_mcp/applications/twitter/README.md +100 -0
- universal_mcp/applications/twitter/__init__.py +1 -0
- universal_mcp/applications/twitter/api_segments/__init__.py +0 -0
- universal_mcp/applications/twitter/api_segments/api_segment_base.py +51 -0
- universal_mcp/applications/twitter/api_segments/compliance_api.py +122 -0
- universal_mcp/applications/twitter/api_segments/dm_conversations_api.py +255 -0
- universal_mcp/applications/twitter/api_segments/dm_events_api.py +140 -0
- universal_mcp/applications/twitter/api_segments/likes_api.py +159 -0
- universal_mcp/applications/twitter/api_segments/lists_api.py +395 -0
- universal_mcp/applications/twitter/api_segments/openapi_json_api.py +34 -0
- universal_mcp/applications/twitter/api_segments/spaces_api.py +309 -0
- universal_mcp/applications/twitter/api_segments/trends_api.py +40 -0
- universal_mcp/applications/twitter/api_segments/tweets_api.py +1403 -0
- universal_mcp/applications/twitter/api_segments/usage_api.py +40 -0
- universal_mcp/applications/twitter/api_segments/users_api.py +1498 -0
- universal_mcp/applications/twitter/app.py +46 -0
- universal_mcp/applications/unipile/README.md +28 -0
- universal_mcp/applications/unipile/__init__.py +1 -0
- universal_mcp/applications/unipile/app.py +829 -0
- universal_mcp/applications/whatsapp/README.md +23 -0
- universal_mcp/applications/whatsapp/__init__.py +1 -0
- universal_mcp/applications/whatsapp/app.py +595 -0
- universal_mcp/applications/whatsapp-business/README.md +34 -0
- universal_mcp/applications/whatsapp-business/__init__.py +1 -0
- universal_mcp/applications/whatsapp-business/app.py +1065 -0
- universal_mcp/applications/wrike/README.md +46 -0
- universal_mcp/applications/wrike/__init__.py +1 -0
- universal_mcp/applications/wrike/app.py +1583 -0
- universal_mcp/applications/youtube/README.md +57 -0
- universal_mcp/applications/youtube/__init__.py +1 -0
- universal_mcp/applications/youtube/app.py +1696 -0
- universal_mcp/applications/zenquotes/README.md +12 -0
- universal_mcp/applications/zenquotes/__init__.py +1 -0
- universal_mcp/applications/zenquotes/app.py +31 -0
- universal_mcp_applications-0.1.1.dist-info/METADATA +172 -0
- universal_mcp_applications-0.1.1.dist-info/RECORD +268 -0
- universal_mcp_applications-0.1.1.dist-info/WHEEL +4 -0
- universal_mcp_applications-0.1.1.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
# GoogleGeminiApp MCP Server
|
|
2
|
+
|
|
3
|
+
An MCP Server for the GoogleGeminiApp API.
|
|
4
|
+
|
|
5
|
+
## 🛠️ Tool List
|
|
6
|
+
|
|
7
|
+
This is automatically generated from OpenAPI schema for the GoogleGeminiApp API.
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
| Tool | Description |
|
|
11
|
+
|------|-------------|
|
|
12
|
+
| `fetch_model` | Retrieves the configuration details of current model via a GET request. |
|
|
13
|
+
| `fetch_models` | Retrieves a paginated list of available models, supporting page size and token parameters for result navigation. |
|
|
14
|
+
| `text_only_input` | Generates content from a simple text-only prompt using the Gemini model via a POST request. |
|
|
15
|
+
| `generate_atext_stream` | Generates a streaming response from the Gemini 1.5 Flash model for multimodal input content. |
|
|
16
|
+
| `resumable_upload_request` | Initiates a file upload by sending file metadata. |
|
|
17
|
+
| `prompt_document` | Generates content using the Gemini model with document context. |
|
|
18
|
+
| `text_tokens` | Calculates the number of tokens and billable characters for input content using a gemini-2.0-flash. |
|
|
19
|
+
| `fetch_tuned_models` | Retrieves a list of tuned models at the specified page size using the GET method. |
|
|
20
|
+
| `create_atuned_model` | Creates a tuned model using the "POST" method at the "/v1beta/tunedModels" endpoint and returns a response upon successful creation. |
|
|
21
|
+
| `prompt_the_tuned_model` | Generates content using a specified tuned model defined at path "/v1beta/{tunedModel}:generateContent" by sending a POST request. |
|
|
22
|
+
| `delete_tuned_model` | Deletes a specified tuned model and returns a success status upon removal. |
|
|
23
|
+
| `generate_embeddings` | Generates a text embedding vector from input text using the specified Gemini Embedding model, allowing for semantic analysis and comparison of textual content. |
|
|
24
|
+
| `batch_embeddings` | Generates batch embeddings for a list of text inputs using the "gemini-embedding-exp-03-07" model via a POST request to the "/v1beta/models/text-embedding-004:batchEmbedContents" endpoint. |
|
|
25
|
+
| `discovery_document` | Retrieves discovery metadata for REST APIs, including available endpoints and parameters, based on the specified version. |
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from .app import GoogleGeminiApp
|
|
@@ -0,0 +1,663 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from typing import Any, Literal # Added Literal for type hinting
|
|
3
|
+
|
|
4
|
+
import httpx
|
|
5
|
+
from universal_mcp.applications.application import APIApplication
|
|
6
|
+
from universal_mcp.integrations import Integration
|
|
7
|
+
|
|
8
|
+
logger = logging.getLogger(__name__)
# Install a default DEBUG configuration only when nothing has configured
# logging for this module yet, so a host application's setup is respected.
if not logger.handlers:
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class GoogleGeminiApp(APIApplication):
|
|
17
|
+
def __init__(self, integration: Integration = None, **kwargs) -> None:
    """Initialize the Google Gemini application.

    Args:
        integration: Optional integration that supplies API credentials.
        **kwargs: Extra keyword arguments forwarded to APIApplication.
    """
    super().__init__(name="google-gemini", integration=integration, **kwargs)
    self.base_url = "https://generativelanguage.googleapis.com"
|
|
20
|
+
|
|
21
|
+
def _get_headers(self) -> dict[str, str]:
    """Return an empty header mapping.

    Authentication for these Gemini endpoints travels as the 'key' query
    parameter, so the base class's default Authorization header must be
    suppressed by returning no headers at all.
    """
    message = f"Overriding _get_headers for {self.name}. Returning empty dict to prevent default auth header."
    logger.debug(message)
    return {}
|
|
31
|
+
|
|
32
|
+
def _add_api_key_param(self, params: dict[str, Any] | None) -> dict[str, Any]:
    """Return a copy of *params* with the integration's API key added under 'key'.

    The mapping is returned unchanged (but copied) when no integration is
    configured, when a 'key' is already present, or when no API key can be
    found in the credentials. Problems are logged rather than raised.
    """
    merged = dict(params) if params else {}
    # No integration configured: nothing to add, warn and hand back the copy.
    if not self.integration:
        logger.warning(
            f"No integration provided for {self.name}. API key cannot be added automatically."
        )
        return merged
    # Caller already supplied a key: leave it alone.
    if "key" in merged:
        return merged
    try:
        credentials = self.integration.get_credentials()
        if not isinstance(credentials, dict):
            logger.warning(
                f"Integration credentials for {self.name} are not a dictionary. Cannot retrieve API key."
            )
            return merged
        # Accept any of the common spellings for the credential field.
        api_key = None
        for field in ("api_key", "API_KEY", "apiKey"):
            api_key = credentials.get(field)
            if api_key:
                break
        if api_key:
            merged["key"] = api_key
            logger.debug("Added API key as query parameter.")
        else:
            logger.warning(
                f"API key not found in integration credentials for {self.name} using keys: api_key, API_KEY, apiKey."
            )
    except Exception as e:
        logger.error(
            f"Error retrieving API key from integration for {self.name}: {e}"
        )
    return merged
|
|
65
|
+
|
|
66
|
+
def _get(self, url: str, params: dict[str, Any] | None = None) -> httpx.Response:
    """Issue a GET request with the API key injected into the query string."""
    merged_params = self._add_api_key_param(params)
    logger.debug(f"Making GET request to {url} with params: {merged_params}")
    return super()._get(url, params=merged_params)
|
|
73
|
+
|
|
74
|
+
def _post(
    self, url: str, data: dict[str, Any], params: dict[str, Any] | None = None
) -> httpx.Response:
    """Issue a POST request with the API key in the query string.

    The content type is pinned to application/json so the JSON body is
    always sent with an explicit, correct Content-Type header.
    """
    merged_params = self._add_api_key_param(params)
    logger.debug(
        f"Making POST request to {url} with params: {merged_params} and data: {data}"
    )
    return super()._post(
        url, data=data, params=merged_params, content_type="application/json"
    )
|
|
89
|
+
|
|
90
|
+
def _delete(self, url: str, params: dict[str, Any] | None = None) -> httpx.Response:
    """Issue a DELETE request with the API key injected into the query string."""
    merged_params = self._add_api_key_param(params)
    logger.debug(f"Making DELETE request to {url} with params: {merged_params}")
    return super()._delete(url, params=merged_params)
|
|
97
|
+
|
|
98
|
+
def fetch_model(self) -> dict[str, Any]:
    """
    Retrieves the configuration details of current model via a GET request.

    Returns:
        dict[str, Any]: model

    Tags:
        Models, important
    """
    endpoint = f"{self.base_url}/v1beta/models/gemini-2.0-flash"
    response = self._get(endpoint, params={})
    response.raise_for_status()
    return response.json()
|
|
113
|
+
|
|
114
|
+
def fetch_models(self, pageSize=None, pageToken=None) -> dict[str, Any]:
    """
    Retrieves a paginated list of available models, supporting page size and token parameters for result navigation.

    Args:
        pageSize (string): The `pageSize` parameter specifies the maximum number of items to include in each page of the response for the GET operation at the `/v1beta/models` path. Example: '5'.
        pageToken (string): Used in GET requests to specify the page token for fetching the next page of results. Example: 'Chxtb2RlbHMvZ2VtaW5pLTEuNS1wcm8tbGF0ZXN0'.

    Returns:
        dict[str, Any]: models

    Tags:
        Models, important
    """
    # Only forward parameters the caller actually provided.
    candidates = {"pageSize": pageSize, "pageToken": pageToken}
    query_params = {name: value for name, value in candidates.items() if value is not None}
    response = self._get(f"{self.base_url}/v1beta/models", params=query_params)
    response.raise_for_status()
    return response.json()
|
|
137
|
+
|
|
138
|
+
def text_only_input(self, query: str) -> str | dict[str, Any]:
    """
    Generates content using the Gemini 1.5 Flash model via POST request,
    taking a simple string query.

    Args:
        query (str): The text prompt for the model.
            Example: "Write a story about a magic backpack."

    Returns:
        str | dict[str, Any]: The generated text extracted from the first
        candidate when the response has the expected shape; otherwise the
        raw JSON response from the API (e.g. on a safety block).

    Raises:
        ValueError: If the query is empty or not a string.
        httpx.HTTPStatusError: If the API returns an error status.

    Tags:
        important
    """
    if not query or not isinstance(query, str):
        raise ValueError("Query must be a non-empty string.")

    request_body = {"contents": [{"parts": [{"text": query}]}]}
    model_name = "gemini-2.0-flash"
    url = f"{self.base_url}/v1beta/models/{model_name}:generateContent"

    logger.info(
        f'Calling Gemini API for model: {model_name} with query: "{query[:70]}{"..." if len(query) > 70 else ""}"'
    )

    response = self._post(url, data=request_body, params={})
    response.raise_for_status()
    data = response.json()
    try:
        # Happy path: pull the first candidate's text out of the response.
        return data["candidates"][0]["content"]["parts"][0]["text"]
    except (KeyError, IndexError, TypeError):
        # Unexpected response shape: hand the raw payload back to the caller.
        return data
|
|
183
|
+
|
|
184
|
+
def generate_atext_stream(self, query: str) -> Any:
    """
    Generates a streaming response from the Gemini 1.5 Flash model for multimodal input content.

    Args:
        query (str): The text prompt for the model.

    Returns:
        Any: The concatenated text of all streamed response chunks when the
        payload has the expected shape; otherwise the raw JSON payload.

    Raises:
        ValueError: If the query is empty or not a string.
        httpx.HTTPStatusError: If the API returns an error status.

    Tags:
        Text Generation
    """
    if not query or not isinstance(query, str):
        raise ValueError("Query must be a non-empty string.")

    request_body = {"contents": [{"parts": [{"text": query}]}]}
    model_name = "gemini-2.0-flash"
    url = f"{self.base_url}/v1beta/models/{model_name}:streamGenerateContent"

    response = self._post(url, data=request_body, params={})
    response.raise_for_status()
    data = response.json()
    try:
        # streamGenerateContent returns a JSON *array* of response chunks;
        # the previous dict-style indexing always failed on it and fell
        # through to returning the raw list. Concatenate every chunk's text.
        if isinstance(data, list):
            return "".join(
                part.get("text", "")
                for chunk in data
                for part in chunk["candidates"][0]["content"]["parts"]
            )
        # Defensive: a single (non-streamed) response object.
        return data["candidates"][0]["content"]["parts"][0]["text"]
    except (KeyError, IndexError, TypeError):
        # Unexpected response shape: return the raw payload unchanged.
        return data
|
|
217
|
+
|
|
218
|
+
def resumable_upload_request(
    self, file_metadata: dict[str, Any] | None = None
) -> Any:
    """
    Initiates a file upload by sending file metadata.
    This typically returns an upload URL or session URI for subsequent data upload.

    Args:
        file_metadata (Optional[Dict[str, Any]]): Metadata for the file to be uploaded.
            Example: {"display_name": "my_audio_file.mp3"}
            If None, the request is sent without a 'file' field in the body.

    Returns:
        Any: The JSON response from the API, typically containing upload
        instructions or a file resource representation; {} when the
        response body is empty/falsy.

    Raises:
        httpx.HTTPStatusError: If the API returns an error status.

    Tags:
        Document Processing
    """
    request_body: dict[str, Any] = {}
    if file_metadata is not None:
        request_body["file"] = file_metadata

    url = f"{self.base_url}/upload/v1beta/files"
    response = self._post(
        url, data=request_body if request_body else None, params={}
    )
    # Fix: check the HTTP status *before* parsing the body. The original
    # called response.json() first, so a non-JSON error response raised a
    # JSONDecodeError that masked the real HTTPStatusError.
    response.raise_for_status()
    response_json = response.json()
    return response_json if response_json else {}
|
|
258
|
+
|
|
259
|
+
def prompt_document(self, contents=None) -> dict[str, Any]:
    """
    Generates content using the Gemini model, accepting input prompts and returning a streamed response across various media types, such as text, images, and audio.

    NOTE(review): this definition is shadowed by a later ``prompt_document``
    defined further down in this module, so only the later one is bound on
    the class at import time — decide which of the two to keep.

    Args:
        contents (array): contents
            Example:
            ```json
            {
              "contents": [
                {
                  "parts": [
                    {"text": "Summarize the uploaded document."},
                    {"file_data": {"file_uri": "{{FILE_URI}}", "mime_type": "application/pdf"}}
                  ]
                }
              ]
            }
            ```

    Returns:
        dict[str, Any]: prompt document

    Tags:
        Document Processing
    """
    payload = {"contents": contents}
    payload = {key: value for key, value in payload.items() if value is not None}
    endpoint = f"{self.base_url}/v1beta/models/gemini-1.5-pro-latest:generateContent"
    response = self._post(endpoint, data=payload, params={})
    response.raise_for_status()
    return response.json()
|
|
301
|
+
|
|
302
|
+
def prompt_document(
    self, contents: list[dict[str, Any]] | None = None
) -> dict[str, Any]:
    """
    Generates content using the Gemini model with document context.

    Args:
        contents (Optional[List[Dict[str, Any]]]): List of content parts, including text and file data.
            Example:
            ```json
            [
              {
                "parts": [
                  {"text": "Summarize the uploaded document."},
                  {"file_data": {"file_uri": "files/your_file_id", "mime_type": "application/pdf"}}
                ]
              }
            ]
            ```

    Returns:
        dict[str, Any]: The model's response.

    Raises:
        ValueError: If 'contents' is missing or empty.
        httpx.HTTPStatusError: If the API returns an error status.
    """
    request_body = {"contents": contents}
    request_body = {k: v for k, v in request_body.items() if v is not None}
    if not request_body.get("contents"):  # API might require contents
        raise ValueError(
            "Missing required parameter 'contents' for prompt_document."
        )

    # Fix: the URL previously targeted "models/gemini-:generateContent" —
    # an incomplete model name the API rejects. Use the same model as the
    # sibling document-prompt implementation in this module.
    url = f"{self.base_url}/v1beta/models/gemini-1.5-pro-latest:generateContent"
    response = self._post(url, data=request_body, params={})
    # Fix: was missing — surface HTTP errors instead of parsing error bodies.
    response.raise_for_status()
    return response.json()
|
|
336
|
+
|
|
337
|
+
def text_tokens(self, query: str) -> dict[str, Any]:
    """
    Calculates the number of tokens and billable characters for input content using a gemini-2.0-flash.

    Args:
        query (str): The text prompt for the model.

    Returns:
        dict[str, Any]: text tokens / chat tokens / media tokens

    Tags:
        Count Tokens, important
    """
    if not query or not isinstance(query, str):
        raise ValueError("Query must be a non-empty string.")

    payload = {"contents": [{"parts": [{"text": query}]}]}
    endpoint = f"{self.base_url}/v1beta/models/gemini-2.0-flash:countTokens"
    response = self._post(endpoint, data=payload, params={})
    response.raise_for_status()
    return response.json()
|
|
364
|
+
|
|
365
|
+
def fetch_tuned_models(self, page_size=None) -> dict[str, Any]:
|
|
366
|
+
"""
|
|
367
|
+
Retrieves a list of tuned models at the specified page size using the GET method.
|
|
368
|
+
|
|
369
|
+
Args:
|
|
370
|
+
page_size (string): Specifies the maximum number of items to return in a single response page. Example: '10'.
|
|
371
|
+
|
|
372
|
+
Returns:
|
|
373
|
+
dict[str, Any]: fetch models Copy
|
|
374
|
+
|
|
375
|
+
Tags:
|
|
376
|
+
Fine Tunning
|
|
377
|
+
"""
|
|
378
|
+
url = f"{self.base_url}/v1beta/tunedModels"
|
|
379
|
+
query_params = {k: v for k, v in [("page_size", page_size)] if v is not None}
|
|
380
|
+
response = self._get(url, params=query_params)
|
|
381
|
+
response.raise_for_status()
|
|
382
|
+
return response.json()
|
|
383
|
+
|
|
384
|
+
def create_atuned_model(
    self, base_model=None, display_name=None, tuning_task=None
) -> dict[str, Any]:
    """
    Creates a tuned model using the "POST" method at the "/v1beta/tunedModels" endpoint and returns a response upon successful creation.

    Args:
        base_model (string): base_model Example: 'models/gemini-1.5-flash-001-tuning'.
        display_name (string): display_name Example: 'number generator model'.
        tuning_task (object): tuning_task. Typically a dict containing
            "hyperparameters" (e.g. batch_size, epoch_count, learning_rate)
            and "training_data" with a list of {"text_input", "output"}
            example pairs.

    Returns:
        dict[str, Any]: create a tuned model

    Tags:
        Fine Tunning
    """
    # Forward only the fields the caller actually supplied.
    payload = {
        "base_model": base_model,
        "display_name": display_name,
        "tuning_task": tuning_task,
    }
    payload = {key: value for key, value in payload.items() if value is not None}
    response = self._post(
        f"{self.base_url}/v1beta/tunedModels", data=payload, params={}
    )
    response.raise_for_status()
    return response.json()
|
|
492
|
+
|
|
493
|
+
def prompt_the_tuned_model(self, tunedModel, contents=None) -> dict[str, Any]:
    """
    Generates content using a specified tuned model defined at path "/v1beta/{tunedModel}:generateContent" by sending a POST request.

    Args:
        tunedModel (string): tunedModel
        contents (array): contents, e.g. [{"parts": [{"text": "LXIII"}]}]

    Returns:
        dict[str, Any]: prompt the tuned model

    Raises:
        ValueError: If 'tunedModel' is not provided.

    Tags:
        Fine Tunning
    """
    if tunedModel is None:
        raise ValueError("Missing required parameter 'tunedModel'")
    # Omit 'contents' entirely when the caller passed None.
    payload = {} if contents is None else {"contents": contents}
    response = self._post(
        f"{self.base_url}/v1beta/{tunedModel}:generateContent",
        data=payload,
        params={},
    )
    response.raise_for_status()
    return response.json()
|
|
532
|
+
|
|
533
|
+
def delete_tuned_model(self, tunedModel) -> dict[str, Any]:
    """
    Deletes a specified tuned model and returns a success status upon removal.

    Args:
        tunedModel (string): tunedModel

    Returns:
        dict[str, Any]: delete tuned model

    Raises:
        ValueError: If 'tunedModel' is not provided.

    Tags:
        Fine Tunning
    """
    if tunedModel is None:
        raise ValueError("Missing required parameter 'tunedModel'")
    response = self._delete(f"{self.base_url}/v1beta/{tunedModel}", params={})
    response.raise_for_status()
    return response.json()
|
|
553
|
+
|
|
554
|
+
def generate_embeddings(
    self,
    query: str,
    model_name: Literal[
        "gemini-embedding-exp-03-07", "text-embedding-004", "embedding-001"
    ] = "gemini-embedding-exp-03-07",
) -> dict[str, Any]:
    """
    Generates a text embedding vector from input text using the specified Gemini Embedding model, allowing for semantic analysis and comparison of textual content.

    Args:
        query (str): The text to generate an embedding.
        model_name (string): The name of the embedding model to use. Default is "gemini-embedding-exp-03-07".

    Returns:
        dict[str, Any]: generate embeddings

    Raises:
        ValueError: If query is not a non-empty string.

    Tags:
        Embeddings
    """
    if not query or not isinstance(query, str):
        raise ValueError("Query must be a non-empty string.")

    # The API expects the fully qualified model name in the body as well
    # as in the URL path.
    payload = {
        "model": f"models/{model_name}",
        "content": {"parts": [{"text": query}]},
    }
    url = f"{self.base_url}/v1beta/models/{model_name}:embedContent"
    response = self._post(url, data=payload, params={})
    response.raise_for_status()
    return response.json()
|
|
589
|
+
|
|
590
|
+
def batch_embeddings(
    self,
    queries: list[str],
    model_name: Literal[
        "gemini-embedding-exp-03-07", "text-embedding-004", "embedding-001"
    ] = "gemini-embedding-exp-03-07",
) -> dict[str, Any]:
    """
    Generates embeddings for a list of text inputs in a single call via a
    POST request to the "/v1beta/models/{model_name}:batchEmbedContents"
    endpoint.

    Args:
        queries (List[str]): A list of texts to generate embeddings for.
        model_name (string): The name of the embedding model to use. Default is "gemini-embedding-exp-03-07".

    Returns:
        dict[str, Any]: batch embeddings

    Raises:
        ValueError: If queries is empty or contains non-string/empty items.

    Tags:
        Embeddings
    """
    if not queries:
        raise ValueError("Queries list cannot be empty.")
    if not all(isinstance(q, str) and q for q in queries):
        raise ValueError("All items in the queries list must be non-empty strings.")

    # Each entry of the batch repeats the fully qualified model name.
    request_body = {
        "requests": [
            {
                "model": f"models/{model_name}",
                "content": {"parts": [{"text": text}]},
            }
            for text in queries
        ]
    }
    url = f"{self.base_url}/v1beta/models/{model_name}:batchEmbedContents"
    response = self._post(url, data=request_body, params={})
    response.raise_for_status()
    return response.json()
|
|
630
|
+
|
|
631
|
+
def discovery_document(self, version=None) -> dict[str, Any]:
    """
    Retrieves discovery metadata for REST APIs, including available endpoints and parameters, based on the specified version.

    Args:
        version (string): Specifies the API version to use for the request, allowing clients to target a specific release without modifying the URI structure. Example: 'v1beta'.

    Returns:
        dict[str, Any]: Get Discovery Document
    """
    # Include the version selector only when one was supplied.
    params = {}
    if version is not None:
        params["version"] = version
    response = self._get(f"{self.base_url}/$discovery/rest", params=params)
    response.raise_for_status()
    return response.json()
|
|
646
|
+
|
|
647
|
+
def list_tools(self):
    """Return the tool callables exposed by this application, in registration order."""
    tool_names = (
        "fetch_model",
        "fetch_models",
        "text_only_input",
        "generate_atext_stream",
        "resumable_upload_request",
        "prompt_document",
        "text_tokens",
        "fetch_tuned_models",
        "create_atuned_model",
        "prompt_the_tuned_model",
        "delete_tuned_model",
        "generate_embeddings",
        "batch_embeddings",
        "discovery_document",
    )
    return [getattr(self, name) for name in tool_names]
|