chub-dev 0.1.0 → 0.1.2-beta.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +55 -0
- package/bin/chub-mcp +2 -0
- package/dist/airtable/docs/database/javascript/DOC.md +1437 -0
- package/dist/airtable/docs/database/python/DOC.md +1735 -0
- package/dist/amplitude/docs/analytics/javascript/DOC.md +1282 -0
- package/dist/amplitude/docs/analytics/python/DOC.md +1199 -0
- package/dist/anthropic/docs/claude-api/javascript/DOC.md +503 -0
- package/dist/anthropic/docs/claude-api/python/DOC.md +389 -0
- package/dist/asana/docs/tasks/DOC.md +1396 -0
- package/dist/assemblyai/docs/transcription/DOC.md +1043 -0
- package/dist/atlassian/docs/confluence/javascript/DOC.md +1347 -0
- package/dist/atlassian/docs/confluence/python/DOC.md +1604 -0
- package/dist/auth0/docs/identity/javascript/DOC.md +968 -0
- package/dist/auth0/docs/identity/python/DOC.md +1199 -0
- package/dist/aws/docs/s3/javascript/DOC.md +1773 -0
- package/dist/aws/docs/s3/python/DOC.md +1807 -0
- package/dist/binance/docs/trading/javascript/DOC.md +1315 -0
- package/dist/binance/docs/trading/python/DOC.md +1454 -0
- package/dist/braintree/docs/gateway/javascript/DOC.md +1278 -0
- package/dist/braintree/docs/gateway/python/DOC.md +1179 -0
- package/dist/chromadb/docs/embeddings-db/javascript/DOC.md +1263 -0
- package/dist/chromadb/docs/embeddings-db/python/DOC.md +1707 -0
- package/dist/clerk/docs/auth/javascript/DOC.md +1220 -0
- package/dist/clerk/docs/auth/python/DOC.md +274 -0
- package/dist/cloudflare/docs/workers/javascript/DOC.md +918 -0
- package/dist/cloudflare/docs/workers/python/DOC.md +994 -0
- package/dist/cockroachdb/docs/distributed-db/DOC.md +1500 -0
- package/dist/cohere/docs/llm/DOC.md +1335 -0
- package/dist/datadog/docs/monitoring/javascript/DOC.md +1740 -0
- package/dist/datadog/docs/monitoring/python/DOC.md +1815 -0
- package/dist/deepgram/docs/speech/javascript/DOC.md +885 -0
- package/dist/deepgram/docs/speech/python/DOC.md +685 -0
- package/dist/deepl/docs/translation/javascript/DOC.md +887 -0
- package/dist/deepl/docs/translation/python/DOC.md +944 -0
- package/dist/deepseek/docs/llm/DOC.md +1220 -0
- package/dist/directus/docs/headless-cms/javascript/DOC.md +1128 -0
- package/dist/directus/docs/headless-cms/python/DOC.md +1276 -0
- package/dist/discord/docs/bot/javascript/DOC.md +1090 -0
- package/dist/discord/docs/bot/python/DOC.md +1130 -0
- package/dist/elasticsearch/docs/search/DOC.md +1634 -0
- package/dist/elevenlabs/docs/text-to-speech/javascript/DOC.md +336 -0
- package/dist/elevenlabs/docs/text-to-speech/python/DOC.md +552 -0
- package/dist/firebase/docs/auth/DOC.md +1015 -0
- package/dist/gemini/docs/genai/javascript/DOC.md +691 -0
- package/dist/gemini/docs/genai/python/DOC.md +555 -0
- package/dist/github/docs/octokit/DOC.md +1560 -0
- package/dist/google/docs/bigquery/javascript/DOC.md +1688 -0
- package/dist/google/docs/bigquery/python/DOC.md +1503 -0
- package/dist/hubspot/docs/crm/javascript/DOC.md +1805 -0
- package/dist/hubspot/docs/crm/python/DOC.md +2033 -0
- package/dist/huggingface/docs/transformers/DOC.md +948 -0
- package/dist/intercom/docs/messaging/javascript/DOC.md +1844 -0
- package/dist/intercom/docs/messaging/python/DOC.md +1797 -0
- package/dist/jira/docs/issues/javascript/DOC.md +1420 -0
- package/dist/jira/docs/issues/python/DOC.md +1492 -0
- package/dist/kafka/docs/streaming/javascript/DOC.md +1671 -0
- package/dist/kafka/docs/streaming/python/DOC.md +1464 -0
- package/dist/landingai-ade/docs/api/DOC.md +620 -0
- package/dist/landingai-ade/docs/sdk/python/DOC.md +489 -0
- package/dist/landingai-ade/docs/sdk/typescript/DOC.md +542 -0
- package/dist/landingai-ade/skills/SKILL.md +489 -0
- package/dist/launchdarkly/docs/feature-flags/javascript/DOC.md +1191 -0
- package/dist/launchdarkly/docs/feature-flags/python/DOC.md +1671 -0
- package/dist/linear/docs/tracker/DOC.md +1554 -0
- package/dist/livekit/docs/realtime/javascript/DOC.md +303 -0
- package/dist/livekit/docs/realtime/python/DOC.md +163 -0
- package/dist/mailchimp/docs/marketing/DOC.md +1420 -0
- package/dist/meilisearch/docs/search/DOC.md +1241 -0
- package/dist/microsoft/docs/onedrive/javascript/DOC.md +1421 -0
- package/dist/microsoft/docs/onedrive/python/DOC.md +1549 -0
- package/dist/mongodb/docs/atlas/DOC.md +2041 -0
- package/dist/notion/docs/workspace-api/javascript/DOC.md +1435 -0
- package/dist/notion/docs/workspace-api/python/DOC.md +1400 -0
- package/dist/okta/docs/identity/javascript/DOC.md +1171 -0
- package/dist/okta/docs/identity/python/DOC.md +1401 -0
- package/dist/openai/docs/chat/javascript/DOC.md +407 -0
- package/dist/openai/docs/chat/python/DOC.md +568 -0
- package/dist/paypal/docs/checkout/DOC.md +278 -0
- package/dist/pinecone/docs/sdk/javascript/DOC.md +984 -0
- package/dist/pinecone/docs/sdk/python/DOC.md +1395 -0
- package/dist/plaid/docs/banking/javascript/DOC.md +1163 -0
- package/dist/plaid/docs/banking/python/DOC.md +1203 -0
- package/dist/playwright-community/skills/login-flows/SKILL.md +108 -0
- package/dist/postmark/docs/transactional-email/DOC.md +1168 -0
- package/dist/prisma/docs/orm/javascript/DOC.md +1419 -0
- package/dist/prisma/docs/orm/python/DOC.md +1317 -0
- package/dist/qdrant/docs/vector-search/javascript/DOC.md +1221 -0
- package/dist/qdrant/docs/vector-search/python/DOC.md +1653 -0
- package/dist/rabbitmq/docs/message-queue/javascript/DOC.md +1193 -0
- package/dist/rabbitmq/docs/message-queue/python/DOC.md +1243 -0
- package/dist/razorpay/docs/payments/javascript/DOC.md +1219 -0
- package/dist/razorpay/docs/payments/python/DOC.md +1330 -0
- package/dist/redis/docs/key-value/javascript/DOC.md +1851 -0
- package/dist/redis/docs/key-value/python/DOC.md +2054 -0
- package/dist/registry.json +2817 -0
- package/dist/replicate/docs/model-hosting/DOC.md +1318 -0
- package/dist/resend/docs/email/DOC.md +1271 -0
- package/dist/salesforce/docs/crm/javascript/DOC.md +1241 -0
- package/dist/salesforce/docs/crm/python/DOC.md +1183 -0
- package/dist/search-index.json +1 -0
- package/dist/sendgrid/docs/email-api/javascript/DOC.md +371 -0
- package/dist/sendgrid/docs/email-api/python/DOC.md +656 -0
- package/dist/sentry/docs/error-tracking/javascript/DOC.md +1073 -0
- package/dist/sentry/docs/error-tracking/python/DOC.md +1309 -0
- package/dist/shopify/docs/storefront/DOC.md +457 -0
- package/dist/slack/docs/workspace/javascript/DOC.md +933 -0
- package/dist/slack/docs/workspace/python/DOC.md +271 -0
- package/dist/square/docs/payments/javascript/DOC.md +1855 -0
- package/dist/square/docs/payments/python/DOC.md +1728 -0
- package/dist/stripe/docs/api/DOC.md +1727 -0
- package/dist/stripe/docs/payments/DOC.md +1726 -0
- package/dist/stytch/docs/auth/javascript/DOC.md +1813 -0
- package/dist/stytch/docs/auth/python/DOC.md +1962 -0
- package/dist/supabase/docs/client/DOC.md +1606 -0
- package/dist/twilio/docs/messaging/python/DOC.md +469 -0
- package/dist/twilio/docs/messaging/typescript/DOC.md +946 -0
- package/dist/vercel/docs/platform/DOC.md +1940 -0
- package/dist/weaviate/docs/vector-db/javascript/DOC.md +1268 -0
- package/dist/weaviate/docs/vector-db/python/DOC.md +1388 -0
- package/dist/zendesk/docs/support/javascript/DOC.md +2150 -0
- package/dist/zendesk/docs/support/python/DOC.md +2297 -0
- package/package.json +22 -6
- package/skills/get-api-docs/SKILL.md +84 -0
- package/src/commands/annotate.js +83 -0
- package/src/commands/build.js +12 -1
- package/src/commands/feedback.js +150 -0
- package/src/commands/get.js +83 -42
- package/src/commands/search.js +7 -0
- package/src/index.js +43 -17
- package/src/lib/analytics.js +90 -0
- package/src/lib/annotations.js +57 -0
- package/src/lib/bm25.js +170 -0
- package/src/lib/cache.js +69 -6
- package/src/lib/config.js +8 -3
- package/src/lib/identity.js +99 -0
- package/src/lib/registry.js +103 -20
- package/src/lib/telemetry.js +86 -0
- package/src/mcp/server.js +177 -0
- package/src/mcp/tools.js +251 -0
@@ -0,0 +1,691 @@
---
name: genai
description: "Google Gemini GenAI SDK for multimodal LLM interactions in JavaScript/TypeScript"
metadata:
  languages: "javascript"
  versions: "1.43.0"
  updated-on: "2026-03-01"
  source: maintainer
  tags: "gemini,google,genai,llm,multimodal"
---

# Gemini API Coding Guidelines (JavaScript/TypeScript)

You are a Gemini API coding expert. Help me write code that calls the Gemini
API using the official libraries and SDKs.

Follow these guidelines when generating code.

You can find the official SDK documentation and code samples here:
https://googleapis.github.io/js-genai/

## Golden Rule: Use the Correct and Current SDK

Always use the Google Gen AI SDK to call the Gemini models; it is the
standard library for all Gemini API interactions. Do not use legacy libraries
and SDKs.

- **Library Name:** Google Gen AI SDK
- **NPM Package:** `@google/genai`
- **Legacy Libraries:** `@google/generative-ai` is deprecated

**Installation:**

- **Incorrect:** `npm install @google/generative-ai`
- **Incorrect:** `npm install @google-ai/generativelanguage`
- **Correct:** `npm install @google/genai`

**APIs and Usage:**

- **Incorrect:** `const { GenerativeModel } = require('@google/generative-ai')` -> **Correct:** `import { GoogleGenAI } from '@google/genai'`
- **Incorrect:** `const model = genai.getGenerativeModel(...)` -> **Correct:** `const ai = new GoogleGenAI({apiKey: "..."})`
- **Incorrect:** `await model.generateContent(...)` -> **Correct:** `await ai.models.generateContent(...)`
- **Incorrect:** `await model.generateContentStream(...)` -> **Correct:** `await ai.models.generateContentStream(...)`
- **Incorrect:** `const generationConfig = { ... }` -> **Correct:** Pass configuration directly: `config: { safetySettings: [...] }`
- **Incorrect:** `GoogleGenerativeAI`
- **Incorrect:** `google.generativeai`
- **Incorrect:** `models.create`
- **Incorrect:** `ai.models.create`
- **Incorrect:** `models.getGenerativeModel`
- **Incorrect:** `ai.models.getModel`
- **Incorrect:** `ai.models['model_name']`
- **Incorrect:** `generationConfig`
- **Incorrect:** `GoogleGenAIError` -> **Correct:** `ApiError`
- **Incorrect:** `GenerateContentResult` -> **Correct:** `GenerateContentResponse`
- **Incorrect:** `GenerateContentRequest` -> **Correct:** `GenerateContentParameters`
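
To make the mapping concrete, here is a minimal before/after sketch of the same call in the legacy SDK and its `@google/genai` equivalent (the model name and prompt are illustrative):

```javascript
// Legacy (@google/generative-ai) — do not use:
// const { GoogleGenerativeAI } = require('@google/generative-ai');
// const genAI = new GoogleGenerativeAI(apiKey);
// const model = genAI.getGenerativeModel({ model: 'gemini-1.5-flash' });
// const result = await model.generateContent('Hello');

// Current (@google/genai):
import { GoogleGenAI } from '@google/genai';

const ai = new GoogleGenAI({}); // reads GEMINI_API_KEY from the environment
const response = await ai.models.generateContent({
  model: 'gemini-2.5-flash',
  contents: 'Hello',
});
console.log(response.text);
```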

## Initialization and API key

The `@google/genai` library requires creating a `GoogleGenAI` instance for all
API calls.

- Always use `const ai = new GoogleGenAI({})` to create an instance.
- Set the `GEMINI_API_KEY` environment variable, which will be picked up
  automatically in Node.js environments.

```javascript
import { GoogleGenAI } from '@google/genai';

// Uses the GEMINI_API_KEY environment variable if apiKey is not specified
const ai = new GoogleGenAI({});

// Or pass the API key directly
// const ai = new GoogleGenAI({apiKey: process.env.GEMINI_API_KEY});
```

## Models

- By default, use the following models as of March 2026:
  - **General Text & Multimodal Tasks:** `gemini-2.5-flash`
  - **Coding and Complex Reasoning Tasks:** `gemini-2.5-pro`
  - **Image Generation Tasks:** `imagen-4.0-fast-generate-001`,
    `imagen-4.0-generate-001`, or `imagen-4.0-ultra-generate-001`
  - **Image Editing Tasks:** `gemini-2.5-flash-image-preview`
  - **Video Generation Tasks:** `veo-3.0-fast-generate-preview` or
    `veo-3.0-generate-preview`

- It is also acceptable to use the following models if explicitly requested by
  the user:
  - **Gemini 2.0 Series:** `gemini-2.0-flash`, `gemini-2.0-pro`

- Do not use the following deprecated models (or their variants such as
  `gemini-1.5-flash-latest`):
  - **Prohibited:** `gemini-1.5-flash`
  - **Prohibited:** `gemini-1.5-pro`
  - **Prohibited:** `gemini-pro`

## Basic Inference (Text Generation)

Here's how to generate a response from a text prompt.

```javascript
import { GoogleGenAI } from '@google/genai';

const ai = new GoogleGenAI({}); // Assumes GEMINI_API_KEY is set

async function run() {
  const response = await ai.models.generateContent({
    model: 'gemini-2.5-flash',
    contents: 'why is the sky blue?',
  });

  console.log(response.text); // output is often markdown
}

run();
```

Multimodal inputs are supported by passing file data in the `contents` array.

```javascript
import { GoogleGenAI } from '@google/genai';
import * as fs from 'fs';

const ai = new GoogleGenAI({});

// Converts local file information to an inline-data Part object.
function fileToGenerativePart(path, mimeType) {
  return {
    inlineData: {
      data: Buffer.from(fs.readFileSync(path)).toString("base64"),
      mimeType
    },
  };
}

async function run() {
  const imagePart = fileToGenerativePart("path/to/image.jpg", "image/jpeg");

  const response = await ai.models.generateContent({
    model: 'gemini-2.5-flash',
    contents: [imagePart, "explain that image"],
  });

  console.log(response.text); // The output is often markdown
}

run();
```

You can use this approach to pass a variety of data types (images, audio,
video, PDF). For PDF, use `application/pdf` as the `mimeType`.
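
For example, a PDF can be passed the same way, reusing the `fileToGenerativePart` helper from the image example above (the path is illustrative):

```javascript
// Inside an async function, with `ai` and fileToGenerativePart defined as above.
const pdfPart = fileToGenerativePart("path/to/report.pdf", "application/pdf");

const response = await ai.models.generateContent({
  model: 'gemini-2.5-flash',
  contents: [pdfPart, "Summarize this document"],
});
console.log(response.text);
```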

For larger files, use `ai.files.upload`:

```javascript
import { GoogleGenAI, createPartFromUri, createUserContent } from '@google/genai';
const ai = new GoogleGenAI({});

async function run() {
  const f = await ai.files.upload({
    file: 'path/to/sample.mp3',
    config: {mimeType: 'audio/mp3'},
  });

  const response = await ai.models.generateContent({
    model: 'gemini-2.5-flash',
    contents: createUserContent([
      createPartFromUri(f.uri, f.mimeType),
      "Describe this audio clip"
    ])
  });

  console.log(response.text);
}

run();
```

You can delete files after use like this:

```javascript
const myFile = await ai.files.upload({file: 'path/to/sample.mp3', config: {mimeType: 'audio/mp3'}});
await ai.files.delete({name: myFile.name});
```

## Additional Capabilities and Configurations

Below are examples of advanced configurations.

### Thinking

Gemini 2.5 series models support thinking, which is on by default for
`gemini-2.5-flash`. It can be adjusted with the `thinkingBudget` setting.
Setting it to zero turns thinking off, which reduces latency.

```javascript
import { GoogleGenAI } from "@google/genai";

const ai = new GoogleGenAI({});

async function main() {
  const response = await ai.models.generateContent({
    model: "gemini-2.5-pro",
    contents: "Provide a list of 3 famous physicists and their key contributions",
    config: {
      thinkingConfig: {
        thinkingBudget: 1024,
        // Turn off thinking:
        // thinkingBudget: 0
        // Turn on dynamic thinking:
        // thinkingBudget: -1
      },
    },
  });

  console.log(response.text);
}

main();
```

IMPORTANT NOTES:

- The minimum thinking budget for `gemini-2.5-pro` is `128`, and thinking cannot
  be turned off for that model.
- No models apart from the Gemini 2.5 series support thinking or thinking-budget
  APIs. Do not try to adjust thinking budgets on other models (such as
  `gemini-2.0-flash` or `gemini-2.0-pro`); doing so will cause errors.

### System instructions

Use system instructions to guide the model's behavior.

```javascript
import { GoogleGenAI } from '@google/genai';

const ai = new GoogleGenAI({});

async function run() {
  const response = await ai.models.generateContent({
    model: 'gemini-2.5-flash',
    contents: "Hello.",
    config: {
      systemInstruction: "You are a pirate",
    }
  });
  console.log(response.text);
}
run();
```

### Hyperparameters

You can also set `temperature` or `maxOutputTokens` within the `config` object.
**Avoid** setting `maxOutputTokens`, `topP`, or `topK` unless explicitly
requested by the user.
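
If the user does ask for specific sampling settings, a minimal sketch looks like this (the values shown are illustrative):

```javascript
// Inside an async function, with `ai` defined as above.
const response = await ai.models.generateContent({
  model: 'gemini-2.5-flash',
  contents: 'Write a haiku about autumn.',
  config: {
    temperature: 0.2,     // lower = more deterministic output
    maxOutputTokens: 256, // only set when the user explicitly asks for a cap
  },
});
console.log(response.text);
```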

### Safety configurations

Avoid setting safety configurations unless explicitly requested by the user. If
explicitly asked for by the user, here is a sample API:

```javascript
import { GoogleGenAI, HarmCategory, HarmBlockThreshold } from '@google/genai';
import * as fs from 'fs';

const ai = new GoogleGenAI({});

// Converts local file information to an inline-data Part object.
function fileToGenerativePart(path, mimeType) {
  return {
    inlineData: {
      data: Buffer.from(fs.readFileSync(path)).toString("base64"),
      mimeType
    },
  };
}

async function run() {
  const img = fileToGenerativePart("/path/to/img.jpg", "image/jpeg");
  const response = await ai.models.generateContent({
    model: "gemini-2.5-flash",
    contents: ['Do these look store-bought or homemade?', img],
    config: {
      safetySettings: [
        {
          category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
          threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
        },
      ]
    }
  });
  console.log(response.text);
}
run();
```

### Streaming

It is possible to stream responses to reduce user-perceived latency:

```javascript
import { GoogleGenAI } from '@google/genai';
const ai = new GoogleGenAI({});

async function run() {
  const responseStream = await ai.models.generateContentStream({
    model: "gemini-2.5-flash",
    contents: ["Explain how AI works"],
  });

  for await (const chunk of responseStream) {
    process.stdout.write(chunk.text);
  }
  console.log(); // for a final newline
}
run();
```

### Chat

For multi-turn conversations, use the `chats` service to maintain conversation
history.

```javascript
import { GoogleGenAI } from '@google/genai';

const ai = new GoogleGenAI({});

async function run() {
  const chat = ai.chats.create({model: "gemini-2.5-flash"});

  let response = await chat.sendMessage({message: "I have 2 dogs in my house."});
  console.log(response.text);

  response = await chat.sendMessage({message: "How many paws are in my house?"});
  console.log(response.text);

  const history = await chat.getHistory();
  for (const message of history) {
    console.log(`role - ${message.role}: ${message.parts[0].text}`);
  }
}
run();
```

It is also possible to use streaming with Chat:

```javascript
const chat = ai.chats.create({model: "gemini-2.5-flash"});
const stream = await chat.sendMessageStream({message: "I have 2 dogs in my house."});
for await (const chunk of stream) {
  console.log(chunk.text);
  console.log("_".repeat(80));
}
```

Note: `ai.chats.create({model})` returns a `Chat` object under `@google/genai`,
which tracks the session.

### Structured outputs

Ask the model to return a response in JSON format.

The recommended way is to configure a `responseSchema` for the expected output.

See the available types below that can be used in the `responseSchema`.

```typescript
export enum Type {
  /**
   * Not specified, should not be used.
   */
  TYPE_UNSPECIFIED = 'TYPE_UNSPECIFIED',
  /**
   * OpenAPI string type
   */
  STRING = 'STRING',
  /**
   * OpenAPI number type
   */
  NUMBER = 'NUMBER',
  /**
   * OpenAPI integer type
   */
  INTEGER = 'INTEGER',
  /**
   * OpenAPI boolean type
   */
  BOOLEAN = 'BOOLEAN',
  /**
   * OpenAPI array type
   */
  ARRAY = 'ARRAY',
  /**
   * OpenAPI object type
   */
  OBJECT = 'OBJECT',
  /**
   * Null type
   */
  NULL = 'NULL',
}
```

`Type.OBJECT` cannot be empty; it must contain other properties.

```javascript
import { GoogleGenAI, Type } from "@google/genai";

const ai = new GoogleGenAI({});
const response = await ai.models.generateContent({
  model: "gemini-2.5-flash",
  contents: "List a few popular cookie recipes, and include the amounts of ingredients.",
  config: {
    responseMimeType: "application/json",
    responseSchema: {
      type: Type.ARRAY,
      items: {
        type: Type.OBJECT,
        properties: {
          recipeName: {
            type: Type.STRING,
            description: 'The name of the recipe.',
          },
          ingredients: {
            type: Type.ARRAY,
            items: {
              type: Type.STRING,
            },
            description: 'The ingredients for the recipe.',
          },
        },
        propertyOrdering: ["recipeName", "ingredients"],
      },
    },
  },
});

let jsonStr = response.text.trim();
```

The `jsonStr` might look like this:

```json
[
  {
    "recipeName": "Chocolate Chip Cookies",
    "ingredients": [
      "1 cup (2 sticks) unsalted butter, softened",
      "3/4 cup granulated sugar",
      "3/4 cup packed brown sugar",
      "1 teaspoon vanilla extract",
      "2 large eggs",
      "2 1/4 cups all-purpose flour",
      "1 teaspoon baking soda",
      "1 teaspoon salt",
      "2 cups chocolate chips"
    ]
  },
  ...
]
```
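
Because the request sets `responseMimeType: "application/json"`, the response text can then be parsed directly (a minimal sketch continuing from the example above):

```javascript
// Parse the structured response into plain JavaScript objects.
const recipes = JSON.parse(jsonStr);
for (const recipe of recipes) {
  console.log(recipe.recipeName, '-', recipe.ingredients.length, 'ingredients');
}
```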

### Function Calling (Tools)

You can provide the model with tools (functions) it can use to bring in external
information to answer a question or act on a request outside the model.

```javascript
import { GoogleGenAI, Type } from '@google/genai';
const ai = new GoogleGenAI({});

async function run() {
  const controlLightDeclaration = {
    name: 'controlLight',
    parameters: {
      type: Type.OBJECT,
      description: 'Set brightness and color temperature of a light.',
      properties: {
        brightness: { type: Type.NUMBER, description: 'Light level from 0 to 100.' },
        colorTemperature: { type: Type.STRING, description: '`daylight`, `cool`, or `warm`.' },
      },
      required: ['brightness', 'colorTemperature'],
    },
  };

  const response = await ai.models.generateContent({
    model: 'gemini-2.5-flash',
    contents: 'Dim the lights so the room feels cozy and warm.',
    config: {
      tools: [{functionDeclarations: [controlLightDeclaration]}]
    }
  });

  if (response.functionCalls) {
    console.log(response.functionCalls);
    // In a real app, you would execute the function and send the result back.
  }
}
run();
```
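
To complete the loop, execute the function locally and send a `functionResponse` part back to the model. The sketch below continues inside `run()` from the example above and assumes a hypothetical `setLight` implementation:

```javascript
// Hypothetical local implementation of the declared function.
function setLight(brightness, colorTemperature) {
  return { status: 'ok', brightness, colorTemperature };
}

// Continuing inside run(), after the first generateContent call:
const call = response.functionCalls?.[0];
if (call) {
  const result = setLight(call.args.brightness, call.args.colorTemperature);

  // Send the function result back so the model can produce a final answer.
  const followUp = await ai.models.generateContent({
    model: 'gemini-2.5-flash',
    contents: [
      { role: 'user', parts: [{ text: 'Dim the lights so the room feels cozy and warm.' }] },
      { role: 'model', parts: [{ functionCall: call }] },
      { role: 'user', parts: [{ functionResponse: { name: call.name, response: result } }] },
    ],
    config: { tools: [{ functionDeclarations: [controlLightDeclaration] }] },
  });
  console.log(followUp.text);
}
```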

### Generate Images

Here's how to generate images using the Imagen models.

```javascript
import { GoogleGenAI } from "@google/genai";

const ai = new GoogleGenAI({});

async function run() {
  const response = await ai.models.generateImages({
    model: 'imagen-4.0-fast-generate-001',
    prompt: 'A friendly robot holding a red skateboard, minimalist vector art',
    config: {
      numberOfImages: 1, // 1 to 4 (always 1 for the ultra model)
      outputMimeType: 'image/jpeg',
      aspectRatio: '1:1', // "1:1", "3:4", "4:3", "9:16", or "16:9"
    },
  });

  const base64ImageBytes = response.generatedImages[0].image.imageBytes;
  // This can be used directly in an <img> src attribute
  const imageUrl = `data:image/jpeg;base64,${base64ImageBytes}`;
  console.log(imageUrl);
}
run();
```

Note: Do not include `negativePrompts` in the config; it is not supported.

### Edit Images

Editing images is better done using the Gemini native image generation model.
Config options are not supported for this model (except modality).

```javascript
import { GoogleGenAI } from '@google/genai';
import * as fs from 'fs';

const ai = new GoogleGenAI({});

// Build the image Part from a local file (same inline-data shape as above).
const imagePart = {
  inlineData: {
    data: Buffer.from(fs.readFileSync('path/to/image.png')).toString('base64'),
    mimeType: 'image/png',
  },
};

const response = await ai.models.generateContent({
  model: 'gemini-2.5-flash-image-preview',
  contents: [imagePart, 'koala eating a nano banana']
});
for (const part of response.candidates[0].content.parts) {
  if (part.inlineData) {
    const base64ImageBytes = part.inlineData.data;
    const imageUrl = `data:image/png;base64,${base64ImageBytes}`;
  }
}
```

### Generate Videos

Here's how to generate videos using the Veo models. Usage of Veo can be costly,
so after generating code for it, give the user a heads-up to check Veo pricing.

```javascript
import { GoogleGenAI } from "@google/genai";
import { createWriteStream } from "fs";
import { Readable } from "stream";

const ai = new GoogleGenAI({});

async function main() {
  let operation = await ai.models.generateVideos({
    model: "veo-3.0-fast-generate-preview",
    prompt: "Panning wide shot of a calico kitten sleeping in the sunshine",
    config: {
      personGeneration: "dont_allow",
      aspectRatio: "16:9",
    },
  });

  // Poll until the long-running operation completes.
  while (!operation.done) {
    await new Promise((resolve) => setTimeout(resolve, 10000));
    operation = await ai.operations.getVideosOperation({
      operation: operation,
    });
  }

  operation.response?.generatedVideos?.forEach(async (generatedVideo, n) => {
    // Append your API key to download the video file.
    const resp = await fetch(`${generatedVideo.video?.uri}&key=${process.env.GEMINI_API_KEY}`);
    const writer = createWriteStream(`video${n}.mp4`);
    Readable.fromWeb(resp.body).pipe(writer);
  });
}

main();
```

### Search Grounding

Google Search can be used as a tool for grounding queries with up-to-date
information from the web.

```javascript
import { GoogleGenAI } from "@google/genai";

const ai = new GoogleGenAI({});

async function run() {
  const response = await ai.models.generateContent({
    model: "gemini-2.5-flash",
    contents: "Who won the latest F1 race?",
    config: {
      tools: [{googleSearch: {}}],
    },
  });

  console.log("Response:", response.text);

  // Extract and display grounding URLs
  const searchChunks = response.candidates?.[0]?.groundingMetadata?.groundingChunks;
  if (searchChunks) {
    const urls = searchChunks.map(chunk => chunk.web.uri);
    console.log("Sources:", urls);
  }
}
run();
```

### Content and Part Hierarchy

While the simpler API call is often sufficient, you may run into scenarios where
you need to work directly with the underlying `Content` and `Part` objects for
more explicit control. These are the fundamental building blocks of the
`generateContent` API.

For instance, the following simple API call:

```javascript
import { GoogleGenAI } from '@google/genai';
const ai = new GoogleGenAI({});

async function run() {
  const response = await ai.models.generateContent({
    model: "gemini-2.5-flash",
    contents: "How does AI work?",
  });
  console.log(response.text);
}
run();
```

is effectively a shorthand for this more explicit structure:

```javascript
import { GoogleGenAI } from '@google/genai';
const ai = new GoogleGenAI({});

async function run() {
  const response = await ai.models.generateContent({
    model: "gemini-2.5-flash",
    contents: [
      { role: "user", parts: [{ text: "How does AI work?" }] },
    ],
  });
  console.log(response.text);
}
run();
```

## API Errors

`ApiError` from `@google/genai` extends the ECMAScript `Error` and has
`message` and `name` fields in addition to `status` (the HTTP status code).
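
A minimal sketch of catching it:

```javascript
import { GoogleGenAI, ApiError } from '@google/genai';

const ai = new GoogleGenAI({});

try {
  const response = await ai.models.generateContent({
    model: 'gemini-2.5-flash',
    contents: 'Hello',
  });
  console.log(response.text);
} catch (err) {
  if (err instanceof ApiError) {
    console.error(`API error ${err.status}: ${err.message}`);
  } else {
    throw err;
  }
}
```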

## Other APIs

The list of APIs and capabilities above is not comprehensive. If users ask you
to generate code for a capability not covered above, refer them to
https://googleapis.github.io/js-genai/.

## Useful Links

- Documentation: ai.google.dev/gemini-api/docs
- API Keys and Authentication: ai.google.dev/gemini-api/docs/api-key
- Models: ai.google.dev/models
- API Pricing: ai.google.dev/pricing
- Rate Limits: ai.google.dev/rate-limits