@riotprompt/execution-gemini 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.kodrdriv-test-cache.json +6 -0
- package/LICENSE +18 -0
- package/README.md +80 -0
- package/dist/index.cjs +101 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.ts +85 -0
- package/dist/index.js +101 -0
- package/dist/index.js.map +1 -0
- package/package.json +46 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
Apache License
|
|
2
|
+
Version 2.0, January 2004
|
|
3
|
+
http://www.apache.org/licenses/
|
|
4
|
+
|
|
5
|
+
Copyright 2025 Tim O'Brien
|
|
6
|
+
|
|
7
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
|
8
|
+
you may not use this file except in compliance with the License.
|
|
9
|
+
You may obtain a copy of the License at
|
|
10
|
+
|
|
11
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
|
12
|
+
|
|
13
|
+
Unless required by applicable law or agreed to in writing, software
|
|
14
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
|
15
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
16
|
+
See the License for the specific language governing permissions and
|
|
17
|
+
limitations under the License.
|
|
18
|
+
|
package/README.md
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
# @riotprompt/execution-gemini
|
|
2
|
+
|
|
3
|
+
Google Gemini provider implementation for LLM execution. Implements the `Provider` interface from the `execution` package.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install @riotprompt/execution-gemini @google/generative-ai
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Usage
|
|
12
|
+
|
|
13
|
+
```typescript
|
|
14
|
+
import { GeminiProvider, createGeminiProvider } from '@riotprompt/execution-gemini';
|
|
15
|
+
|
|
16
|
+
// Create provider
|
|
17
|
+
const provider = createGeminiProvider();
|
|
18
|
+
|
|
19
|
+
// Execute a request
|
|
20
|
+
const response = await provider.execute(
|
|
21
|
+
{
|
|
22
|
+
model: 'gemini-1.5-pro',
|
|
23
|
+
messages: [
|
|
24
|
+
{ role: 'system', content: 'You are helpful.' },
|
|
25
|
+
{ role: 'user', content: 'Hello!' }
|
|
26
|
+
],
|
|
27
|
+
addMessage: () => {},
|
|
28
|
+
},
|
|
29
|
+
{
|
|
30
|
+
apiKey: process.env.GEMINI_API_KEY,
|
|
31
|
+
temperature: 0.7,
|
|
32
|
+
}
|
|
33
|
+
);
|
|
34
|
+
|
|
35
|
+
console.log(response.content);
|
|
36
|
+
console.log(response.usage); // { inputTokens: X, outputTokens: Y }
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
## Supported Models
|
|
40
|
+
|
|
41
|
+
The provider supports all Gemini models:
|
|
42
|
+
- Gemini 1.5 Pro
|
|
43
|
+
- Gemini 1.5 Flash
|
|
44
|
+
- Gemini 1.0 Pro
|
|
45
|
+
|
|
46
|
+
## API Key
|
|
47
|
+
|
|
48
|
+
Set via:
|
|
49
|
+
1. `options.apiKey` parameter
|
|
50
|
+
2. `GEMINI_API_KEY` environment variable
|
|
51
|
+
|
|
52
|
+
## Features
|
|
53
|
+
|
|
54
|
+
- System instruction support
|
|
55
|
+
- Multi-turn conversation handling
|
|
56
|
+
- Structured output via JSON schema
|
|
57
|
+
- Token usage tracking
|
|
58
|
+
|
|
59
|
+
## Response Format
|
|
60
|
+
|
|
61
|
+
```typescript
|
|
62
|
+
interface ProviderResponse {
|
|
63
|
+
content: string;
|
|
64
|
+
model: string;
|
|
65
|
+
usage?: {
|
|
66
|
+
inputTokens: number;
|
|
67
|
+
outputTokens: number;
|
|
68
|
+
};
|
|
69
|
+
}
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
## Related Packages
|
|
73
|
+
|
|
74
|
+
- `execution` - Core interfaces (no SDK dependencies)
|
|
75
|
+
- `execution-openai` - OpenAI provider
|
|
76
|
+
- `execution-anthropic` - Anthropic provider
|
|
77
|
+
|
|
78
|
+
## License
|
|
79
|
+
|
|
80
|
+
Apache-2.0
|
package/dist/index.cjs
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperties(exports, { __esModule: { value: true }, [Symbol.toStringTag]: { value: "Module" } });
|
|
3
|
+
const generativeAi = require("@google/generative-ai");
|
|
4
|
+
class GeminiProvider {
  // Provider identifier used by the execution layer for lookup/registration.
  name = "gemini";
  /**
   * Check if this provider supports a given model.
   * @param {string} model - Model identifier, e.g. "gemini-1.5-pro".
   * @returns {boolean} True when the id names a Gemini model.
   */
  supportsModel(model) {
    if (!model) return false;
    return model.startsWith("gemini");
  }
  /**
   * Execute a request against Gemini.
   *
   * @param {object} request - { messages, model, responseFormat?, ... }.
   * @param {object} [options] - { apiKey?, model?, temperature?, maxTokens?, ... }.
   * @returns {Promise<object>} { content, model, usage? } with token counts when reported.
   * @throws {Error} When no API key is supplied via options.apiKey or GEMINI_API_KEY.
   */
  async execute(request, options = {}) {
    const apiKey = options.apiKey || process.env.GEMINI_API_KEY;
    if (!apiKey) throw new Error("Gemini API key is required");
    const genAI = new generativeAi.GoogleGenerativeAI(apiKey);
    const modelName = options.model || request.model || "gemini-1.5-pro";
    const generationConfig = {};
    // FIX: temperature/maxTokens are part of ExecutionOptions (and shown in the
    // README usage example) but were previously ignored; forward them to Gemini.
    if (options.temperature !== undefined) {
      generationConfig.temperature = options.temperature;
    }
    if (options.maxTokens !== undefined) {
      generationConfig.maxOutputTokens = options.maxTokens;
    }
    if (request.responseFormat?.type === "json_schema") {
      generationConfig.responseMimeType = "application/json";
      const openAISchema = request.responseFormat.json_schema.schema;
      // Gemini's responseSchema uses uppercase type names (e.g. "OBJECT") and
      // rejects OpenAI-specific keys such as additionalProperties and $schema.
      const mapSchema = (s) => {
        if (!s) return void 0;
        const newSchema = { ...s };
        if (newSchema.type) {
          newSchema.type = typeof newSchema.type === "string" ? newSchema.type.toUpperCase() : newSchema.type;
        }
        if (newSchema.properties) {
          const newProps = {};
          for (const [key, val] of Object.entries(newSchema.properties)) {
            newProps[key] = mapSchema(val);
          }
          newSchema.properties = newProps;
        }
        if (newSchema.items) {
          newSchema.items = mapSchema(newSchema.items);
        }
        delete newSchema.additionalProperties;
        delete newSchema["$schema"];
        return newSchema;
      };
      generationConfig.responseSchema = mapSchema(openAISchema);
    }
    // Gemini takes the system prompt out-of-band: collect all system/developer
    // messages into a single systemInstruction string.
    let systemInstruction = "";
    for (const msg of request.messages) {
      if (msg.role === "system" || msg.role === "developer") {
        systemInstruction += (typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)) + "\n\n";
      }
    }
    const configuredModel = genAI.getGenerativeModel({
      model: modelName,
      systemInstruction: systemInstruction ? systemInstruction.trim() : void 0,
      generationConfig
    });
    // Build chat turns; every non-assistant role (user/tool) maps to "user".
    const chatHistory = [];
    let lastUserMessage = "";
    for (const msg of request.messages) {
      if (msg.role === "system" || msg.role === "developer") continue;
      const content = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
      if (msg.role === "user") {
        lastUserMessage = content;
      }
      chatHistory.push({
        role: msg.role === "assistant" ? "model" : "user",
        parts: [{ text: content }]
      });
    }
    let result;
    if (chatHistory.length > 1) {
      const lastMsg = chatHistory.pop();
      // FIX: the Gemini SDK requires chat history to begin with a "user" turn;
      // drop any leading "model" turns so startChat does not reject the request.
      while (chatHistory.length > 0 && chatHistory[0].role === "model") {
        chatHistory.shift();
      }
      const chat = configuredModel.startChat({
        history: chatHistory
      });
      result = await chat.sendMessage(lastMsg?.parts[0].text || "");
    } else {
      // Single-turn request: send the last user message directly (a lone
      // space keeps the API from rejecting an empty prompt).
      result = await configuredModel.generateContent(lastUserMessage || " ");
    }
    const response = await result.response;
    const text = response.text();
    return {
      content: text,
      model: modelName,
      usage: response.usageMetadata ? {
        inputTokens: response.usageMetadata.promptTokenCount,
        outputTokens: response.usageMetadata.candidatesTokenCount
      } : void 0
    };
  }
}
|
|
93
|
+
/**
 * Convenience factory: build a fresh GeminiProvider instance.
 * @returns {GeminiProvider} A new provider ready to execute requests.
 */
function createGeminiProvider() {
  const provider = new GeminiProvider();
  return provider;
}
|
|
96
|
+
// FIX: package.json publishes this package as 0.0.2; the exported VERSION
// previously still said "0.0.1". Keep it in sync with the published version.
const VERSION = "0.0.2";
exports.GeminiProvider = GeminiProvider;
exports.VERSION = VERSION;
exports.createGeminiProvider = createGeminiProvider;
exports.default = GeminiProvider;
//# sourceMappingURL=index.cjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.cjs","sources":["../src/index.ts"],"sourcesContent":["/**\n * Execution Gemini Package\n *\n * Google Gemini provider implementation for LLM execution.\n *\n * @packageDocumentation\n */\n\nimport { GoogleGenerativeAI } from '@google/generative-ai';\n\n// ===== INLINE TYPES (from 'execution' package) =====\n\nexport type Model = string;\n\nexport interface Message {\n role: 'user' | 'assistant' | 'system' | 'developer' | 'tool';\n content: string | string[] | null;\n name?: string;\n}\n\nexport interface Request {\n messages: Message[];\n model: Model;\n responseFormat?: any;\n validator?: any;\n addMessage(message: Message): void;\n}\n\nexport interface ProviderResponse {\n content: string;\n model: string;\n usage?: {\n inputTokens: number;\n outputTokens: number;\n };\n toolCalls?: Array<{\n id: string;\n type: 'function';\n function: {\n name: string;\n arguments: string;\n };\n }>;\n}\n\nexport interface ExecutionOptions {\n apiKey?: string;\n model?: string;\n temperature?: number;\n maxTokens?: number;\n timeout?: number;\n retries?: number;\n}\n\nexport interface Provider {\n readonly name: string;\n execute(request: Request, options?: ExecutionOptions): Promise<ProviderResponse>;\n supportsModel?(model: Model): boolean;\n}\n\n/**\n * Gemini Provider implementation\n */\nexport class GeminiProvider implements Provider {\n readonly name = 'gemini';\n\n /**\n * Check if this provider supports a given model\n */\n supportsModel(model: Model): boolean {\n if (!model) return false;\n return model.startsWith('gemini');\n }\n\n /**\n * Execute a request against Gemini\n */\n async execute(\n request: Request,\n options: ExecutionOptions = {}\n ): Promise<ProviderResponse> {\n const apiKey = options.apiKey || process.env.GEMINI_API_KEY;\n if (!apiKey) throw new Error('Gemini API key is required');\n\n const genAI = new GoogleGenerativeAI(apiKey);\n\n const modelName = options.model || request.model || 'gemini-1.5-pro';\n\n // Handle 
generation config for structured output\n const generationConfig: any = {};\n\n if (request.responseFormat?.type === 'json_schema') {\n generationConfig.responseMimeType = 'application/json';\n\n const openAISchema = request.responseFormat.json_schema.schema;\n\n // Map schema types to uppercase for Gemini\n const mapSchema = (s: any): any => {\n if (!s) return undefined;\n\n const newSchema: any = { ...s };\n\n if (newSchema.type) {\n newSchema.type =\n typeof newSchema.type === 'string'\n ? (newSchema.type as string).toUpperCase()\n : newSchema.type;\n }\n\n if (newSchema.properties) {\n const newProps: any = {};\n for (const [key, val] of Object.entries(newSchema.properties)) {\n newProps[key] = mapSchema(val);\n }\n newSchema.properties = newProps;\n }\n\n if (newSchema.items) {\n newSchema.items = mapSchema(newSchema.items);\n }\n\n delete newSchema.additionalProperties;\n delete newSchema['$schema'];\n\n return newSchema;\n };\n\n generationConfig.responseSchema = mapSchema(openAISchema);\n }\n\n // Extract system instruction\n let systemInstruction = '';\n\n for (const msg of request.messages) {\n if (msg.role === 'system' || msg.role === 'developer') {\n systemInstruction +=\n (typeof msg.content === 'string'\n ? msg.content\n : JSON.stringify(msg.content)) + '\\n\\n';\n }\n }\n\n const configuredModel = genAI.getGenerativeModel({\n model: modelName,\n systemInstruction: systemInstruction\n ? systemInstruction.trim()\n : undefined,\n generationConfig,\n });\n\n // Build history/messages\n const chatHistory = [];\n let lastUserMessage = '';\n\n for (const msg of request.messages) {\n if (msg.role === 'system' || msg.role === 'developer') continue;\n\n const content =\n typeof msg.content === 'string'\n ? msg.content\n : JSON.stringify(msg.content);\n\n if (msg.role === 'user') {\n lastUserMessage = content;\n }\n\n chatHistory.push({\n role: msg.role === 'assistant' ? 
'model' : 'user',\n parts: [{ text: content }],\n });\n }\n\n let result;\n\n if (chatHistory.length > 1) {\n const lastMsg = chatHistory.pop();\n const chat = configuredModel.startChat({\n history: chatHistory,\n });\n result = await chat.sendMessage(lastMsg?.parts[0].text || '');\n } else {\n result = await configuredModel.generateContent(lastUserMessage || ' ');\n }\n\n const response = await result.response;\n const text = response.text();\n\n return {\n content: text,\n model: modelName,\n usage: response.usageMetadata\n ? {\n inputTokens: response.usageMetadata.promptTokenCount,\n outputTokens: response.usageMetadata.candidatesTokenCount,\n }\n : undefined,\n };\n }\n}\n\n/**\n * Create a new Gemini provider instance\n */\nexport function createGeminiProvider(): GeminiProvider {\n return new GeminiProvider();\n}\n\n/**\n * Package version\n */\nexport const VERSION = '0.0.1';\n\nexport default GeminiProvider;\n"],"names":["GoogleGenerativeAI"],"mappings":";;;AA+DO,MAAM,eAAmC;AAAA,EACnC,OAAO;AAAA;AAAA;AAAA;AAAA,EAKhB,cAAc,OAAuB;AACjC,QAAI,CAAC,MAAO,QAAO;AACnB,WAAO,MAAM,WAAW,QAAQ;AAAA,EACpC;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,QACF,SACA,UAA4B,IACH;AACzB,UAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI;AAC7C,QAAI,CAAC,OAAQ,OAAM,IAAI,MAAM,4BAA4B;AAEzD,UAAM,QAAQ,IAAIA,aAAAA,mBAAmB,MAAM;AAE3C,UAAM,YAAY,QAAQ,SAAS,QAAQ,SAAS;AAGpD,UAAM,mBAAwB,CAAA;AAE9B,QAAI,QAAQ,gBAAgB,SAAS,eAAe;AAChD,uBAAiB,mBAAmB;AAEpC,YAAM,eAAe,QAAQ,eAAe,YAAY;AAGxD,YAAM,YAAY,CAAC,MAAgB;AAC/B,YAAI,CAAC,EAAG,QAAO;AAEf,cAAM,YAAiB,EAAE,GAAG,EAAA;AAE5B,YAAI,UAAU,MAAM;AAChB,oBAAU,OACN,OAAO,UAAU,SAAS,WACnB,UAAU,KAAgB,gBAC3B,UAAU;AAAA,QACxB;AAEA,YAAI,UAAU,YAAY;AACtB,gBAAM,WAAgB,CAAA;AACtB,qBAAW,CAAC,KAAK,GAAG,KAAK,OAAO,QAAQ,UAAU,UAAU,GAAG;AAC3D,qBAAS,GAAG,IAAI,UAAU,GAAG;AAAA,UACjC;AACA,oBAAU,aAAa;AAAA,QAC3B;AAEA,YAAI,UAAU,OAAO;AACjB,oBAAU,QAAQ,UAAU,UAAU,KAAK;AAAA,QAC/C;AAEA,eAAO,UAAU;AACjB,eAAO,UAAU,SAAS;AAE1B,eAAO;AAAA,MACX;AAEA,uBAAiB,iBAAiB,UAAU,YAAY;AAAA,IAC5D;AAGA,QAAI,oBAAoB;AAExB,eAAW,OAAO,QAAQ,UAAU;AAChC,UAAI,IAAI,SAAS,YAAY
,IAAI,SAAS,aAAa;AACnD,8BACK,OAAO,IAAI,YAAY,WAClB,IAAI,UACJ,KAAK,UAAU,IAAI,OAAO,KAAK;AAAA,MAC7C;AAAA,IACJ;AAEA,UAAM,kBAAkB,MAAM,mBAAmB;AAAA,MAC7C,OAAO;AAAA,MACP,mBAAmB,oBACb,kBAAkB,KAAA,IAClB;AAAA,MACN;AAAA,IAAA,CACH;AAGD,UAAM,cAAc,CAAA;AACpB,QAAI,kBAAkB;AAEtB,eAAW,OAAO,QAAQ,UAAU;AAChC,UAAI,IAAI,SAAS,YAAY,IAAI,SAAS,YAAa;AAEvD,YAAM,UACF,OAAO,IAAI,YAAY,WACjB,IAAI,UACJ,KAAK,UAAU,IAAI,OAAO;AAEpC,UAAI,IAAI,SAAS,QAAQ;AACrB,0BAAkB;AAAA,MACtB;AAEA,kBAAY,KAAK;AAAA,QACb,MAAM,IAAI,SAAS,cAAc,UAAU;AAAA,QAC3C,OAAO,CAAC,EAAE,MAAM,SAAS;AAAA,MAAA,CAC5B;AAAA,IACL;AAEA,QAAI;AAEJ,QAAI,YAAY,SAAS,GAAG;AACxB,YAAM,UAAU,YAAY,IAAA;AAC5B,YAAM,OAAO,gBAAgB,UAAU;AAAA,QACnC,SAAS;AAAA,MAAA,CACZ;AACD,eAAS,MAAM,KAAK,YAAY,SAAS,MAAM,CAAC,EAAE,QAAQ,EAAE;AAAA,IAChE,OAAO;AACH,eAAS,MAAM,gBAAgB,gBAAgB,mBAAmB,GAAG;AAAA,IACzE;AAEA,UAAM,WAAW,MAAM,OAAO;AAC9B,UAAM,OAAO,SAAS,KAAA;AAEtB,WAAO;AAAA,MACH,SAAS;AAAA,MACT,OAAO;AAAA,MACP,OAAO,SAAS,gBACV;AAAA,QACE,aAAa,SAAS,cAAc;AAAA,QACpC,cAAc,SAAS,cAAc;AAAA,MAAA,IAEvC;AAAA,IAAA;AAAA,EAEd;AACJ;AAKO,SAAS,uBAAuC;AACnD,SAAO,IAAI,eAAA;AACf;AAKO,MAAM,UAAU;;;;;"}
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Execution Gemini Package
|
|
3
|
+
*
|
|
4
|
+
* Google Gemini provider implementation for LLM execution.
|
|
5
|
+
*
|
|
6
|
+
* @packageDocumentation
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
/**
|
|
10
|
+
* Create a new Gemini provider instance
|
|
11
|
+
*/
|
|
12
|
+
export declare function createGeminiProvider(): GeminiProvider;
|
|
13
|
+
|
|
14
|
+
export declare interface ExecutionOptions {
|
|
15
|
+
apiKey?: string;
|
|
16
|
+
model?: string;
|
|
17
|
+
temperature?: number;
|
|
18
|
+
maxTokens?: number;
|
|
19
|
+
timeout?: number;
|
|
20
|
+
retries?: number;
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
/**
|
|
24
|
+
* Gemini Provider implementation
|
|
25
|
+
*/
|
|
26
|
+
declare class GeminiProvider implements Provider {
|
|
27
|
+
readonly name = "gemini";
|
|
28
|
+
/**
|
|
29
|
+
* Check if this provider supports a given model
|
|
30
|
+
*/
|
|
31
|
+
supportsModel(model: Model): boolean;
|
|
32
|
+
/**
|
|
33
|
+
* Execute a request against Gemini
|
|
34
|
+
*/
|
|
35
|
+
execute(request: Request_2, options?: ExecutionOptions): Promise<ProviderResponse>;
|
|
36
|
+
}
|
|
37
|
+
export { GeminiProvider }
|
|
38
|
+
export default GeminiProvider;
|
|
39
|
+
|
|
40
|
+
export declare interface Message {
|
|
41
|
+
role: 'user' | 'assistant' | 'system' | 'developer' | 'tool';
|
|
42
|
+
content: string | string[] | null;
|
|
43
|
+
name?: string;
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
export declare type Model = string;
|
|
47
|
+
|
|
48
|
+
export declare interface Provider {
|
|
49
|
+
readonly name: string;
|
|
50
|
+
execute(request: Request_2, options?: ExecutionOptions): Promise<ProviderResponse>;
|
|
51
|
+
supportsModel?(model: Model): boolean;
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
export declare interface ProviderResponse {
|
|
55
|
+
content: string;
|
|
56
|
+
model: string;
|
|
57
|
+
usage?: {
|
|
58
|
+
inputTokens: number;
|
|
59
|
+
outputTokens: number;
|
|
60
|
+
};
|
|
61
|
+
toolCalls?: Array<{
|
|
62
|
+
id: string;
|
|
63
|
+
type: 'function';
|
|
64
|
+
function: {
|
|
65
|
+
name: string;
|
|
66
|
+
arguments: string;
|
|
67
|
+
};
|
|
68
|
+
}>;
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
declare interface Request_2 {
|
|
72
|
+
messages: Message[];
|
|
73
|
+
model: Model;
|
|
74
|
+
responseFormat?: any;
|
|
75
|
+
validator?: any;
|
|
76
|
+
addMessage(message: Message): void;
|
|
77
|
+
}
|
|
78
|
+
export { Request_2 as Request }
|
|
79
|
+
|
|
80
|
+
/**
|
|
81
|
+
* Package version
|
|
82
|
+
*/
|
|
83
|
+
export declare const VERSION = "0.0.1";
|
|
84
|
+
|
|
85
|
+
export { }
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
import { GoogleGenerativeAI } from "@google/generative-ai";
|
|
2
|
+
class GeminiProvider {
  // Provider identifier used by the execution layer for lookup/registration.
  name = "gemini";
  /**
   * Check if this provider supports a given model.
   * @param {string} model - Model identifier, e.g. "gemini-1.5-pro".
   * @returns {boolean} True when the id names a Gemini model.
   */
  supportsModel(model) {
    if (!model) return false;
    return model.startsWith("gemini");
  }
  /**
   * Execute a request against Gemini.
   *
   * @param {object} request - { messages, model, responseFormat?, ... }.
   * @param {object} [options] - { apiKey?, model?, temperature?, maxTokens?, ... }.
   * @returns {Promise<object>} { content, model, usage? } with token counts when reported.
   * @throws {Error} When no API key is supplied via options.apiKey or GEMINI_API_KEY.
   */
  async execute(request, options = {}) {
    const apiKey = options.apiKey || process.env.GEMINI_API_KEY;
    if (!apiKey) throw new Error("Gemini API key is required");
    const genAI = new GoogleGenerativeAI(apiKey);
    const modelName = options.model || request.model || "gemini-1.5-pro";
    const generationConfig = {};
    // FIX: temperature/maxTokens are part of ExecutionOptions (and shown in the
    // README usage example) but were previously ignored; forward them to Gemini.
    if (options.temperature !== undefined) {
      generationConfig.temperature = options.temperature;
    }
    if (options.maxTokens !== undefined) {
      generationConfig.maxOutputTokens = options.maxTokens;
    }
    if (request.responseFormat?.type === "json_schema") {
      generationConfig.responseMimeType = "application/json";
      const openAISchema = request.responseFormat.json_schema.schema;
      // Gemini's responseSchema uses uppercase type names (e.g. "OBJECT") and
      // rejects OpenAI-specific keys such as additionalProperties and $schema.
      const mapSchema = (s) => {
        if (!s) return void 0;
        const newSchema = { ...s };
        if (newSchema.type) {
          newSchema.type = typeof newSchema.type === "string" ? newSchema.type.toUpperCase() : newSchema.type;
        }
        if (newSchema.properties) {
          const newProps = {};
          for (const [key, val] of Object.entries(newSchema.properties)) {
            newProps[key] = mapSchema(val);
          }
          newSchema.properties = newProps;
        }
        if (newSchema.items) {
          newSchema.items = mapSchema(newSchema.items);
        }
        delete newSchema.additionalProperties;
        delete newSchema["$schema"];
        return newSchema;
      };
      generationConfig.responseSchema = mapSchema(openAISchema);
    }
    // Gemini takes the system prompt out-of-band: collect all system/developer
    // messages into a single systemInstruction string.
    let systemInstruction = "";
    for (const msg of request.messages) {
      if (msg.role === "system" || msg.role === "developer") {
        systemInstruction += (typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content)) + "\n\n";
      }
    }
    const configuredModel = genAI.getGenerativeModel({
      model: modelName,
      systemInstruction: systemInstruction ? systemInstruction.trim() : void 0,
      generationConfig
    });
    // Build chat turns; every non-assistant role (user/tool) maps to "user".
    const chatHistory = [];
    let lastUserMessage = "";
    for (const msg of request.messages) {
      if (msg.role === "system" || msg.role === "developer") continue;
      const content = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
      if (msg.role === "user") {
        lastUserMessage = content;
      }
      chatHistory.push({
        role: msg.role === "assistant" ? "model" : "user",
        parts: [{ text: content }]
      });
    }
    let result;
    if (chatHistory.length > 1) {
      const lastMsg = chatHistory.pop();
      // FIX: the Gemini SDK requires chat history to begin with a "user" turn;
      // drop any leading "model" turns so startChat does not reject the request.
      while (chatHistory.length > 0 && chatHistory[0].role === "model") {
        chatHistory.shift();
      }
      const chat = configuredModel.startChat({
        history: chatHistory
      });
      result = await chat.sendMessage(lastMsg?.parts[0].text || "");
    } else {
      // Single-turn request: send the last user message directly (a lone
      // space keeps the API from rejecting an empty prompt).
      result = await configuredModel.generateContent(lastUserMessage || " ");
    }
    const response = await result.response;
    const text = response.text();
    return {
      content: text,
      model: modelName,
      usage: response.usageMetadata ? {
        inputTokens: response.usageMetadata.promptTokenCount,
        outputTokens: response.usageMetadata.candidatesTokenCount
      } : void 0
    };
  }
}
|
|
91
|
+
/**
 * Convenience factory: build a fresh GeminiProvider instance.
 * @returns {GeminiProvider} A new provider ready to execute requests.
 */
function createGeminiProvider() {
  const provider = new GeminiProvider();
  return provider;
}
|
|
94
|
+
const VERSION = "0.0.1";
|
|
95
|
+
export {
|
|
96
|
+
GeminiProvider,
|
|
97
|
+
VERSION,
|
|
98
|
+
createGeminiProvider,
|
|
99
|
+
GeminiProvider as default
|
|
100
|
+
};
|
|
101
|
+
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.js","sources":["../src/index.ts"],"sourcesContent":["/**\n * Execution Gemini Package\n *\n * Google Gemini provider implementation for LLM execution.\n *\n * @packageDocumentation\n */\n\nimport { GoogleGenerativeAI } from '@google/generative-ai';\n\n// ===== INLINE TYPES (from 'execution' package) =====\n\nexport type Model = string;\n\nexport interface Message {\n role: 'user' | 'assistant' | 'system' | 'developer' | 'tool';\n content: string | string[] | null;\n name?: string;\n}\n\nexport interface Request {\n messages: Message[];\n model: Model;\n responseFormat?: any;\n validator?: any;\n addMessage(message: Message): void;\n}\n\nexport interface ProviderResponse {\n content: string;\n model: string;\n usage?: {\n inputTokens: number;\n outputTokens: number;\n };\n toolCalls?: Array<{\n id: string;\n type: 'function';\n function: {\n name: string;\n arguments: string;\n };\n }>;\n}\n\nexport interface ExecutionOptions {\n apiKey?: string;\n model?: string;\n temperature?: number;\n maxTokens?: number;\n timeout?: number;\n retries?: number;\n}\n\nexport interface Provider {\n readonly name: string;\n execute(request: Request, options?: ExecutionOptions): Promise<ProviderResponse>;\n supportsModel?(model: Model): boolean;\n}\n\n/**\n * Gemini Provider implementation\n */\nexport class GeminiProvider implements Provider {\n readonly name = 'gemini';\n\n /**\n * Check if this provider supports a given model\n */\n supportsModel(model: Model): boolean {\n if (!model) return false;\n return model.startsWith('gemini');\n }\n\n /**\n * Execute a request against Gemini\n */\n async execute(\n request: Request,\n options: ExecutionOptions = {}\n ): Promise<ProviderResponse> {\n const apiKey = options.apiKey || process.env.GEMINI_API_KEY;\n if (!apiKey) throw new Error('Gemini API key is required');\n\n const genAI = new GoogleGenerativeAI(apiKey);\n\n const modelName = options.model || request.model || 'gemini-1.5-pro';\n\n // Handle 
generation config for structured output\n const generationConfig: any = {};\n\n if (request.responseFormat?.type === 'json_schema') {\n generationConfig.responseMimeType = 'application/json';\n\n const openAISchema = request.responseFormat.json_schema.schema;\n\n // Map schema types to uppercase for Gemini\n const mapSchema = (s: any): any => {\n if (!s) return undefined;\n\n const newSchema: any = { ...s };\n\n if (newSchema.type) {\n newSchema.type =\n typeof newSchema.type === 'string'\n ? (newSchema.type as string).toUpperCase()\n : newSchema.type;\n }\n\n if (newSchema.properties) {\n const newProps: any = {};\n for (const [key, val] of Object.entries(newSchema.properties)) {\n newProps[key] = mapSchema(val);\n }\n newSchema.properties = newProps;\n }\n\n if (newSchema.items) {\n newSchema.items = mapSchema(newSchema.items);\n }\n\n delete newSchema.additionalProperties;\n delete newSchema['$schema'];\n\n return newSchema;\n };\n\n generationConfig.responseSchema = mapSchema(openAISchema);\n }\n\n // Extract system instruction\n let systemInstruction = '';\n\n for (const msg of request.messages) {\n if (msg.role === 'system' || msg.role === 'developer') {\n systemInstruction +=\n (typeof msg.content === 'string'\n ? msg.content\n : JSON.stringify(msg.content)) + '\\n\\n';\n }\n }\n\n const configuredModel = genAI.getGenerativeModel({\n model: modelName,\n systemInstruction: systemInstruction\n ? systemInstruction.trim()\n : undefined,\n generationConfig,\n });\n\n // Build history/messages\n const chatHistory = [];\n let lastUserMessage = '';\n\n for (const msg of request.messages) {\n if (msg.role === 'system' || msg.role === 'developer') continue;\n\n const content =\n typeof msg.content === 'string'\n ? msg.content\n : JSON.stringify(msg.content);\n\n if (msg.role === 'user') {\n lastUserMessage = content;\n }\n\n chatHistory.push({\n role: msg.role === 'assistant' ? 
'model' : 'user',\n parts: [{ text: content }],\n });\n }\n\n let result;\n\n if (chatHistory.length > 1) {\n const lastMsg = chatHistory.pop();\n const chat = configuredModel.startChat({\n history: chatHistory,\n });\n result = await chat.sendMessage(lastMsg?.parts[0].text || '');\n } else {\n result = await configuredModel.generateContent(lastUserMessage || ' ');\n }\n\n const response = await result.response;\n const text = response.text();\n\n return {\n content: text,\n model: modelName,\n usage: response.usageMetadata\n ? {\n inputTokens: response.usageMetadata.promptTokenCount,\n outputTokens: response.usageMetadata.candidatesTokenCount,\n }\n : undefined,\n };\n }\n}\n\n/**\n * Create a new Gemini provider instance\n */\nexport function createGeminiProvider(): GeminiProvider {\n return new GeminiProvider();\n}\n\n/**\n * Package version\n */\nexport const VERSION = '0.0.1';\n\nexport default GeminiProvider;\n"],"names":[],"mappings":";AA+DO,MAAM,eAAmC;AAAA,EACnC,OAAO;AAAA;AAAA;AAAA;AAAA,EAKhB,cAAc,OAAuB;AACjC,QAAI,CAAC,MAAO,QAAO;AACnB,WAAO,MAAM,WAAW,QAAQ;AAAA,EACpC;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,QACF,SACA,UAA4B,IACH;AACzB,UAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI;AAC7C,QAAI,CAAC,OAAQ,OAAM,IAAI,MAAM,4BAA4B;AAEzD,UAAM,QAAQ,IAAI,mBAAmB,MAAM;AAE3C,UAAM,YAAY,QAAQ,SAAS,QAAQ,SAAS;AAGpD,UAAM,mBAAwB,CAAA;AAE9B,QAAI,QAAQ,gBAAgB,SAAS,eAAe;AAChD,uBAAiB,mBAAmB;AAEpC,YAAM,eAAe,QAAQ,eAAe,YAAY;AAGxD,YAAM,YAAY,CAAC,MAAgB;AAC/B,YAAI,CAAC,EAAG,QAAO;AAEf,cAAM,YAAiB,EAAE,GAAG,EAAA;AAE5B,YAAI,UAAU,MAAM;AAChB,oBAAU,OACN,OAAO,UAAU,SAAS,WACnB,UAAU,KAAgB,gBAC3B,UAAU;AAAA,QACxB;AAEA,YAAI,UAAU,YAAY;AACtB,gBAAM,WAAgB,CAAA;AACtB,qBAAW,CAAC,KAAK,GAAG,KAAK,OAAO,QAAQ,UAAU,UAAU,GAAG;AAC3D,qBAAS,GAAG,IAAI,UAAU,GAAG;AAAA,UACjC;AACA,oBAAU,aAAa;AAAA,QAC3B;AAEA,YAAI,UAAU,OAAO;AACjB,oBAAU,QAAQ,UAAU,UAAU,KAAK;AAAA,QAC/C;AAEA,eAAO,UAAU;AACjB,eAAO,UAAU,SAAS;AAE1B,eAAO;AAAA,MACX;AAEA,uBAAiB,iBAAiB,UAAU,YAAY;AAAA,IAC5D;AAGA,QAAI,oBAAoB;AAExB,eAAW,OAAO,QAAQ,UAAU;AAChC,UAAI,IAAI,SAAS,YAAY,IAAI,SAAS,aAAa;AACnD,8BACK,O
AAO,IAAI,YAAY,WAClB,IAAI,UACJ,KAAK,UAAU,IAAI,OAAO,KAAK;AAAA,MAC7C;AAAA,IACJ;AAEA,UAAM,kBAAkB,MAAM,mBAAmB;AAAA,MAC7C,OAAO;AAAA,MACP,mBAAmB,oBACb,kBAAkB,KAAA,IAClB;AAAA,MACN;AAAA,IAAA,CACH;AAGD,UAAM,cAAc,CAAA;AACpB,QAAI,kBAAkB;AAEtB,eAAW,OAAO,QAAQ,UAAU;AAChC,UAAI,IAAI,SAAS,YAAY,IAAI,SAAS,YAAa;AAEvD,YAAM,UACF,OAAO,IAAI,YAAY,WACjB,IAAI,UACJ,KAAK,UAAU,IAAI,OAAO;AAEpC,UAAI,IAAI,SAAS,QAAQ;AACrB,0BAAkB;AAAA,MACtB;AAEA,kBAAY,KAAK;AAAA,QACb,MAAM,IAAI,SAAS,cAAc,UAAU;AAAA,QAC3C,OAAO,CAAC,EAAE,MAAM,SAAS;AAAA,MAAA,CAC5B;AAAA,IACL;AAEA,QAAI;AAEJ,QAAI,YAAY,SAAS,GAAG;AACxB,YAAM,UAAU,YAAY,IAAA;AAC5B,YAAM,OAAO,gBAAgB,UAAU;AAAA,QACnC,SAAS;AAAA,MAAA,CACZ;AACD,eAAS,MAAM,KAAK,YAAY,SAAS,MAAM,CAAC,EAAE,QAAQ,EAAE;AAAA,IAChE,OAAO;AACH,eAAS,MAAM,gBAAgB,gBAAgB,mBAAmB,GAAG;AAAA,IACzE;AAEA,UAAM,WAAW,MAAM,OAAO;AAC9B,UAAM,OAAO,SAAS,KAAA;AAEtB,WAAO;AAAA,MACH,SAAS;AAAA,MACT,OAAO;AAAA,MACP,OAAO,SAAS,gBACV;AAAA,QACE,aAAa,SAAS,cAAc;AAAA,QACpC,cAAc,SAAS,cAAc;AAAA,MAAA,IAEvC;AAAA,IAAA;AAAA,EAEd;AACJ;AAKO,SAAS,uBAAuC;AACnD,SAAO,IAAI,eAAA;AACf;AAKO,MAAM,UAAU;"}
|
package/package.json
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@riotprompt/execution-gemini",
|
|
3
|
+
"version": "0.0.2",
|
|
4
|
+
"description": "Google Gemini provider for execution interface",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "./dist/index.cjs",
|
|
7
|
+
"module": "./dist/index.js",
|
|
8
|
+
"types": "./dist/index.d.ts",
|
|
9
|
+
"exports": {
|
|
10
|
+
".": {
|
|
11
|
+
"types": "./dist/index.d.ts",
|
|
12
|
+
"import": "./dist/index.js",
|
|
13
|
+
"require": "./dist/index.cjs"
|
|
14
|
+
}
|
|
15
|
+
},
|
|
16
|
+
"scripts": {
|
|
17
|
+
"clean": "rm -rf dist",
|
|
18
|
+
"build": "vite build",
|
|
19
|
+
"test": "vitest run",
|
|
20
|
+
"lint": "eslint src",
|
|
21
|
+
"prepublishOnly": "npm run clean && npm run build"
|
|
22
|
+
},
|
|
23
|
+
"keywords": [
|
|
24
|
+
"llm",
|
|
25
|
+
"google",
|
|
26
|
+
"gemini",
|
|
27
|
+
"execution",
|
|
28
|
+
"provider"
|
|
29
|
+
],
|
|
30
|
+
"author": "Tim O'Brien <tobrien@discursive.com>",
|
|
31
|
+
"license": "Apache-2.0",
|
|
32
|
+
"dependencies": {
|
|
33
|
+
"@google/generative-ai": "^0.24.0"
|
|
34
|
+
},
|
|
35
|
+
"devDependencies": {
|
|
36
|
+
"@types/node": "^25.0.6",
|
|
37
|
+
"@typescript-eslint/eslint-plugin": "^8.33.1",
|
|
38
|
+
"@typescript-eslint/parser": "^8.33.1",
|
|
39
|
+
"eslint": "^9.28.0",
|
|
40
|
+
"globals": "^16.2.0",
|
|
41
|
+
"typescript": "^5.8.3",
|
|
42
|
+
"vite": "^7.0.4",
|
|
43
|
+
"vite-plugin-dts": "^4.5.4",
|
|
44
|
+
"vitest": "^3.2.4"
|
|
45
|
+
}
|
|
46
|
+
}
|