@tepa/provider-gemini 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +59 -0
- package/dist/index.cjs +173 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +49 -0
- package/dist/index.d.ts +49 -0
- package/dist/index.js +141 -0
- package/dist/index.js.map +1 -0
- package/package.json +55 -0
package/README.md
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
# @tepa/provider-gemini
|
|
2
|
+
|
|
3
|
+
Google Gemini LLM provider for the Tepa agent pipeline. Uses the [@google/genai](https://www.npmjs.com/package/@google/genai) SDK.
|
|
4
|
+
|
|
5
|
+
## Install
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install @tepa/provider-gemini
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Setup
|
|
12
|
+
|
|
13
|
+
Set your Gemini API key as an environment variable:
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
export GEMINI_API_KEY=...
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
Alternatively, set the `GOOGLE_API_KEY` environment variable, or pass the API key directly to the constructor.
|
|
20
|
+
|
|
21
|
+
## Usage
|
|
22
|
+
|
|
23
|
+
```typescript
|
|
24
|
+
import { Tepa } from "tepa";
|
|
25
|
+
import { GeminiProvider } from "@tepa/provider-gemini";
|
|
26
|
+
|
|
27
|
+
const tepa = new Tepa({
|
|
28
|
+
tools: [/* ... */],
|
|
29
|
+
provider: new GeminiProvider(),
|
|
30
|
+
});
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
### Provider Options
|
|
34
|
+
|
|
35
|
+
```typescript
|
|
36
|
+
const provider = new GeminiProvider({
|
|
37
|
+
apiKey: "...", // Defaults to GEMINI_API_KEY or GOOGLE_API_KEY env var
|
|
38
|
+
maxRetries: 3, // Default: 3
|
|
39
|
+
retryBaseDelayMs: 1000, // Base delay for exponential backoff
|
|
40
|
+
});
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
## Logging
|
|
44
|
+
|
|
45
|
+
Every LLM call is automatically logged to a JSONL file in `.tepa/logs/`. You can disable the default file logger, add custom log listeners, or send logs to external services like Prometheus, NewRelic, or Datadog using the `onLog()` method:
|
|
46
|
+
|
|
47
|
+
```typescript
|
|
48
|
+
const provider = new GeminiProvider({ defaultLog: false });
|
|
49
|
+
|
|
50
|
+
provider.onLog((entry) => {
|
|
51
|
+
externalLogger.send(entry);
|
|
52
|
+
});
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
See [`@tepa/provider-core`](../provider-core) for full logging documentation.
|
|
56
|
+
|
|
57
|
+
## Native Tool Use
|
|
58
|
+
|
|
59
|
+
This provider supports native tool calling via Gemini's function calling API. When tool schemas are passed via `options.tools`, they are forwarded as function declarations. The LLM returns structured `functionCall` parts with pre-parsed parameters, eliminating text-based JSON parsing errors.
|
package/dist/index.cjs
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
"use strict";
// esbuild-generated CommonJS interop helpers.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Define each entry of `all` on `target` as a lazy, enumerable getter.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copy own properties of `from` onto `to` as getters, skipping `except` and
// keys already present; enumerability follows the source property descriptor.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Tag the export object as an ES module and expose its bindings for CJS consumers.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
19
|
+
|
|
20
|
+
// src/index.ts
|
|
21
|
+
var index_exports = {};
// Register the package's public API as lazy getters (esbuild export map),
// then publish it through module.exports with the __esModule marker.
__export(index_exports, {
  GeminiProvider: () => GeminiProvider,
  extractText: () => extractText,
  extractToolUse: () => extractToolUse,
  toFinishReason: () => toFinishReason,
  toGeminiContents: () => toGeminiContents,
  toGeminiTools: () => toGeminiTools
});
module.exports = __toCommonJS(index_exports);
|
31
|
+
|
|
32
|
+
// src/gemini.ts
|
|
33
|
+
var import_genai = require("@google/genai");
|
|
34
|
+
var import_provider_core = require("@tepa/provider-core");
|
|
35
|
+
|
|
36
|
+
// src/formatting.ts
|
|
37
|
+
// Convert a Tepa LLMMessage array into Gemini "contents". Gemini uses the
// role "model" where Tepa uses "assistant"; every other role becomes "user".
// Each message body is wrapped in a single text part.
function toGeminiContents(messages) {
  const contents = [];
  for (const msg of messages) {
    const role = msg.role === "assistant" ? "model" : "user";
    contents.push({ role, parts: [{ text: msg.content }] });
  }
  return contents;
}
|
|
43
|
+
// Map a Gemini candidate finishReason onto Tepa's finishReason vocabulary.
// Only "MAX_TOKENS" is distinguished; "STOP", null, undefined, and any
// unrecognized value all collapse to "end_turn".
function toFinishReason(reason) {
  if (reason === "MAX_TOKENS") {
    return "max_tokens";
  }
  return "end_turn";
}
|
|
52
|
+
// Read the response's `text` accessor, defaulting to the empty string when
// the value is null or undefined. The accessor is read exactly once.
function extractText(response) {
  const { text } = response;
  return text ?? "";
}
|
|
55
|
+
// Collect the functionCall parts of the first candidate into Tepa
// LLMToolUseBlock objects. Gemini does not assign call ids, so a synthetic
// sequential id ("gemini-call-0", "gemini-call-1", ...) is generated;
// parts without a functionCall name are skipped without consuming an id.
function extractToolUse(response) {
  const candidate = response.candidates?.[0];
  const parts = candidate?.content?.parts ?? [];
  const calls = [];
  for (const part of parts) {
    const fc = part.functionCall;
    if (!fc?.name) {
      continue;
    }
    calls.push({
      id: `gemini-call-${calls.length}`,
      name: fc.name,
      input: fc.args ?? {}
    });
  }
  return calls;
}
|
|
70
|
+
// Convert Tepa ToolSchema objects into Gemini's tool format: a single-element
// array holding one functionDeclarations list. Parameter types are upper-cased
// to match Gemini's Type enum ("string" -> "STRING"), and a parameter is
// treated as required unless its schema explicitly sets `required: false`.
function toGeminiTools(tools) {
  const toDeclaration = (tool) => {
    const properties = {};
    const required = [];
    for (const [paramName, spec] of Object.entries(tool.parameters)) {
      properties[paramName] = {
        type: spec.type.toUpperCase(),
        description: spec.description
      };
      if (spec.required !== false) {
        required.push(paramName);
      }
    }
    return {
      name: tool.name,
      description: tool.description,
      parameters: { type: "OBJECT", properties, required }
    };
  };
  return [{ functionDeclarations: tools.map(toDeclaration) }];
}
|
|
95
|
+
|
|
96
|
+
// src/gemini.ts
|
|
97
|
+
// Model used when the caller does not specify options.model.
var DEFAULT_MODEL = "gemini-3-flash-preview";
// Output-token cap (64,000) applied when options.maxTokens is not set.
var DEFAULT_MAX_TOKENS = 64e3;
|
|
99
|
+
/**
 * LLM provider implementation for Google Gemini models.
 *
 * Extends BaseLLMProvider, which drives retries/backoff and logging and calls
 * back into doComplete / isRetryable / getRetryAfterMs / isRateLimitError.
 */
var GeminiProvider = class extends import_provider_core.BaseLLMProvider {
  providerName = "gemini";
  client;
  /**
   * @param {Object} [options] - BaseLLMProvider options plus an optional
   *   `apiKey`; falls back to the GEMINI_API_KEY, then GOOGLE_API_KEY env vars.
   */
  constructor(options = {}) {
    super(options);
    const apiKey = options.apiKey ?? process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY;
    this.client = new import_genai.GoogleGenAI({ apiKey });
  }
  /**
   * Perform a single non-streaming Gemini completion.
   *
   * @param {Array} messages - Tepa LLMMessage list (converted to Gemini contents).
   * @param {Object} options - model, maxTokens, temperature, systemPrompt, tools.
   * @returns {Promise<Object>} LLMResponse: text, tokensUsed {input, output},
   *   finishReason, and toolUse blocks when the model requested function calls.
   */
  async doComplete(messages, options) {
    const contents = toGeminiContents(messages);
    const config = {
      maxOutputTokens: options.maxTokens ?? DEFAULT_MAX_TOKENS
    };
    if (options.temperature !== void 0) {
      config.temperature = options.temperature;
    }
    if (options.systemPrompt) {
      config.systemInstruction = options.systemPrompt;
    }
    if (options.tools && options.tools.length > 0) {
      // FIX: @google/genai reads tools from `config` (GenerateContentConfig.tools),
      // alongside systemInstruction/temperature above. The previous top-level
      // `params.tools` assignment was silently ignored by the SDK, so native
      // function calling never reached the API.
      config.tools = toGeminiTools(options.tools);
    }
    const params = {
      model: options.model || DEFAULT_MODEL,
      contents,
      config
    };
    const response = await this.client.models.generateContent(params);
    const candidates = response.candidates ?? [];
    const finishReason = candidates[0]?.finishReason ?? null;
    const usage = response.usageMetadata ?? {};
    const toolUse = extractToolUse(response);
    const hasToolUse = toolUse.length > 0;
    return {
      text: extractText(response),
      tokensUsed: {
        input: usage.promptTokenCount ?? 0,
        output: usage.candidatesTokenCount ?? 0
      },
      // Function calls take precedence over the model-reported finish reason.
      finishReason: hasToolUse ? "tool_use" : toFinishReason(finishReason),
      ...hasToolUse && { toolUse }
    };
  }
  /**
   * Transient-failure check used by the base class's retry loop. TypeErrors
   * (e.g. fetch-level network failures) are retried; for ApiError, 429 and
   * 5xx statuses are retryable while 400/401/403/404 are permanent.
   */
  isRetryable(error) {
    if (error instanceof TypeError) {
      return true;
    }
    if (error instanceof import_genai.ApiError) {
      const status = error.status;
      if (status === 400 || status === 401 || status === 403 || status === 404) {
        return false;
      }
      if (status === 429 || status !== void 0 && status >= 500) {
        return true;
      }
    }
    return false;
  }
  /** No Retry-After hint is extracted from Gemini errors; default backoff applies. */
  getRetryAfterMs(_error) {
    return null;
  }
  /** True when the error is a Gemini ApiError with HTTP status 429. */
  isRateLimitError(error) {
    return error instanceof import_genai.ApiError && error.status === 429;
  }
};
|
|
164
|
+
// Annotate the CommonJS export names for ESM import in node:
// (dead-code pattern recognized by Node's cjs-module-lexer; the `0 &&`
// guard means this assignment never actually executes)
0 && (module.exports = {
  GeminiProvider,
  extractText,
  extractToolUse,
  toFinishReason,
  toGeminiContents,
  toGeminiTools
});
//# sourceMappingURL=index.cjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/index.ts","../src/gemini.ts","../src/formatting.ts"],"sourcesContent":["export { GeminiProvider, type GeminiProviderOptions } from \"./gemini.js\";\nexport { toGeminiContents, toGeminiTools, toFinishReason, extractText, extractToolUse } from \"./formatting.js\";\n","import { GoogleGenAI, ApiError } from \"@google/genai\";\nimport type { LLMMessage, LLMRequestOptions, LLMResponse } from \"@tepa/types\";\nimport { BaseLLMProvider, type BaseLLMProviderOptions } from \"@tepa/provider-core\";\nimport { toGeminiContents, toGeminiTools, toFinishReason, extractText, extractToolUse } from \"./formatting.js\";\n\nconst DEFAULT_MODEL = \"gemini-3-flash-preview\";\nconst DEFAULT_MAX_TOKENS = 64_000;\n\nexport interface GeminiProviderOptions extends BaseLLMProviderOptions {\n /** Gemini API key. Falls back to GEMINI_API_KEY or GOOGLE_API_KEY env variables. */\n apiKey?: string;\n}\n\n/** LLM provider implementation for Google Gemini models. */\nexport class GeminiProvider extends BaseLLMProvider {\n protected readonly providerName = \"gemini\";\n private readonly client: GoogleGenAI;\n\n constructor(options: GeminiProviderOptions = {}) {\n super(options);\n const apiKey = options.apiKey ?? process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY;\n this.client = new GoogleGenAI({ apiKey });\n }\n\n protected async doComplete(\n messages: LLMMessage[],\n options: LLMRequestOptions,\n ): Promise<LLMResponse> {\n const contents = toGeminiContents(messages);\n\n const config: Record<string, unknown> = {\n maxOutputTokens: options.maxTokens ?? 
DEFAULT_MAX_TOKENS,\n };\n\n if (options.temperature !== undefined) {\n config.temperature = options.temperature;\n }\n\n if (options.systemPrompt) {\n config.systemInstruction = options.systemPrompt;\n }\n\n const params: Record<string, unknown> = {\n model: options.model || DEFAULT_MODEL,\n contents,\n config,\n };\n\n if (options.tools && options.tools.length > 0) {\n params.tools = toGeminiTools(options.tools);\n }\n\n const response = await this.client.models.generateContent(params as any);\n\n const candidates = response.candidates ?? [];\n const finishReason = candidates[0]?.finishReason ?? null;\n const usage = response.usageMetadata ?? {};\n\n const toolUse = extractToolUse(response);\n const hasToolUse = toolUse.length > 0;\n\n return {\n text: extractText(response),\n tokensUsed: {\n input: usage.promptTokenCount ?? 0,\n output: usage.candidatesTokenCount ?? 0,\n },\n finishReason: hasToolUse ? \"tool_use\" : toFinishReason(finishReason),\n ...(hasToolUse && { toolUse }),\n };\n }\n\n protected isRetryable(error: unknown): boolean {\n if (error instanceof TypeError) {\n return true;\n }\n if (error instanceof ApiError) {\n const status = error.status;\n if (status === 400 || status === 401 || status === 403 || status === 404) {\n return false;\n }\n if (status === 429 || (status !== undefined && status >= 500)) {\n return true;\n }\n }\n return false;\n }\n\n protected getRetryAfterMs(_error: unknown): number | null {\n return null;\n }\n\n protected isRateLimitError(error: unknown): boolean {\n return error instanceof ApiError && error.status === 429;\n }\n}\n","import type { LLMMessage, LLMToolUseBlock, ToolSchema } from \"@tepa/types\";\n\nexport interface GeminiContent {\n role: \"user\" | \"model\";\n parts: { text: string }[];\n}\n\n/**\n * Convert Tepa LLMMessage array to Gemini contents format.\n * Maps \"assistant\" role to \"model\".\n */\nexport function toGeminiContents(messages: LLMMessage[]): GeminiContent[] {\n return messages.map((msg) => 
({\n role: msg.role === \"assistant\" ? \"model\" : \"user\",\n parts: [{ text: msg.content }],\n }));\n}\n\n/**\n * Map Gemini finish reason to Tepa finishReason.\n */\nexport function toFinishReason(\n reason: string | null | undefined,\n): \"end_turn\" | \"max_tokens\" | \"stop_sequence\" | \"tool_use\" {\n switch (reason) {\n case \"MAX_TOKENS\":\n return \"max_tokens\";\n case \"STOP\":\n default:\n return \"end_turn\";\n }\n}\n\n/**\n * Extract text from a Gemini response.\n */\nexport function extractText(response: { text?: string }): string {\n return response.text ?? \"\";\n}\n\n/**\n * Extract tool use blocks from a Gemini response.\n */\nexport function extractToolUse(response: any): LLMToolUseBlock[] {\n const parts = response.candidates?.[0]?.content?.parts ?? [];\n const blocks: LLMToolUseBlock[] = [];\n let index = 0;\n\n for (const part of parts) {\n if (part.functionCall?.name) {\n blocks.push({\n id: `gemini-call-${index++}`,\n name: part.functionCall.name,\n input: part.functionCall.args ?? 
{},\n });\n }\n }\n\n return blocks;\n}\n\n/**\n * Convert Tepa ToolSchema to Gemini function declarations.\n */\nexport function toGeminiTools(tools: ToolSchema[]): Record<string, unknown>[] {\n const functionDeclarations = tools.map((tool) => {\n const properties: Record<string, unknown> = {};\n const required: string[] = [];\n\n for (const [name, param] of Object.entries(tool.parameters)) {\n properties[name] = {\n type: param.type.toUpperCase(),\n description: param.description,\n };\n if (param.required !== false) {\n required.push(name);\n }\n }\n\n return {\n name: tool.name,\n description: tool.description,\n parameters: {\n type: \"OBJECT\",\n properties,\n required,\n },\n };\n });\n\n return [{ functionDeclarations }];\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAA,mBAAsC;AAEtC,2BAA6D;;;ACStD,SAAS,iBAAiB,UAAyC;AACxE,SAAO,SAAS,IAAI,CAAC,SAAS;AAAA,IAC5B,MAAM,IAAI,SAAS,cAAc,UAAU;AAAA,IAC3C,OAAO,CAAC,EAAE,MAAM,IAAI,QAAQ,CAAC;AAAA,EAC/B,EAAE;AACJ;AAKO,SAAS,eACd,QAC0D;AAC1D,UAAQ,QAAQ;AAAA,IACd,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AAAA,IACL;AACE,aAAO;AAAA,EACX;AACF;AAKO,SAAS,YAAY,UAAqC;AAC/D,SAAO,SAAS,QAAQ;AAC1B;AAKO,SAAS,eAAe,UAAkC;AAC/D,QAAM,QAAQ,SAAS,aAAa,CAAC,GAAG,SAAS,SAAS,CAAC;AAC3D,QAAM,SAA4B,CAAC;AACnC,MAAI,QAAQ;AAEZ,aAAW,QAAQ,OAAO;AACxB,QAAI,KAAK,cAAc,MAAM;AAC3B,aAAO,KAAK;AAAA,QACV,IAAI,eAAe,OAAO;AAAA,QAC1B,MAAM,KAAK,aAAa;AAAA,QACxB,OAAO,KAAK,aAAa,QAAQ,CAAC;AAAA,MACpC,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO;AACT;AAKO,SAAS,cAAc,OAAgD;AAC5E,QAAM,uBAAuB,MAAM,IAAI,CAAC,SAAS;AAC/C,UAAM,aAAsC,CAAC;AAC7C,UAAM,WAAqB,CAAC;AAE5B,eAAW,CAAC,MAAM,KAAK,KAAK,OAAO,QAAQ,KAAK,UAAU,GAAG;AAC3D,iBAAW,IAAI,IAAI;AAAA,QACjB,MAAM,MAAM,KAAK,YAAY;AAAA,QAC7B,aAAa,MAAM;AAAA,MACrB;AACA,UAAI,MAAM,aAAa,OAAO;AAC5B,iBAAS,KAAK,IAAI;AAAA,MACpB;AAAA,IACF;AAEA,WAAO;AAAA,MACL,MAAM,KAAK;AAAA,MACX,aAAa,KAAK;AAAA,MAClB,YAAY;AAAA,QACV,MAAM;AAAA,QACN;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAAA,EACF,CAAC;AAED,SAAO,CAAC,EAAE,qBAAqB,CAAC;AAClC;;;ADtFA,IAAM,gBAAgB;AACtB,IA
AM,qBAAqB;AAQpB,IAAM,iBAAN,cAA6B,qCAAgB;AAAA,EAC/B,eAAe;AAAA,EACjB;AAAA,EAEjB,YAAY,UAAiC,CAAC,GAAG;AAC/C,UAAM,OAAO;AACb,UAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI,kBAAkB,QAAQ,IAAI;AAC3E,SAAK,SAAS,IAAI,yBAAY,EAAE,OAAO,CAAC;AAAA,EAC1C;AAAA,EAEA,MAAgB,WACd,UACA,SACsB;AACtB,UAAM,WAAW,iBAAiB,QAAQ;AAE1C,UAAM,SAAkC;AAAA,MACtC,iBAAiB,QAAQ,aAAa;AAAA,IACxC;AAEA,QAAI,QAAQ,gBAAgB,QAAW;AACrC,aAAO,cAAc,QAAQ;AAAA,IAC/B;AAEA,QAAI,QAAQ,cAAc;AACxB,aAAO,oBAAoB,QAAQ;AAAA,IACrC;AAEA,UAAM,SAAkC;AAAA,MACtC,OAAO,QAAQ,SAAS;AAAA,MACxB;AAAA,MACA;AAAA,IACF;AAEA,QAAI,QAAQ,SAAS,QAAQ,MAAM,SAAS,GAAG;AAC7C,aAAO,QAAQ,cAAc,QAAQ,KAAK;AAAA,IAC5C;AAEA,UAAM,WAAW,MAAM,KAAK,OAAO,OAAO,gBAAgB,MAAa;AAEvE,UAAM,aAAa,SAAS,cAAc,CAAC;AAC3C,UAAM,eAAe,WAAW,CAAC,GAAG,gBAAgB;AACpD,UAAM,QAAQ,SAAS,iBAAiB,CAAC;AAEzC,UAAM,UAAU,eAAe,QAAQ;AACvC,UAAM,aAAa,QAAQ,SAAS;AAEpC,WAAO;AAAA,MACL,MAAM,YAAY,QAAQ;AAAA,MAC1B,YAAY;AAAA,QACV,OAAO,MAAM,oBAAoB;AAAA,QACjC,QAAQ,MAAM,wBAAwB;AAAA,MACxC;AAAA,MACA,cAAc,aAAa,aAAa,eAAe,YAAY;AAAA,MACnE,GAAI,cAAc,EAAE,QAAQ;AAAA,IAC9B;AAAA,EACF;AAAA,EAEU,YAAY,OAAyB;AAC7C,QAAI,iBAAiB,WAAW;AAC9B,aAAO;AAAA,IACT;AACA,QAAI,iBAAiB,uBAAU;AAC7B,YAAM,SAAS,MAAM;AACrB,UAAI,WAAW,OAAO,WAAW,OAAO,WAAW,OAAO,WAAW,KAAK;AACxE,eAAO;AAAA,MACT;AACA,UAAI,WAAW,OAAQ,WAAW,UAAa,UAAU,KAAM;AAC7D,eAAO;AAAA,MACT;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEU,gBAAgB,QAAgC;AACxD,WAAO;AAAA,EACT;AAAA,EAEU,iBAAiB,OAAyB;AAClD,WAAO,iBAAiB,yBAAY,MAAM,WAAW;AAAA,EACvD;AACF;","names":[]}
|
package/dist/index.d.cts
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
import { LLMMessage, LLMRequestOptions, LLMResponse, LLMToolUseBlock, ToolSchema } from '@tepa/types';
|
|
2
|
+
import { BaseLLMProvider, BaseLLMProviderOptions } from '@tepa/provider-core';
|
|
3
|
+
|
|
4
|
+
interface GeminiProviderOptions extends BaseLLMProviderOptions {
    /** Gemini API key. Falls back to GEMINI_API_KEY or GOOGLE_API_KEY env variables. */
    apiKey?: string;
}
/** LLM provider implementation for Google Gemini models. */
declare class GeminiProvider extends BaseLLMProvider {
    protected readonly providerName = "gemini";
    private readonly client;
    constructor(options?: GeminiProviderOptions);
    /** Perform a single non-streaming completion call against the Gemini API. */
    protected doComplete(messages: LLMMessage[], options: LLMRequestOptions): Promise<LLMResponse>;
    /** True for TypeErrors, HTTP 429, and 5xx ApiErrors; false for 400/401/403/404. */
    protected isRetryable(error: unknown): boolean;
    /** Always null: no Retry-After hint is extracted from Gemini errors. */
    protected getRetryAfterMs(_error: unknown): number | null;
    /** True when the error is an ApiError with HTTP status 429. */
    protected isRateLimitError(error: unknown): boolean;
}

/** A single Gemini chat turn: a role plus one or more text parts. */
interface GeminiContent {
    role: "user" | "model";
    parts: {
        text: string;
    }[];
}
/**
 * Convert Tepa LLMMessage array to Gemini contents format.
 * Maps "assistant" role to "model".
 */
declare function toGeminiContents(messages: LLMMessage[]): GeminiContent[];
/**
 * Map Gemini finish reason to Tepa finishReason.
 */
declare function toFinishReason(reason: string | null | undefined): "end_turn" | "max_tokens" | "stop_sequence" | "tool_use";
/**
 * Extract text from a Gemini response.
 */
declare function extractText(response: {
    text?: string;
}): string;
/**
 * Extract tool use blocks from a Gemini response.
 */
declare function extractToolUse(response: any): LLMToolUseBlock[];
/**
 * Convert Tepa ToolSchema to Gemini function declarations.
 */
declare function toGeminiTools(tools: ToolSchema[]): Record<string, unknown>[];

export { GeminiProvider, type GeminiProviderOptions, extractText, extractToolUse, toFinishReason, toGeminiContents, toGeminiTools };
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
import { LLMMessage, LLMRequestOptions, LLMResponse, LLMToolUseBlock, ToolSchema } from '@tepa/types';
|
|
2
|
+
import { BaseLLMProvider, BaseLLMProviderOptions } from '@tepa/provider-core';
|
|
3
|
+
|
|
4
|
+
interface GeminiProviderOptions extends BaseLLMProviderOptions {
    /** Gemini API key. Falls back to GEMINI_API_KEY or GOOGLE_API_KEY env variables. */
    apiKey?: string;
}
/** LLM provider implementation for Google Gemini models. */
declare class GeminiProvider extends BaseLLMProvider {
    protected readonly providerName = "gemini";
    private readonly client;
    constructor(options?: GeminiProviderOptions);
    /** Perform a single non-streaming completion call against the Gemini API. */
    protected doComplete(messages: LLMMessage[], options: LLMRequestOptions): Promise<LLMResponse>;
    /** True for TypeErrors, HTTP 429, and 5xx ApiErrors; false for 400/401/403/404. */
    protected isRetryable(error: unknown): boolean;
    /** Always null: no Retry-After hint is extracted from Gemini errors. */
    protected getRetryAfterMs(_error: unknown): number | null;
    /** True when the error is an ApiError with HTTP status 429. */
    protected isRateLimitError(error: unknown): boolean;
}

/** A single Gemini chat turn: a role plus one or more text parts. */
interface GeminiContent {
    role: "user" | "model";
    parts: {
        text: string;
    }[];
}
/**
 * Convert Tepa LLMMessage array to Gemini contents format.
 * Maps "assistant" role to "model".
 */
declare function toGeminiContents(messages: LLMMessage[]): GeminiContent[];
/**
 * Map Gemini finish reason to Tepa finishReason.
 */
declare function toFinishReason(reason: string | null | undefined): "end_turn" | "max_tokens" | "stop_sequence" | "tool_use";
/**
 * Extract text from a Gemini response.
 */
declare function extractText(response: {
    text?: string;
}): string;
/**
 * Extract tool use blocks from a Gemini response.
 */
declare function extractToolUse(response: any): LLMToolUseBlock[];
/**
 * Convert Tepa ToolSchema to Gemini function declarations.
 */
declare function toGeminiTools(tools: ToolSchema[]): Record<string, unknown>[];

export { GeminiProvider, type GeminiProviderOptions, extractText, extractToolUse, toFinishReason, toGeminiContents, toGeminiTools };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
// src/gemini.ts
|
|
2
|
+
import { GoogleGenAI, ApiError } from "@google/genai";
|
|
3
|
+
import { BaseLLMProvider } from "@tepa/provider-core";
|
|
4
|
+
|
|
5
|
+
// src/formatting.ts
|
|
6
|
+
// Convert a Tepa LLMMessage array into Gemini "contents". Tepa's "assistant"
// role maps to Gemini's "model"; all other roles map to "user". Each message
// body becomes a single text part.
function toGeminiContents(messages) {
  return messages.map(({ role, content }) => {
    const geminiRole = role === "assistant" ? "model" : "user";
    return { role: geminiRole, parts: [{ text: content }] };
  });
}
|
|
12
|
+
// Translate a Gemini candidate finishReason into Tepa's finishReason values:
// "MAX_TOKENS" -> "max_tokens"; everything else (including "STOP", null and
// undefined) -> "end_turn".
function toFinishReason(reason) {
  return reason === "MAX_TOKENS" ? "max_tokens" : "end_turn";
}
|
|
21
|
+
// Pull the `text` field off a Gemini response; null/undefined become "".
function extractText(response) {
  const text = response.text;
  return text === void 0 || text === null ? "" : text;
}
|
|
24
|
+
// Turn the functionCall parts of the first candidate into Tepa tool-use
// blocks. Synthetic sequential ids ("gemini-call-0", ...) are assigned only
// to parts that actually carry a named functionCall; other parts are ignored.
function extractToolUse(response) {
  const parts = response.candidates?.[0]?.content?.parts ?? [];
  return parts
    .filter((part) => part.functionCall?.name)
    .map((part, i) => ({
      id: `gemini-call-${i}`,
      name: part.functionCall.name,
      input: part.functionCall.args ?? {}
    }));
}
|
|
39
|
+
// Convert Tepa ToolSchema objects into Gemini tool format: one array element
// containing all functionDeclarations. Types are upper-cased to match
// Gemini's Type enum; parameters count as required unless the schema sets
// `required: false` explicitly.
function toGeminiTools(tools) {
  const functionDeclarations = tools.map((tool) => {
    const entries = Object.entries(tool.parameters);
    const properties = Object.fromEntries(
      entries.map(([name, param]) => [
        name,
        { type: param.type.toUpperCase(), description: param.description }
      ])
    );
    const required = entries
      .filter(([, param]) => param.required !== false)
      .map(([name]) => name);
    return {
      name: tool.name,
      description: tool.description,
      parameters: { type: "OBJECT", properties, required }
    };
  });
  return [{ functionDeclarations }];
}
|
|
64
|
+
|
|
65
|
+
// src/gemini.ts
|
|
66
|
+
// Model used when the caller does not specify options.model.
var DEFAULT_MODEL = "gemini-3-flash-preview";
// Output-token cap (64,000) applied when options.maxTokens is not set.
var DEFAULT_MAX_TOKENS = 64e3;
|
|
68
|
+
/**
 * LLM provider implementation for Google Gemini models.
 *
 * Extends BaseLLMProvider, which drives retries/backoff and logging and calls
 * back into doComplete / isRetryable / getRetryAfterMs / isRateLimitError.
 */
var GeminiProvider = class extends BaseLLMProvider {
  providerName = "gemini";
  client;
  /**
   * @param {Object} [options] - BaseLLMProvider options plus an optional
   *   `apiKey`; falls back to the GEMINI_API_KEY, then GOOGLE_API_KEY env vars.
   */
  constructor(options = {}) {
    super(options);
    const apiKey = options.apiKey ?? process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY;
    this.client = new GoogleGenAI({ apiKey });
  }
  /**
   * Perform a single non-streaming Gemini completion.
   *
   * @param {Array} messages - Tepa LLMMessage list (converted to Gemini contents).
   * @param {Object} options - model, maxTokens, temperature, systemPrompt, tools.
   * @returns {Promise<Object>} LLMResponse: text, tokensUsed {input, output},
   *   finishReason, and toolUse blocks when the model requested function calls.
   */
  async doComplete(messages, options) {
    const contents = toGeminiContents(messages);
    const config = {
      maxOutputTokens: options.maxTokens ?? DEFAULT_MAX_TOKENS
    };
    if (options.temperature !== void 0) {
      config.temperature = options.temperature;
    }
    if (options.systemPrompt) {
      config.systemInstruction = options.systemPrompt;
    }
    if (options.tools && options.tools.length > 0) {
      // FIX: @google/genai reads tools from `config` (GenerateContentConfig.tools),
      // alongside systemInstruction/temperature above. The previous top-level
      // `params.tools` assignment was silently ignored by the SDK, so native
      // function calling never reached the API.
      config.tools = toGeminiTools(options.tools);
    }
    const params = {
      model: options.model || DEFAULT_MODEL,
      contents,
      config
    };
    const response = await this.client.models.generateContent(params);
    const candidates = response.candidates ?? [];
    const finishReason = candidates[0]?.finishReason ?? null;
    const usage = response.usageMetadata ?? {};
    const toolUse = extractToolUse(response);
    const hasToolUse = toolUse.length > 0;
    return {
      text: extractText(response),
      tokensUsed: {
        input: usage.promptTokenCount ?? 0,
        output: usage.candidatesTokenCount ?? 0
      },
      // Function calls take precedence over the model-reported finish reason.
      finishReason: hasToolUse ? "tool_use" : toFinishReason(finishReason),
      ...hasToolUse && { toolUse }
    };
  }
  /**
   * Transient-failure check used by the base class's retry loop. TypeErrors
   * (e.g. fetch-level network failures) are retried; for ApiError, 429 and
   * 5xx statuses are retryable while 400/401/403/404 are permanent.
   */
  isRetryable(error) {
    if (error instanceof TypeError) {
      return true;
    }
    if (error instanceof ApiError) {
      const status = error.status;
      if (status === 400 || status === 401 || status === 403 || status === 404) {
        return false;
      }
      if (status === 429 || status !== void 0 && status >= 500) {
        return true;
      }
    }
    return false;
  }
  /** No Retry-After hint is extracted from Gemini errors; default backoff applies. */
  getRetryAfterMs(_error) {
    return null;
  }
  /** True when the error is a Gemini ApiError with HTTP status 429. */
  isRateLimitError(error) {
    return error instanceof ApiError && error.status === 429;
  }
};
|
|
133
|
+
// Public named exports of @tepa/provider-gemini.
export {
  GeminiProvider,
  extractText,
  extractToolUse,
  toFinishReason,
  toGeminiContents,
  toGeminiTools
};
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/gemini.ts","../src/formatting.ts"],"sourcesContent":["import { GoogleGenAI, ApiError } from \"@google/genai\";\nimport type { LLMMessage, LLMRequestOptions, LLMResponse } from \"@tepa/types\";\nimport { BaseLLMProvider, type BaseLLMProviderOptions } from \"@tepa/provider-core\";\nimport { toGeminiContents, toGeminiTools, toFinishReason, extractText, extractToolUse } from \"./formatting.js\";\n\nconst DEFAULT_MODEL = \"gemini-3-flash-preview\";\nconst DEFAULT_MAX_TOKENS = 64_000;\n\nexport interface GeminiProviderOptions extends BaseLLMProviderOptions {\n /** Gemini API key. Falls back to GEMINI_API_KEY or GOOGLE_API_KEY env variables. */\n apiKey?: string;\n}\n\n/** LLM provider implementation for Google Gemini models. */\nexport class GeminiProvider extends BaseLLMProvider {\n protected readonly providerName = \"gemini\";\n private readonly client: GoogleGenAI;\n\n constructor(options: GeminiProviderOptions = {}) {\n super(options);\n const apiKey = options.apiKey ?? process.env.GEMINI_API_KEY ?? process.env.GOOGLE_API_KEY;\n this.client = new GoogleGenAI({ apiKey });\n }\n\n protected async doComplete(\n messages: LLMMessage[],\n options: LLMRequestOptions,\n ): Promise<LLMResponse> {\n const contents = toGeminiContents(messages);\n\n const config: Record<string, unknown> = {\n maxOutputTokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,\n };\n\n if (options.temperature !== undefined) {\n config.temperature = options.temperature;\n }\n\n if (options.systemPrompt) {\n config.systemInstruction = options.systemPrompt;\n }\n\n const params: Record<string, unknown> = {\n model: options.model || DEFAULT_MODEL,\n contents,\n config,\n };\n\n if (options.tools && options.tools.length > 0) {\n params.tools = toGeminiTools(options.tools);\n }\n\n const response = await this.client.models.generateContent(params as any);\n\n const candidates = response.candidates ?? [];\n const finishReason = candidates[0]?.finishReason ?? 
null;\n const usage = response.usageMetadata ?? {};\n\n const toolUse = extractToolUse(response);\n const hasToolUse = toolUse.length > 0;\n\n return {\n text: extractText(response),\n tokensUsed: {\n input: usage.promptTokenCount ?? 0,\n output: usage.candidatesTokenCount ?? 0,\n },\n finishReason: hasToolUse ? \"tool_use\" : toFinishReason(finishReason),\n ...(hasToolUse && { toolUse }),\n };\n }\n\n protected isRetryable(error: unknown): boolean {\n if (error instanceof TypeError) {\n return true;\n }\n if (error instanceof ApiError) {\n const status = error.status;\n if (status === 400 || status === 401 || status === 403 || status === 404) {\n return false;\n }\n if (status === 429 || (status !== undefined && status >= 500)) {\n return true;\n }\n }\n return false;\n }\n\n protected getRetryAfterMs(_error: unknown): number | null {\n return null;\n }\n\n protected isRateLimitError(error: unknown): boolean {\n return error instanceof ApiError && error.status === 429;\n }\n}\n","import type { LLMMessage, LLMToolUseBlock, ToolSchema } from \"@tepa/types\";\n\nexport interface GeminiContent {\n role: \"user\" | \"model\";\n parts: { text: string }[];\n}\n\n/**\n * Convert Tepa LLMMessage array to Gemini contents format.\n * Maps \"assistant\" role to \"model\".\n */\nexport function toGeminiContents(messages: LLMMessage[]): GeminiContent[] {\n return messages.map((msg) => ({\n role: msg.role === \"assistant\" ? \"model\" : \"user\",\n parts: [{ text: msg.content }],\n }));\n}\n\n/**\n * Map Gemini finish reason to Tepa finishReason.\n */\nexport function toFinishReason(\n reason: string | null | undefined,\n): \"end_turn\" | \"max_tokens\" | \"stop_sequence\" | \"tool_use\" {\n switch (reason) {\n case \"MAX_TOKENS\":\n return \"max_tokens\";\n case \"STOP\":\n default:\n return \"end_turn\";\n }\n}\n\n/**\n * Extract text from a Gemini response.\n */\nexport function extractText(response: { text?: string }): string {\n return response.text ?? 
\"\";\n}\n\n/**\n * Extract tool use blocks from a Gemini response.\n */\nexport function extractToolUse(response: any): LLMToolUseBlock[] {\n const parts = response.candidates?.[0]?.content?.parts ?? [];\n const blocks: LLMToolUseBlock[] = [];\n let index = 0;\n\n for (const part of parts) {\n if (part.functionCall?.name) {\n blocks.push({\n id: `gemini-call-${index++}`,\n name: part.functionCall.name,\n input: part.functionCall.args ?? {},\n });\n }\n }\n\n return blocks;\n}\n\n/**\n * Convert Tepa ToolSchema to Gemini function declarations.\n */\nexport function toGeminiTools(tools: ToolSchema[]): Record<string, unknown>[] {\n const functionDeclarations = tools.map((tool) => {\n const properties: Record<string, unknown> = {};\n const required: string[] = [];\n\n for (const [name, param] of Object.entries(tool.parameters)) {\n properties[name] = {\n type: param.type.toUpperCase(),\n description: param.description,\n };\n if (param.required !== false) {\n required.push(name);\n }\n }\n\n return {\n name: tool.name,\n description: tool.description,\n parameters: {\n type: \"OBJECT\",\n properties,\n required,\n },\n };\n });\n\n return [{ functionDeclarations 
}];\n}\n"],"mappings":";AAAA,SAAS,aAAa,gBAAgB;AAEtC,SAAS,uBAAoD;;;ACStD,SAAS,iBAAiB,UAAyC;AACxE,SAAO,SAAS,IAAI,CAAC,SAAS;AAAA,IAC5B,MAAM,IAAI,SAAS,cAAc,UAAU;AAAA,IAC3C,OAAO,CAAC,EAAE,MAAM,IAAI,QAAQ,CAAC;AAAA,EAC/B,EAAE;AACJ;AAKO,SAAS,eACd,QAC0D;AAC1D,UAAQ,QAAQ;AAAA,IACd,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AAAA,IACL;AACE,aAAO;AAAA,EACX;AACF;AAKO,SAAS,YAAY,UAAqC;AAC/D,SAAO,SAAS,QAAQ;AAC1B;AAKO,SAAS,eAAe,UAAkC;AAC/D,QAAM,QAAQ,SAAS,aAAa,CAAC,GAAG,SAAS,SAAS,CAAC;AAC3D,QAAM,SAA4B,CAAC;AACnC,MAAI,QAAQ;AAEZ,aAAW,QAAQ,OAAO;AACxB,QAAI,KAAK,cAAc,MAAM;AAC3B,aAAO,KAAK;AAAA,QACV,IAAI,eAAe,OAAO;AAAA,QAC1B,MAAM,KAAK,aAAa;AAAA,QACxB,OAAO,KAAK,aAAa,QAAQ,CAAC;AAAA,MACpC,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO;AACT;AAKO,SAAS,cAAc,OAAgD;AAC5E,QAAM,uBAAuB,MAAM,IAAI,CAAC,SAAS;AAC/C,UAAM,aAAsC,CAAC;AAC7C,UAAM,WAAqB,CAAC;AAE5B,eAAW,CAAC,MAAM,KAAK,KAAK,OAAO,QAAQ,KAAK,UAAU,GAAG;AAC3D,iBAAW,IAAI,IAAI;AAAA,QACjB,MAAM,MAAM,KAAK,YAAY;AAAA,QAC7B,aAAa,MAAM;AAAA,MACrB;AACA,UAAI,MAAM,aAAa,OAAO;AAC5B,iBAAS,KAAK,IAAI;AAAA,MACpB;AAAA,IACF;AAEA,WAAO;AAAA,MACL,MAAM,KAAK;AAAA,MACX,aAAa,KAAK;AAAA,MAClB,YAAY;AAAA,QACV,MAAM;AAAA,QACN;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAAA,EACF,CAAC;AAED,SAAO,CAAC,EAAE,qBAAqB,CAAC;AAClC;;;ADtFA,IAAM,gBAAgB;AACtB,IAAM,qBAAqB;AAQpB,IAAM,iBAAN,cAA6B,gBAAgB;AAAA,EAC/B,eAAe;AAAA,EACjB;AAAA,EAEjB,YAAY,UAAiC,CAAC,GAAG;AAC/C,UAAM,OAAO;AACb,UAAM,SAAS,QAAQ,UAAU,QAAQ,IAAI,kBAAkB,QAAQ,IAAI;AAC3E,SAAK,SAAS,IAAI,YAAY,EAAE,OAAO,CAAC;AAAA,EAC1C;AAAA,EAEA,MAAgB,WACd,UACA,SACsB;AACtB,UAAM,WAAW,iBAAiB,QAAQ;AAE1C,UAAM,SAAkC;AAAA,MACtC,iBAAiB,QAAQ,aAAa;AAAA,IACxC;AAEA,QAAI,QAAQ,gBAAgB,QAAW;AACrC,aAAO,cAAc,QAAQ;AAAA,IAC/B;AAEA,QAAI,QAAQ,cAAc;AACxB,aAAO,oBAAoB,QAAQ;AAAA,IACrC;AAEA,UAAM,SAAkC;AAAA,MACtC,OAAO,QAAQ,SAAS;AAAA,MACxB;AAAA,MACA;AAAA,IACF;AAEA,QAAI,QAAQ,SAAS,QAAQ,MAAM,SAAS,GAAG;AAC7C,aAAO,QAAQ,cAAc,QAAQ,KAAK;AAAA,IAC5C;AAEA,UAAM,WAAW,MAAM,KAAK,OAAO,OAAO,gBAAgB,MAAa;AAEvE,UAAM,aAAa,SAAS,cAAc,CAAC;AAC3C,UAAM,eAAe,WAAW,CAAC,GAAG,gBAAgB;AACpD,UAAM,QAAQ,SAAS,iBAAiB,CAAC;AAEzC,UAAM,UAAU,eAAe,QAAQ;AACvC,UAAM,aAA
a,QAAQ,SAAS;AAEpC,WAAO;AAAA,MACL,MAAM,YAAY,QAAQ;AAAA,MAC1B,YAAY;AAAA,QACV,OAAO,MAAM,oBAAoB;AAAA,QACjC,QAAQ,MAAM,wBAAwB;AAAA,MACxC;AAAA,MACA,cAAc,aAAa,aAAa,eAAe,YAAY;AAAA,MACnE,GAAI,cAAc,EAAE,QAAQ;AAAA,IAC9B;AAAA,EACF;AAAA,EAEU,YAAY,OAAyB;AAC7C,QAAI,iBAAiB,WAAW;AAC9B,aAAO;AAAA,IACT;AACA,QAAI,iBAAiB,UAAU;AAC7B,YAAM,SAAS,MAAM;AACrB,UAAI,WAAW,OAAO,WAAW,OAAO,WAAW,OAAO,WAAW,KAAK;AACxE,eAAO;AAAA,MACT;AACA,UAAI,WAAW,OAAQ,WAAW,UAAa,UAAU,KAAM;AAC7D,eAAO;AAAA,MACT;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEU,gBAAgB,QAAgC;AACxD,WAAO;AAAA,EACT;AAAA,EAEU,iBAAiB,OAAyB;AAClD,WAAO,iBAAiB,YAAY,MAAM,WAAW;AAAA,EACvD;AACF;","names":[]}
|
package/package.json
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@tepa/provider-gemini",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Google Gemini LLM provider for the Tepa autonomous agent",
|
|
5
|
+
"license": "MIT",
|
|
6
|
+
"author": "Frandi <frandi.tech@gmail.com>",
|
|
7
|
+
"repository": {
|
|
8
|
+
"type": "git",
|
|
9
|
+
"url": "https://github.com/frandi/tepa-ai.git",
|
|
10
|
+
"directory": "packages/provider-gemini"
|
|
11
|
+
},
|
|
12
|
+
"homepage": "https://github.com/frandi/tepa-ai/tree/main/packages/provider-gemini#readme",
|
|
13
|
+
"bugs": {
|
|
14
|
+
"url": "https://github.com/frandi/tepa-ai/issues"
|
|
15
|
+
},
|
|
16
|
+
"keywords": ["tepa", "ai", "llm", "agent", "gemini", "google", "provider"],
|
|
17
|
+
"type": "module",
|
|
18
|
+
"main": "./dist/index.cjs",
|
|
19
|
+
"module": "./dist/index.js",
|
|
20
|
+
"types": "./dist/index.d.ts",
|
|
21
|
+
"exports": {
|
|
22
|
+
".": {
|
|
23
|
+
"import": {
|
|
24
|
+
"types": "./dist/index.d.ts",
|
|
25
|
+
"default": "./dist/index.js"
|
|
26
|
+
},
|
|
27
|
+
"require": {
|
|
28
|
+
"types": "./dist/index.d.cts",
|
|
29
|
+
"default": "./dist/index.cjs"
|
|
30
|
+
}
|
|
31
|
+
}
|
|
32
|
+
},
|
|
33
|
+
"files": ["dist"],
|
|
34
|
+
"sideEffects": false,
|
|
35
|
+
"engines": {
|
|
36
|
+
"node": ">=18"
|
|
37
|
+
},
|
|
38
|
+
"publishConfig": {
|
|
39
|
+
"access": "public"
|
|
40
|
+
},
|
|
41
|
+
"scripts": {
|
|
42
|
+
"build": "tsup",
|
|
43
|
+
"dev": "tsup --watch",
|
|
44
|
+
"prepublishOnly": "npm run build"
|
|
45
|
+
},
|
|
46
|
+
"dependencies": {
|
|
47
|
+
"@tepa/types": "^0.1.0",
|
|
48
|
+
"@tepa/provider-core": "^0.1.0",
|
|
49
|
+
"@google/genai": "^1.x"
|
|
50
|
+
},
|
|
51
|
+
"devDependencies": {
|
|
52
|
+
"tsup": "^8.0.0",
|
|
53
|
+
"typescript": "^5.5.0"
|
|
54
|
+
}
|
|
55
|
+
}
|