@aigne/gemini 0.11.6 → 0.12.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,39 @@
  # Changelog
 
+ ## [0.12.1](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.12.0...gemini-v0.12.1) (2025-09-05)
+
+
+ ### Bug Fixes
+
+ * should not return local path from aigne hub service ([#460](https://github.com/AIGNE-io/aigne-framework/issues/460)) ([c959717](https://github.com/AIGNE-io/aigne-framework/commit/c95971774f7e84dbeb3313f60b3e6464e2bb22e4))
+
+
+ ### Dependencies
+
+ * The following workspace dependencies were updated
+   * dependencies
+     * @aigne/openai bumped to 0.14.1
+   * devDependencies
+     * @aigne/core bumped to 1.58.1
+     * @aigne/test-utils bumped to 0.5.45
+
+ ## [0.12.0](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.11.6...gemini-v0.12.0) (2025-09-05)
+
+
+ ### Features
+
+ * add modalities support for chat model ([#454](https://github.com/AIGNE-io/aigne-framework/issues/454)) ([70d1bf6](https://github.com/AIGNE-io/aigne-framework/commit/70d1bf631f4e711235d89c6df8ee210a19179b30))
+
+
+ ### Dependencies
+
+ * The following workspace dependencies were updated
+   * dependencies
+     * @aigne/openai bumped to 0.14.0
+   * devDependencies
+     * @aigne/core bumped to 1.58.0
+     * @aigne/test-utils bumped to 0.5.44
+
  ## [0.11.6](https://github.com/AIGNE-io/aigne-framework/compare/gemini-v0.11.5...gemini-v0.11.6) (2025-09-01)
 
 
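
The 0.12.0 feature above, modalities support, is what drives the new image-generation path in the implementation diffs below. A minimal usage sketch, assuming the `invoke` API from @aigne/core; the model id is illustrative, and the shape of the returned `files` array is inferred from this diff, not from official docs:

```ts
import { GeminiChatModel } from "@aigne/gemini";

// Model ids containing "image" are routed to the native @google/genai client
// (see the new process() override below); other ids keep the OpenAI-compatible endpoint.
const model = new GeminiChatModel({
  apiKey: process.env.GEMINI_API_KEY, // may also come from the configured env variable
  model: "gemini-2.0-flash-preview-image-generation", // illustrative image-capable model id
});

const output = await model.invoke({
  messages: [{ role: "user", content: "Draw a red bicycle" }],
  // Forwarded to Gemini as responseModalities by handleImageModelProcessing.
  modelOptions: { modalities: ["text", "image"] },
});

// Generated images arrive as base64-encoded file entries alongside usage data.
console.log(output.files?.map((f) => f.mimeType));
```
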
@@ -1,5 +1,7 @@
- import type { ChatModelInput } from "@aigne/core";
+ import { type AgentInvokeOptions, type AgentProcessResult, type ChatModelInput, type ChatModelOutput } from "@aigne/core";
+ import { type PromiseOrValue } from "@aigne/core/utils/type-utils.js";
  import { OpenAIChatModel, type OpenAIChatModelOptions } from "@aigne/openai";
+ import { GoogleGenAI } from "@google/genai";
  /**
   * Implementation of the ChatModel interface for Google's Gemini API
   *
@@ -20,5 +22,12 @@ export declare class GeminiChatModel extends OpenAIChatModel {
      protected supportsToolsUseWithJsonSchema: boolean;
      protected supportsParallelToolCalls: boolean;
      protected supportsToolStreaming: boolean;
+     protected _googleClient?: GoogleGenAI;
+     get googleClient(): GoogleGenAI;
+     process(input: ChatModelInput, options: AgentInvokeOptions): PromiseOrValue<AgentProcessResult<ChatModelOutput>>;
+     private handleImageModelProcessing;
+     private buildConfig;
+     private buildTools;
+     private buildContents;
      getRunMessages(input: ChatModelInput): ReturnType<OpenAIChatModel["getRunMessages"]>;
  }
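
The declaration changes add a lazily-created `googleClient` getter and a `process` override. A small sketch of the getter's contract as implemented below (the key value is a placeholder):

```ts
import { GeminiChatModel } from "@aigne/gemini";

const chat = new GeminiChatModel({ apiKey: "AIza..." }); // placeholder key

// The GoogleGenAI client is only constructed on first access; without an
// apiKey (and with the configured env variable unset) the getter throws.
const client = chat.googleClient;

// Later accesses return the same cached instance.
console.assert(client === chat.googleClient);
```
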
@@ -1,7 +1,12 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.GeminiChatModel = void 0;
+ const core_1 = require("@aigne/core");
+ const type_utils_js_1 = require("@aigne/core/utils/type-utils.js");
  const openai_1 = require("@aigne/openai");
+ const index_js_1 = require("@aigne/platform-helpers/nodejs/index.js");
+ const genai_1 = require("@google/genai");
+ const uuid_1 = require("uuid");
  const GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai";
  const GEMINI_DEFAULT_CHAT_MODEL = "gemini-2.0-flash";
  /**
@@ -30,6 +35,206 @@ class GeminiChatModel extends openai_1.OpenAIChatModel {
      supportsToolsUseWithJsonSchema = false;
      supportsParallelToolCalls = false;
      supportsToolStreaming = false;
+     _googleClient;
+     get googleClient() {
+         if (this._googleClient)
+             return this._googleClient;
+         const { apiKey } = this.credential;
+         if (!apiKey)
+             throw new Error(`${this.name} requires an API key. Please provide it via \`options.apiKey\`, or set the \`${this.apiKeyEnvName}\` environment variable`);
+         this._googleClient ??= new genai_1.GoogleGenAI({ apiKey });
+         return this._googleClient;
+     }
+     process(input, options) {
+         const model = input.modelOptions?.model || this.credential.model;
+         if (!model.includes("image"))
+             return super.process(input, options);
+         return this.handleImageModelProcessing(input);
+     }
+     async *handleImageModelProcessing(input) {
+         const model = input.modelOptions?.model || this.credential.model;
+         const { contents, config } = await this.buildContents(input);
+         const parameters = {
+             model: model,
+             contents,
+             config: {
+                 responseModalities: input.modelOptions?.modalities,
+                 temperature: input.modelOptions?.temperature || this.modelOptions?.temperature,
+                 topP: input.modelOptions?.topP || this.modelOptions?.topP,
+                 frequencyPenalty: input.modelOptions?.frequencyPenalty || this.modelOptions?.frequencyPenalty,
+                 presencePenalty: input.modelOptions?.presencePenalty || this.modelOptions?.presencePenalty,
+                 ...config,
+                 ...(await this.buildTools(input)),
+                 ...(await this.buildConfig(input)),
+             },
+         };
+         const response = await this.googleClient.models.generateContentStream(parameters);
+         const usage = {
+             inputTokens: 0,
+             outputTokens: 0,
+         };
+         let responseModel;
+         const files = [];
+         const toolCalls = [];
+         let text = "";
+         for await (const chunk of response) {
+             if (!responseModel && chunk.modelVersion) {
+                 responseModel = chunk.modelVersion;
+                 yield { delta: { json: { model: responseModel } } };
+             }
+             for (const { content } of chunk.candidates ?? []) {
+                 if (content?.parts) {
+                     for (const part of content.parts) {
+                         if (part.text) {
+                             text += part.text;
+                             if (input.responseFormat?.type !== "json_schema") {
+                                 yield { delta: { text: { text: part.text } } };
+                             }
+                         }
+                         if (part.inlineData?.data) {
+                             files.push({
+                                 type: "file",
+                                 data: part.inlineData.data,
+                                 filename: part.inlineData.displayName,
+                                 mimeType: part.inlineData.mimeType,
+                             });
+                         }
+                         if (part.functionCall?.name) {
+                             toolCalls.push({
+                                 id: part.functionCall.id || (0, uuid_1.v7)(),
+                                 type: "function",
+                                 function: {
+                                     name: part.functionCall.name,
+                                     arguments: part.functionCall.args || {},
+                                 },
+                             });
+                             yield { delta: { json: { toolCalls } } };
+                         }
+                     }
+                 }
+             }
+             if (chunk.usageMetadata) {
+                 usage.inputTokens += chunk.usageMetadata.promptTokenCount || 0;
+                 usage.outputTokens += chunk.usageMetadata.candidatesTokenCount || 0;
+             }
+         }
+         if (input.responseFormat?.type === "json_schema") {
+             yield { delta: { json: { json: (0, core_1.safeParseJSON)(text) } } };
+         }
+         yield { delta: { json: { usage, files } } };
+     }
+     async buildConfig(input) {
+         const config = {};
+         if (input.responseFormat?.type === "json_schema") {
+             config.responseJsonSchema = input.responseFormat.jsonSchema.schema;
+             config.responseMimeType = "application/json";
+         }
+         return config;
+     }
+     async buildTools(input) {
+         const tools = [];
+         for (const tool of input.tools ?? []) {
+             tools.push({
+                 functionDeclarations: [
+                     {
+                         name: tool.function.name,
+                         description: tool.function.description,
+                         parametersJsonSchema: tool.function.parameters,
+                     },
+                 ],
+             });
+         }
+         const functionCallingConfig = !input.toolChoice
+             ? undefined
+             : input.toolChoice === "auto"
+                 ? { mode: genai_1.FunctionCallingConfigMode.AUTO }
+                 : input.toolChoice === "none"
+                     ? { mode: genai_1.FunctionCallingConfigMode.NONE }
+                     : input.toolChoice === "required"
+                         ? { mode: genai_1.FunctionCallingConfigMode.ANY }
+                         : {
+                             mode: genai_1.FunctionCallingConfigMode.ANY,
+                             allowedFunctionNames: [input.toolChoice.function.name],
+                         };
+         return { tools, toolConfig: { functionCallingConfig } };
+     }
+     async buildContents(input) {
+         const result = {
+             contents: [],
+         };
+         const systemParts = [];
+         result.contents = (await Promise.all(input.messages.map(async (msg) => {
+             if (msg.role === "system") {
+                 if (typeof msg.content === "string") {
+                     systemParts.push({ text: msg.content });
+                 }
+                 else if (Array.isArray(msg.content)) {
+                     systemParts.push(...msg.content.map((item) => {
+                         if (item.type === "text")
+                             return { text: item.text };
+                         throw new Error(`Unsupported content type: ${item.type}`);
+                     }));
+                 }
+                 return;
+             }
+             const content = {
+                 role: msg.role === "agent" ? "model" : "user",
+             };
+             if (msg.toolCalls) {
+                 content.parts = msg.toolCalls.map((call) => ({
+                     functionCall: {
+                         id: call.id,
+                         name: call.function.name,
+                         args: call.function.arguments,
+                     },
+                 }));
+             }
+             else if (msg.toolCallId) {
+                 const call = input.messages
+                     .flatMap((i) => i.toolCalls)
+                     .find((c) => c?.id === msg.toolCallId);
+                 if (!call)
+                     throw new Error(`Tool call not found: ${msg.toolCallId}`);
+                 content.parts = [
+                     {
+                         functionResponse: {
+                             id: msg.toolCallId,
+                             name: call.function.name,
+                             response: JSON.parse(msg.content),
+                         },
+                     },
+                 ];
+             }
+             else if (typeof msg.content === "string") {
+                 content.parts = [{ text: msg.content }];
+             }
+             else if (Array.isArray(msg.content)) {
+                 content.parts = await Promise.all(msg.content.map(async (item) => {
+                     switch (item.type) {
+                         case "text":
+                             return { text: item.text };
+                         case "url":
+                             return { fileData: { fileUri: item.url, mimeType: item.mimeType } };
+                         case "file":
+                             return { inlineData: { data: item.data, mimeType: item.mimeType } };
+                         case "local":
+                             return {
+                                 inlineData: {
+                                     data: await index_js_1.nodejs.fs.readFile(item.path, "base64"),
+                                     mimeType: item.mimeType,
+                                 },
+                             };
+                     }
+                 }));
+             }
+             return content;
+         }))).filter(type_utils_js_1.isNonNullable);
+         if (systemParts) {
+             result.config ??= {};
+             result.config.systemInstruction = systemParts;
+         }
+         return result;
+     }
      async getRunMessages(input) {
          const messages = await super.getRunMessages(input);
          const lastMessage = messages.at(-1);
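
`buildTools` above folds the `toolChoice` translation into a nested ternary. The same mapping written out as a standalone function, with a simplified stand-in for the framework's toolChoice union; note that Gemini has no separate "required" mode, so ANY serves both to force a call and to pin it to one function:

```ts
import { FunctionCallingConfigMode, type FunctionCallingConfig } from "@google/genai";

// Simplified stand-in for ChatModelInput["toolChoice"].
type ToolChoice = "auto" | "none" | "required" | { function: { name: string } };

function toFunctionCallingConfig(toolChoice?: ToolChoice): FunctionCallingConfig | undefined {
  if (!toolChoice) return undefined;
  if (toolChoice === "auto") return { mode: FunctionCallingConfigMode.AUTO };
  if (toolChoice === "none") return { mode: FunctionCallingConfigMode.NONE };
  if (toolChoice === "required") return { mode: FunctionCallingConfigMode.ANY };
  // A specific tool: force a call restricted to that single function.
  return {
    mode: FunctionCallingConfigMode.ANY,
    allowedFunctionNames: [toolChoice.function.name],
  };
}
```
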
@@ -1,5 +1,7 @@
- import type { ChatModelInput } from "@aigne/core";
+ import { type AgentInvokeOptions, type AgentProcessResult, type ChatModelInput, type ChatModelOutput } from "@aigne/core";
+ import { type PromiseOrValue } from "@aigne/core/utils/type-utils.js";
  import { OpenAIChatModel, type OpenAIChatModelOptions } from "@aigne/openai";
+ import { GoogleGenAI } from "@google/genai";
  /**
   * Implementation of the ChatModel interface for Google's Gemini API
   *
@@ -20,5 +22,12 @@ export declare class GeminiChatModel extends OpenAIChatModel {
      protected supportsToolsUseWithJsonSchema: boolean;
      protected supportsParallelToolCalls: boolean;
      protected supportsToolStreaming: boolean;
+     protected _googleClient?: GoogleGenAI;
+     get googleClient(): GoogleGenAI;
+     process(input: ChatModelInput, options: AgentInvokeOptions): PromiseOrValue<AgentProcessResult<ChatModelOutput>>;
+     private handleImageModelProcessing;
+     private buildConfig;
+     private buildTools;
+     private buildContents;
      getRunMessages(input: ChatModelInput): ReturnType<OpenAIChatModel["getRunMessages"]>;
  }
@@ -1,5 +1,7 @@
- import type { ChatModelInput } from "@aigne/core";
+ import { type AgentInvokeOptions, type AgentProcessResult, type ChatModelInput, type ChatModelOutput } from "@aigne/core";
+ import { type PromiseOrValue } from "@aigne/core/utils/type-utils.js";
  import { OpenAIChatModel, type OpenAIChatModelOptions } from "@aigne/openai";
+ import { GoogleGenAI } from "@google/genai";
  /**
   * Implementation of the ChatModel interface for Google's Gemini API
   *
@@ -20,5 +22,12 @@ export declare class GeminiChatModel extends OpenAIChatModel {
      protected supportsToolsUseWithJsonSchema: boolean;
      protected supportsParallelToolCalls: boolean;
      protected supportsToolStreaming: boolean;
+     protected _googleClient?: GoogleGenAI;
+     get googleClient(): GoogleGenAI;
+     process(input: ChatModelInput, options: AgentInvokeOptions): PromiseOrValue<AgentProcessResult<ChatModelOutput>>;
+     private handleImageModelProcessing;
+     private buildConfig;
+     private buildTools;
+     private buildContents;
      getRunMessages(input: ChatModelInput): ReturnType<OpenAIChatModel["getRunMessages"]>;
  }
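
Both compiled variants of `buildContents` map AIGNE message content items onto Gemini parts. A sketch of that mapping in isolation, with a simplified item union standing in for the framework's actual types:

```ts
import { readFile } from "node:fs/promises";
import type { Part } from "@google/genai";

// Simplified stand-in for the framework's message content item union.
type ContentItem =
  | { type: "text"; text: string }
  | { type: "url"; url: string; mimeType?: string }
  | { type: "file"; data: string; mimeType?: string }
  | { type: "local"; path: string; mimeType?: string };

async function toPart(item: ContentItem): Promise<Part> {
  switch (item.type) {
    case "text":
      return { text: item.text };
    case "url":
      // Remote files are passed to Gemini by reference.
      return { fileData: { fileUri: item.url, mimeType: item.mimeType } };
    case "file":
      // Already base64-encoded data is inlined directly.
      return { inlineData: { data: item.data, mimeType: item.mimeType } };
    case "local":
      // Local paths are read and inlined as base64 (the shipped code uses
      // @aigne/platform-helpers for this instead of node:fs directly).
      return {
        inlineData: {
          data: await readFile(item.path, "base64"),
          mimeType: item.mimeType,
        },
      };
  }
}
```
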
@@ -1,4 +1,9 @@
+ import { safeParseJSON, } from "@aigne/core";
+ import { isNonNullable } from "@aigne/core/utils/type-utils.js";
  import { OpenAIChatModel } from "@aigne/openai";
+ import { nodejs } from "@aigne/platform-helpers/nodejs/index.js";
+ import { FunctionCallingConfigMode, GoogleGenAI, } from "@google/genai";
+ import { v7 } from "uuid";
  const GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai";
  const GEMINI_DEFAULT_CHAT_MODEL = "gemini-2.0-flash";
  /**
@@ -27,6 +32,206 @@ export class GeminiChatModel extends OpenAIChatModel {
      supportsToolsUseWithJsonSchema = false;
      supportsParallelToolCalls = false;
      supportsToolStreaming = false;
+     _googleClient;
+     get googleClient() {
+         if (this._googleClient)
+             return this._googleClient;
+         const { apiKey } = this.credential;
+         if (!apiKey)
+             throw new Error(`${this.name} requires an API key. Please provide it via \`options.apiKey\`, or set the \`${this.apiKeyEnvName}\` environment variable`);
+         this._googleClient ??= new GoogleGenAI({ apiKey });
+         return this._googleClient;
+     }
+     process(input, options) {
+         const model = input.modelOptions?.model || this.credential.model;
+         if (!model.includes("image"))
+             return super.process(input, options);
+         return this.handleImageModelProcessing(input);
+     }
+     async *handleImageModelProcessing(input) {
+         const model = input.modelOptions?.model || this.credential.model;
+         const { contents, config } = await this.buildContents(input);
+         const parameters = {
+             model: model,
+             contents,
+             config: {
+                 responseModalities: input.modelOptions?.modalities,
+                 temperature: input.modelOptions?.temperature || this.modelOptions?.temperature,
+                 topP: input.modelOptions?.topP || this.modelOptions?.topP,
+                 frequencyPenalty: input.modelOptions?.frequencyPenalty || this.modelOptions?.frequencyPenalty,
+                 presencePenalty: input.modelOptions?.presencePenalty || this.modelOptions?.presencePenalty,
+                 ...config,
+                 ...(await this.buildTools(input)),
+                 ...(await this.buildConfig(input)),
+             },
+         };
+         const response = await this.googleClient.models.generateContentStream(parameters);
+         const usage = {
+             inputTokens: 0,
+             outputTokens: 0,
+         };
+         let responseModel;
+         const files = [];
+         const toolCalls = [];
+         let text = "";
+         for await (const chunk of response) {
+             if (!responseModel && chunk.modelVersion) {
+                 responseModel = chunk.modelVersion;
+                 yield { delta: { json: { model: responseModel } } };
+             }
+             for (const { content } of chunk.candidates ?? []) {
+                 if (content?.parts) {
+                     for (const part of content.parts) {
+                         if (part.text) {
+                             text += part.text;
+                             if (input.responseFormat?.type !== "json_schema") {
+                                 yield { delta: { text: { text: part.text } } };
+                             }
+                         }
+                         if (part.inlineData?.data) {
+                             files.push({
+                                 type: "file",
+                                 data: part.inlineData.data,
+                                 filename: part.inlineData.displayName,
+                                 mimeType: part.inlineData.mimeType,
+                             });
+                         }
+                         if (part.functionCall?.name) {
+                             toolCalls.push({
+                                 id: part.functionCall.id || v7(),
+                                 type: "function",
+                                 function: {
+                                     name: part.functionCall.name,
+                                     arguments: part.functionCall.args || {},
+                                 },
+                             });
+                             yield { delta: { json: { toolCalls } } };
+                         }
+                     }
+                 }
+             }
+             if (chunk.usageMetadata) {
+                 usage.inputTokens += chunk.usageMetadata.promptTokenCount || 0;
+                 usage.outputTokens += chunk.usageMetadata.candidatesTokenCount || 0;
+             }
+         }
+         if (input.responseFormat?.type === "json_schema") {
+             yield { delta: { json: { json: safeParseJSON(text) } } };
+         }
+         yield { delta: { json: { usage, files } } };
+     }
+     async buildConfig(input) {
+         const config = {};
+         if (input.responseFormat?.type === "json_schema") {
+             config.responseJsonSchema = input.responseFormat.jsonSchema.schema;
+             config.responseMimeType = "application/json";
+         }
+         return config;
+     }
+     async buildTools(input) {
+         const tools = [];
+         for (const tool of input.tools ?? []) {
+             tools.push({
+                 functionDeclarations: [
+                     {
+                         name: tool.function.name,
+                         description: tool.function.description,
+                         parametersJsonSchema: tool.function.parameters,
+                     },
+                 ],
+             });
+         }
+         const functionCallingConfig = !input.toolChoice
+             ? undefined
+             : input.toolChoice === "auto"
+                 ? { mode: FunctionCallingConfigMode.AUTO }
+                 : input.toolChoice === "none"
+                     ? { mode: FunctionCallingConfigMode.NONE }
+                     : input.toolChoice === "required"
+                         ? { mode: FunctionCallingConfigMode.ANY }
+                         : {
+                             mode: FunctionCallingConfigMode.ANY,
+                             allowedFunctionNames: [input.toolChoice.function.name],
+                         };
+         return { tools, toolConfig: { functionCallingConfig } };
+     }
+     async buildContents(input) {
+         const result = {
+             contents: [],
+         };
+         const systemParts = [];
+         result.contents = (await Promise.all(input.messages.map(async (msg) => {
+             if (msg.role === "system") {
+                 if (typeof msg.content === "string") {
+                     systemParts.push({ text: msg.content });
+                 }
+                 else if (Array.isArray(msg.content)) {
+                     systemParts.push(...msg.content.map((item) => {
+                         if (item.type === "text")
+                             return { text: item.text };
+                         throw new Error(`Unsupported content type: ${item.type}`);
+                     }));
+                 }
+                 return;
+             }
+             const content = {
+                 role: msg.role === "agent" ? "model" : "user",
+             };
+             if (msg.toolCalls) {
+                 content.parts = msg.toolCalls.map((call) => ({
+                     functionCall: {
+                         id: call.id,
+                         name: call.function.name,
+                         args: call.function.arguments,
+                     },
+                 }));
+             }
+             else if (msg.toolCallId) {
+                 const call = input.messages
+                     .flatMap((i) => i.toolCalls)
+                     .find((c) => c?.id === msg.toolCallId);
+                 if (!call)
+                     throw new Error(`Tool call not found: ${msg.toolCallId}`);
+                 content.parts = [
+                     {
+                         functionResponse: {
+                             id: msg.toolCallId,
+                             name: call.function.name,
+                             response: JSON.parse(msg.content),
+                         },
+                     },
+                 ];
+             }
+             else if (typeof msg.content === "string") {
+                 content.parts = [{ text: msg.content }];
+             }
+             else if (Array.isArray(msg.content)) {
+                 content.parts = await Promise.all(msg.content.map(async (item) => {
+                     switch (item.type) {
+                         case "text":
+                             return { text: item.text };
+                         case "url":
+                             return { fileData: { fileUri: item.url, mimeType: item.mimeType } };
+                         case "file":
+                             return { inlineData: { data: item.data, mimeType: item.mimeType } };
+                         case "local":
+                             return {
+                                 inlineData: {
+                                     data: await nodejs.fs.readFile(item.path, "base64"),
+                                     mimeType: item.mimeType,
+                                 },
+                             };
+                     }
+                 }));
+             }
+             return content;
+         }))).filter(isNonNullable);
+         if (systemParts) {
+             result.config ??= {};
+             result.config.systemInstruction = systemParts;
+         }
+         return result;
+     }
      async getRunMessages(input) {
          const messages = await super.getRunMessages(input);
          const lastMessage = messages.at(-1);
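
`handleImageModelProcessing` is an async generator: text parts stream incrementally unless `responseFormat` is `json_schema` (then the buffered text is parsed once at the end), and a final chunk carries usage plus any generated files. A consumption sketch, assuming @aigne/core's streaming invoke option and the chunk shapes yielded above:

```ts
import { GeminiChatModel } from "@aigne/gemini";

const model = new GeminiChatModel({ apiKey: process.env.GEMINI_API_KEY });

const stream = await model.invoke(
  {
    messages: [{ role: "user", content: "Draw a red bicycle" }],
    // Illustrative image-capable model id; modalities are forwarded to Gemini.
    modelOptions: { model: "gemini-2.0-flash-preview-image-generation", modalities: ["text", "image"] },
  },
  { streaming: true }, // assumed option; returns a stream of delta chunks
);

for await (const chunk of stream) {
  if (chunk.delta?.text?.text) process.stdout.write(chunk.delta.text.text);
  if (chunk.delta?.json?.files) console.log(`received ${chunk.delta.json.files.length} file(s)`);
}
```
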
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@aigne/gemini",
-   "version": "0.11.6",
+   "version": "0.12.1",
    "description": "AIGNE Gemini SDK for integrating with Google's Gemini AI models",
    "publishConfig": {
      "access": "public"
@@ -36,8 +36,10 @@
    },
    "dependencies": {
      "@google/genai": "^1.15.0",
+     "uuid": "^11.1.0",
      "zod": "^3.25.67",
-     "@aigne/openai": "^0.13.7"
+     "@aigne/openai": "^0.14.1",
+     "@aigne/platform-helpers": "^0.6.2"
    },
    "devDependencies": {
      "@types/bun": "^1.2.18",
@@ -45,8 +47,8 @@
      "npm-run-all": "^4.1.5",
      "rimraf": "^6.0.1",
      "typescript": "^5.8.3",
-     "@aigne/test-utils": "^0.5.43",
-     "@aigne/core": "^1.57.5"
+     "@aigne/test-utils": "^0.5.45",
+     "@aigne/core": "^1.58.1"
    },
    "scripts": {
      "lint": "tsc --noEmit",