@memberjunction/ai-gemini 2.32.2 → 2.34.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +24 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +77 -1
- package/dist/index.js.map +1 -1
- package/package.json +3 -3
- package/readme.md +177 -1
package/dist/index.d.ts
CHANGED
@@ -7,8 +7,31 @@ export declare class GeminiLLM extends BaseLLM {
      * Read only getter method to get the Gemini client instance
      */
     get GeminiClient(): GoogleGenerativeAI;
+    /**
+     * Gemini supports streaming
+     */
+    get SupportsStreaming(): boolean;
     protected geminiMessageSpacing(messages: Content[]): Content[];
-
+    /**
+     * Implementation of non-streaming chat completion for Gemini
+     */
+    protected nonStreamingChatCompletion(params: ChatParams): Promise<ChatResult>;
+    /**
+     * Create a streaming request for Gemini
+     */
+    protected createStreamingRequest(params: ChatParams): Promise<any>;
+    /**
+     * Process a streaming chunk from Gemini
+     */
+    protected processStreamingChunk(chunk: any): {
+        content: string;
+        finishReason?: string;
+        usage?: any;
+    };
+    /**
+     * Create the final response from streaming results for Gemini
+     */
+    protected finalizeStreamingResponse(accumulatedContent: string | null | undefined, lastChunk: any | null | undefined, usage: any | null | undefined): ChatResult;
     SummarizeText(params: SummarizeParams): Promise<SummarizeResult>;
     ClassifyText(params: any): Promise<any>;
     static MapMJMessageToGeminiHistoryEntry(message: ChatMessage): Content;
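The new declarations split chat completion into a non-streaming path plus a set of streaming hooks. As a rough illustration of the contract these signatures imply (this is not MemberJunction's actual BaseLLM; every name other than the four members above is hypothetical), a base class could drive the hooks roughly like this:

```typescript
// Hypothetical driver sketch only -- NOT MemberJunction's BaseLLM. It shows how
// SupportsStreaming, createStreamingRequest, processStreamingChunk, and
// finalizeStreamingResponse plausibly fit together.
interface ChunkInfo {
    content: string;
    finishReason?: string;
    usage?: any;
}

abstract class StreamingDriverSketch {
    abstract get SupportsStreaming(): boolean;
    protected abstract createStreamingRequest(params: any): Promise<any>;
    protected abstract processStreamingChunk(chunk: any): ChunkInfo;
    protected abstract finalizeStreamingResponse(
        accumulatedContent: string | null | undefined,
        lastChunk: any | null | undefined,
        usage: any | null | undefined
    ): any;

    // Accumulate chunk text from the async-iterable stream, then build the final result.
    async streamChat(params: any): Promise<any> {
        if (!this.SupportsStreaming) {
            throw new Error('This provider does not support streaming');
        }
        const stream = await this.createStreamingRequest(params);
        let accumulated = '';
        let lastChunk: any = null;
        let usage: any = null;
        for await (const chunk of stream) {
            const info = this.processStreamingChunk(chunk);
            accumulated += info.content ?? '';
            if (info.usage) {
                usage = info.usage;
            }
            lastChunk = chunk;
        }
        return this.finalizeStreamingResponse(accumulated, lastChunk, usage);
    }
}
```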
package/dist/index.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"..."} (previous single-line source map; the mappings string is truncated in this rendering)
+{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"..."} (regenerated single-line source map; the new mappings string covers the streaming members added to index.d.ts)
package/dist/index.js
CHANGED
@@ -24,6 +24,12 @@ let GeminiLLM = GeminiLLM_1 = class GeminiLLM extends ai_1.BaseLLM {
     get GeminiClient() {
         return this._gemini;
     }
+    /**
+     * Gemini supports streaming
+     */
+    get SupportsStreaming() {
+        return true;
+    }
     geminiMessageSpacing(messages) {
         // this method is simple, it makes sure that we alternate messages between user and assistant, otherwise Anthropic will
         // have a problem. If we find two user messages in a row, we insert an assistant message between them with just "OK"
@@ -42,7 +48,10 @@
         }
         return result;
     }
-
+    /**
+     * Implementation of non-streaming chat completion for Gemini
+     */
+    async nonStreamingChatCompletion(params) {
         try {
             // For text-only input, use the gemini-pro model
             const startTime = new Date();
@@ -102,6 +111,73 @@
             };
         }
     }
+    /**
+     * Create a streaming request for Gemini
+     */
+    async createStreamingRequest(params) {
+        const config = {
+            temperature: params.temperature || 0.5,
+            responseMimeType: params.responseFormat
+        };
+        const model = this.GeminiClient.getGenerativeModel({
+            model: params.model || "gemini-pro",
+            generationConfig: config
+        }, { apiVersion: "v1beta" });
+        const allMessagesButLast = params.messages.slice(0, params.messages.length - 1);
+        const convertedMessages = allMessagesButLast.map(m => GeminiLLM_1.MapMJMessageToGeminiHistoryEntry(m));
+        const tempMessages = this.geminiMessageSpacing(convertedMessages);
+        const chat = model.startChat({
+            history: tempMessages
+        });
+        const latestMessage = params.messages[params.messages.length - 1].content;
+        // Return an object with a stream property
+        const streamResult = await chat.sendMessageStream(latestMessage);
+        // Return the stream directly for the for-await loop to work
+        return streamResult.stream;
+    }
+    /**
+     * Process a streaming chunk from Gemini
+     */
+    processStreamingChunk(chunk) {
+        // Gemini chunks provide text via the text() method
+        const content = chunk.text();
+        // Gemini doesn't provide finish reason or usage in chunks
+        return {
+            content,
+            finishReason: undefined,
+            usage: null
+        };
+    }
+    /**
+     * Create the final response from streaming results for Gemini
+     */
+    finalizeStreamingResponse(accumulatedContent, lastChunk, usage) {
+        // Gemini doesn't provide usage information in streaming
+        // Create dates (will be overridden by base class)
+        const now = new Date();
+        // Create a proper ChatResult instance with constructor params
+        const result = new ai_1.ChatResult(true, now, now);
+        // Set all properties
+        result.data = {
+            choices: [{
+                    message: {
+                        role: 'assistant',
+                        content: accumulatedContent ? accumulatedContent : ''
+                    },
+                    finish_reason: 'stop',
+                    index: 0
+                }],
+            usage: {
+                promptTokens: 0,
+                completionTokens: 0,
+                totalTokens: 0
+            }
+        };
+        result.statusText = 'success';
+        result.errorMessage = null;
+        result.exception = null;
+        return result;
+    }
     SummarizeText(params) {
         throw new Error("Method not implemented.");
     }
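The streaming implementation above is a thin layer over @google/generative-ai. The following standalone sketch reproduces the same SDK calls outside the wrapper, with a placeholder API key, model name, and prompt, to make visible what createStreamingRequest returns (the async-iterable streamResult.stream) and what processStreamingChunk reads from each chunk (chunk.text()):

```typescript
import { GoogleGenerativeAI } from '@google/generative-ai';

// Standalone illustration of the SDK calls used by createStreamingRequest and
// processStreamingChunk above. Only the placeholder values are invented here.
async function streamOnce(apiKey: string): Promise<string> {
    const genAI = new GoogleGenerativeAI(apiKey);
    const model = genAI.getGenerativeModel(
        { model: 'gemini-pro', generationConfig: { temperature: 0.5 } },
        { apiVersion: 'v1beta' }
    );

    // History uses Gemini's Content shape with alternating 'user' / 'model' roles,
    // which is what geminiMessageSpacing enforces (inserting a short "OK" model turn).
    const chat = model.startChat({
        history: [
            { role: 'user', parts: [{ text: 'You are a helpful assistant.' }] },
            { role: 'model', parts: [{ text: 'OK' }] }
        ]
    });

    // sendMessageStream resolves to an object whose `stream` property is an async
    // iterable of chunks -- exactly what createStreamingRequest hands back.
    const streamResult = await chat.sendMessageStream('Explain streaming in one sentence.');

    let accumulated = '';
    for await (const chunk of streamResult.stream) {
        accumulated += chunk.text(); // same call processStreamingChunk makes per chunk
    }
    return accumulated;
}
```

Returning the raw stream keeps the provider code limited to chunk parsing; as the hook names suggest, accumulation and finalization remain with the caller.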
package/dist/index.js.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"..."} (previous single-line source map; the mappings string is truncated in this rendering)
+{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"..."} (regenerated single-line source map; the new mappings string covers the streaming methods added to index.js)
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@memberjunction/ai-gemini",
-  "version": "2.32.2",
+  "version": "2.34.0",
   "description": "MemberJunction Wrapper for Google Gemini AI Models",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -19,8 +19,8 @@
     "typescript": "^5.4.5"
   },
   "dependencies": {
-    "@memberjunction/ai": "2.32.2",
-    "@memberjunction/global": "2.32.2",
+    "@memberjunction/ai": "2.34.0",
+    "@memberjunction/global": "2.34.0",
     "@google/generative-ai": "0.21.0"
   }
 }
package/readme.md
CHANGED
@@ -1,2 +1,178 @@
 # @memberjunction/ai-gemini
-
+
+A comprehensive wrapper for Google's Gemini AI models that seamlessly integrates with the MemberJunction AI framework, providing access to Google's powerful generative AI capabilities.
+
+## Features
+
+- **Google Gemini Integration**: Connect to Google's state-of-the-art Gemini models
+- **Standardized Interface**: Implements MemberJunction's BaseLLM abstract class
+- **Message Formatting**: Handles conversion between MemberJunction and Gemini message formats
+- **Error Handling**: Robust error handling with detailed reporting
+- **Chat Support**: Full support for chat-based interactions with Gemini models
+- **Temperature Control**: Fine-tune generation creativity
+- **Response Format Control**: Request specific response MIME types
+
+## Installation
+
+```bash
+npm install @memberjunction/ai-gemini
+```
+
+## Requirements
+
+- Node.js 16+
+- A Google AI Studio API key
+- MemberJunction Core libraries
+
+## Usage
+
+### Basic Setup
+
+```typescript
+import { GeminiLLM } from '@memberjunction/ai-gemini';
+
+// Initialize with your Google AI API key
+const geminiLLM = new GeminiLLM('your-google-ai-api-key');
+```
+
+### Chat Completion
+
+```typescript
+import { ChatParams } from '@memberjunction/ai';
+
+// Create chat parameters
+const chatParams: ChatParams = {
+  model: 'gemini-pro', // or 'gemini-pro-vision' for images, 'gemini-ultra' for more advanced capabilities
+  messages: [
+    { role: 'system', content: 'You are a helpful assistant.' },
+    { role: 'user', content: 'What are the key features of the Gemini AI model?' }
+  ],
+  temperature: 0.7,
+  maxOutputTokens: 1000
+};
+
+// Get a response
+try {
+  const response = await geminiLLM.ChatCompletion(chatParams);
+  if (response.success) {
+    console.log('Response:', response.data.choices[0].message.content);
+    console.log('Time elapsed:', response.timeElapsed, 'ms');
+  } else {
+    console.error('Error:', response.errorMessage);
+  }
+} catch (error) {
+  console.error('Exception:', error);
+}
+```
+
+### Direct Access to Gemini Client
+
+```typescript
+// Access the underlying Google Generative AI client for advanced usage
+const geminiClient = geminiLLM.GeminiClient;
+
+// Use the client directly if needed
+const model = geminiClient.getGenerativeModel({ model: 'gemini-pro' });
+const result = await model.generateContent('Tell me a short joke about programming');
+console.log(result.response.text());
+```
+
+## Supported Models
+
+Google Gemini provides several models with different capabilities:
+
+- `gemini-pro`: General-purpose text model
+- `gemini-pro-vision`: Multimodal model that can process images and text
+- `gemini-ultra`: Google's most advanced model (if available)
+
+Check the [Google AI documentation](https://ai.google.dev/models/gemini) for the latest list of supported models.
+
+## API Reference
+
+### GeminiLLM Class
+
+A class that extends BaseLLM to provide Google Gemini-specific functionality.
+
+#### Constructor
+
+```typescript
+new GeminiLLM(apiKey: string)
+```
+
+#### Properties
+
+- `GeminiClient`: (read-only) Returns the underlying Google Generative AI client instance
+
+#### Methods
+
+- `ChatCompletion(params: ChatParams): Promise<ChatResult>` - Perform a chat completion
+- `SummarizeText(params: SummarizeParams): Promise<SummarizeResult>` - Not implemented yet
+- `ClassifyText(params: ClassifyParams): Promise<ClassifyResult>` - Not implemented yet
+
+#### Static Methods
+
+- `MapMJMessageToGeminiHistoryEntry(message: ChatMessage): Content` - Converts MemberJunction messages to Gemini format
+
+## Response Format Control
+
+Control the format of Gemini responses using the `responseFormat` parameter:
+
+```typescript
+const params: ChatParams = {
+  // ...other parameters
+  responseFormat: 'text/plain', // Regular text response
+};
+
+// For structured data
+const jsonParams: ChatParams = {
+  // ...other parameters
+  responseFormat: 'application/json', // Request JSON response
+};
+```
+
+## Error Handling
+
+The wrapper provides detailed error information:
+
+```typescript
+try {
+  const response = await geminiLLM.ChatCompletion(params);
+  if (!response.success) {
+    console.error('Error:', response.errorMessage);
+    console.error('Status:', response.statusText);
+    console.error('Exception:', response.exception);
+  }
+} catch (error) {
+  console.error('Exception occurred:', error);
+}
+```
+
+## Message Handling
+
+The wrapper handles proper message formatting and role conversion between MemberJunction's format and Google Gemini's expected format:
+
+- MemberJunction's `system` and `user` roles are converted to Gemini's `user` role
+- MemberJunction's `assistant` role is converted to Gemini's `model` role
+- Messages are properly spaced to ensure alternating roles as required by Gemini
+
+## Limitations
+
+Currently, the wrapper implements:
+- Chat completion functionality
+
+Future implementations may include:
+- `SummarizeText` functionality
+- `ClassifyText` functionality
+- Token counting and usage reporting
+- Image processing with `gemini-pro-vision`
+- Function calling
+
+## Dependencies
+
+- `@google/generative-ai`: Official Google Generative AI SDK
+- `@memberjunction/ai`: MemberJunction AI core framework
+- `@memberjunction/global`: MemberJunction global utilities
+
+## License
+
+ISC