@memberjunction/ai-gemini 2.42.1 → 2.44.0
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- package/package.json +3 -3
- package/readme.md +188 -25
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@memberjunction/ai-gemini",
-  "version": "2.42.1",
+  "version": "2.44.0",
   "description": "MemberJunction Wrapper for Google Gemini AI Models",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -19,8 +19,8 @@
     "typescript": "^5.4.5"
   },
   "dependencies": {
-    "@memberjunction/ai": "2.
-    "@memberjunction/global": "2.
+    "@memberjunction/ai": "2.44.0",
+    "@memberjunction/global": "2.44.0",
     "@google/genai": "0.14.0"
   }
 }
package/readme.md
CHANGED
@@ -6,9 +6,12 @@ A comprehensive wrapper for Google's Gemini AI models that seamlessly integrates

 - **Google Gemini Integration**: Connect to Google's state-of-the-art Gemini models using the official @google/genai SDK
 - **Standardized Interface**: Implements MemberJunction's BaseLLM abstract class
-- **
+- **Streaming Support**: Full support for streaming responses with real-time token generation
+- **Multimodal Support**: Handle text, images, audio, video, and file content
+- **Message Formatting**: Automatic conversion between MemberJunction and Gemini message formats
+- **Effort Level Support**: Leverage Gemini's reasoning mode for higher-quality responses
 - **Error Handling**: Robust error handling with detailed reporting
-- **Chat Support**: Full support for chat-based interactions with
+- **Chat Support**: Full support for chat-based interactions with conversation history
 - **Temperature Control**: Fine-tune generation creativity
 - **Response Format Control**: Request specific response MIME types

@@ -42,7 +45,7 @@ import { ChatParams } from '@memberjunction/ai';

 // Create chat parameters
 const chatParams: ChatParams = {
-  model: 'gemini-pro', // or 'gemini-pro-vision' for
+  model: 'gemini-pro', // or 'gemini-pro-vision' for multimodal
   messages: [
     { role: 'system', content: 'You are a helpful assistant.' },
     { role: 'user', content: 'What are the key features of the Gemini AI model?' }
@@ -65,18 +68,84 @@ try {
 }
 ```

+### Streaming Chat Completion
+
+```typescript
+import { StreamingChatCallbacks } from '@memberjunction/ai';
+
+// Define streaming callbacks
+const streamCallbacks: StreamingChatCallbacks = {
+  onToken: (token: string) => {
+    process.stdout.write(token); // Print each token as it arrives
+  },
+  onComplete: (fullResponse: string) => {
+    console.log('\n\nComplete response received');
+  },
+  onError: (error: Error) => {
+    console.error('Streaming error:', error);
+  }
+};
+
+// Use streaming
+const streamParams: ChatParams = {
+  model: 'gemini-pro',
+  messages: [
+    { role: 'user', content: 'Write a short story about a robot.' }
+  ],
+  streaming: true,
+  streamingCallbacks: streamCallbacks
+};
+
+await geminiLLM.ChatCompletion(streamParams);
+```
+
+### Multimodal Content
+
+```typescript
+import { ChatMessageContent } from '@memberjunction/ai';
+
+// Create multimodal content
+const multimodalContent: ChatMessageContent = [
+  { type: 'text', content: 'What do you see in this image?' },
+  { type: 'image_url', content: 'base64_encoded_image_data_here' }
+];
+
+const multimodalParams: ChatParams = {
+  model: 'gemini-pro-vision',
+  messages: [
+    { role: 'user', content: multimodalContent }
+  ]
+};
+
+const response = await geminiLLM.ChatCompletion(multimodalParams);
+```
+
+### Enhanced Reasoning with Effort Level
+
+```typescript
+// Use effort level to enable Gemini's full reasoning mode
+const reasoningParams: ChatParams = {
+  model: 'gemini-pro',
+  messages: [
+    { role: 'user', content: 'Solve this complex logic puzzle...' }
+  ],
+  effortLevel: 'high' // Enables full reasoning mode
+};
+
+const response = await geminiLLM.ChatCompletion(reasoningParams);
+```
+
 ### Direct Access to Gemini Client

 ```typescript
 // Access the underlying GoogleGenAI client for advanced usage
 const geminiClient = geminiLLM.GeminiClient;

-// Use the client directly if needed
-const
+// Use the client directly if needed for custom operations
+const chat = geminiClient.chats.create({
   model: 'gemini-pro',
-
+  history: []
 });
-console.log(result.candidates[0].content.parts[0].text);
 ```

 ## Supported Models
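The updated "Direct Access to Gemini Client" example above creates a chat session but no longer shows a call being sent (the old `console.log(result...)` line was removed). A minimal sketch of how that example might be continued is shown below; the `sendMessage` call, the response's `text` field, and the environment-variable name are assumptions about the `@google/genai` client pinned in package.json, not something this diff documents.

```typescript
// Hypothetical continuation of the "Direct Access to Gemini Client" example.
// Assumes the @google/genai chat object exposes sendMessage() and a text field
// on its response; verify against the SDK version pinned in package.json (0.14.0).
import { GeminiLLM } from '@memberjunction/ai-gemini';

// Environment-variable name is illustrative only
const geminiLLM = new GeminiLLM(process.env.GEMINI_API_KEY ?? '');
const geminiClient = geminiLLM.GeminiClient;

// Create a chat session directly on the underlying client, as in the readme
const chat = geminiClient.chats.create({
  model: 'gemini-pro',
  history: []
});

// Send a single turn and print the reply (method and field names are assumptions)
const reply = await chat.sendMessage({ message: 'List three uses for the Gemini API.' });
console.log(reply.text);
```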
@@ -85,7 +154,7 @@ Google Gemini provides several models with different capabilities:

 - `gemini-pro`: General-purpose text model
 - `gemini-pro-vision`: Multimodal model that can process images and text
-- `gemini-ultra`: Google's most advanced model (
+- `gemini-ultra`: Google's most advanced model (when available)

 Check the [Google AI documentation](https://ai.google.dev/models/gemini) for the latest list of supported models.

@@ -101,19 +170,57 @@ A class that extends BaseLLM to provide Google Gemini-specific functionality.
 new GeminiLLM(apiKey: string)
 ```

+Creates a new instance of the Gemini LLM wrapper.
+
+**Parameters:**
+- `apiKey`: Your Google AI Studio API key
+
 #### Properties

 - `GeminiClient`: (read-only) Returns the underlying GoogleGenAI client instance
+- `SupportsStreaming`: (read-only) Returns `true` - Gemini supports streaming responses

 #### Methods

-
-
-
+##### ChatCompletion(params: ChatParams): Promise<ChatResult>
+
+Perform a chat completion with Gemini models.
+
+**Parameters:**
+- `params`: Chat parameters including model, messages, temperature, etc.
+
+**Returns:**
+- Promise resolving to a `ChatResult` with the model's response
+
+##### SummarizeText(params: SummarizeParams): Promise<SummarizeResult>
+
+Not implemented yet - will throw an error if called.
+
+##### ClassifyText(params: ClassifyParams): Promise<ClassifyResult>
+
+Not implemented yet - will throw an error if called.

 #### Static Methods

-
+##### MapMJMessageToGeminiHistoryEntry(message: ChatMessage): Content
+
+Converts a MemberJunction ChatMessage to Gemini's Content format.
+
+**Parameters:**
+- `message`: MemberJunction ChatMessage object
+
+**Returns:**
+- Gemini Content object with proper role mapping
+
+##### MapMJContentToGeminiParts(content: ChatMessageContent): Array<Part>
+
+Converts MemberJunction message content to Gemini Parts array.
+
+**Parameters:**
+- `content`: String or array of content parts
+
+**Returns:**
+- Array of Gemini Part objects

 ## Response Format Control

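The API reference added above documents two static helpers without showing them in use. A minimal sketch follows; the `ChatMessage` import path and object shape are assumptions inferred from the signatures in the readme rather than verified against the package source.

```typescript
// Minimal sketch of the static mapping helpers documented in the readme above.
// The ChatMessage shape and import path are assumptions based on the signatures shown.
import { GeminiLLM } from '@memberjunction/ai-gemini';
import { ChatMessage } from '@memberjunction/ai';

const message: ChatMessage = {
  role: 'assistant',
  content: 'Hello from the assistant.'
};

// Per the readme, MemberJunction's 'assistant' role maps to Gemini's 'model' role
const historyEntry = GeminiLLM.MapMJMessageToGeminiHistoryEntry(message);
console.log(historyEntry.role); // expected: 'model'

// A plain string becomes a single text part in Gemini's Parts array
const parts = GeminiLLM.MapMJContentToGeminiParts('Just a text message.');
console.log(parts.length); // expected: 1
```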
@@ -155,26 +262,82 @@ The wrapper handles proper message formatting and role conversion between Member

 - MemberJunction's `system` and `user` roles are converted to Gemini's `user` role
 - MemberJunction's `assistant` role is converted to Gemini's `model` role
-- Messages are
+- Messages are automatically spaced to ensure alternating roles as required by Gemini
+- Multimodal content is properly converted with appropriate MIME types
+
+## Content Type Support
+
+The wrapper supports various content types with automatic MIME type mapping:
+
+- **Text**: Standard text messages
+- **Images**: `image_url` type → `image/jpeg` MIME type
+- **Audio**: `audio_url` type → `audio/mpeg` MIME type
+- **Video**: `video_url` type → `video/mp4` MIME type
+- **Files**: `file_url` type → `application/octet-stream` MIME type
+
+## Integration with MemberJunction
+
+This package is designed to work seamlessly with the MemberJunction AI framework:
+
+```typescript
+import { AIEngine } from '@memberjunction/ai';
+import { GeminiLLM } from '@memberjunction/ai-gemini';
+
+// Register the Gemini provider with the AI engine
+const aiEngine = new AIEngine();
+const geminiProvider = new GeminiLLM('your-api-key');
+
+// Use through the AI engine's unified interface
+const result = await aiEngine.ChatCompletion({
+  provider: 'GeminiLLM',
+  model: 'gemini-pro',
+  messages: [/* ... */]
+});
+```
+
+## Performance Considerations
+
+- **Streaming**: Use streaming for long responses to improve perceived performance
+- **Effort Level**: Use the `effortLevel` parameter judiciously as it increases latency and cost
+- **Model Selection**: Choose the appropriate model based on your needs (text-only vs multimodal)
+- **Message Spacing**: The wrapper automatically handles message spacing, adding minimal overhead

 ## Limitations

 Currently, the wrapper implements:
-- Chat completion functionality
-
-
-- `SummarizeText` functionality
-- `ClassifyText` functionality
--
-- Image processing with `gemini-pro-vision`
-- Function calling
+- ✅ Chat completion functionality (streaming and non-streaming)
+- ✅ Multimodal content support
+- ✅ Effort level configuration for enhanced reasoning
+- ❌ `SummarizeText` functionality (not implemented)
+- ❌ `ClassifyText` functionality (not implemented)
+- ❌ Detailed token usage reporting (Gemini doesn't provide this)

 ## Dependencies

-- `@google/genai
-- `@memberjunction/ai
-- `@memberjunction/global
+- `@google/genai` (v0.14.0): Official Google GenAI SDK
+- `@memberjunction/ai` (v2.43.0): MemberJunction AI core framework
+- `@memberjunction/global` (v2.43.0): MemberJunction global utilities
+
+## Development
+
+### Building
+
+```bash
+npm run build
+```
+
+### Testing
+
+Tests are not currently implemented. To add tests:
+
+```bash
+npm test
+```

 ## License

-ISC
+ISC
+
+## Contributing
+
+For bug reports, feature requests, or contributions, please visit the [MemberJunction repository](https://github.com/MemberJunction/MJ).