@umituz/react-native-ai-groq-provider 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,212 @@
1
+ /**
2
+ * Model Types and Configurations
3
+ */
4
+
5
+ import { GROQ_MODELS } from "./groq.types";
6
+
7
/**
 * Model capabilities
 *
 * Feature flags and context-window size for a Groq-hosted model.
 * Queried via `modelSupports` for per-capability checks.
 */
export interface ModelCapabilities {
  /** Supports streaming (chunked) responses */
  streaming: boolean;
  /** Supports function/tool calling */
  functionCalling: boolean;
  /** Supports vision (image) inputs */
  vision: boolean;
  /** Maximum context length (token count per the registry entries below) */
  maxContextLength: number;
  /** Supports system instructions (a leading "system" role message) */
  systemInstructions: boolean;
}
22
+
23
/**
 * Model information
 *
 * One entry of MODEL_REGISTRY: identity, capabilities, and optional
 * throughput/pricing metadata for a Groq model.
 */
export interface ModelInfo {
  /** Model ID (matches the GROQ_MODELS constant used as the registry key) */
  id: string;
  /** Human-readable display name */
  name: string;
  /** Model capabilities */
  capabilities: ModelCapabilities;
  /** Recommended temperature range as [min, max] */
  temperatureRange: [number, number];
  /** Speed in tokens/second (static estimate; absent when unknown) */
  speed?: number;
  /** Cost per 1M tokens (input), in USD — NOTE(review): verify currency/units against Groq pricing */
  costPerInput?: number;
  /** Cost per 1M tokens (output) */
  costPerOutput?: number;
}
42
+
43
/**
 * Model registry with capabilities
 *
 * Static catalog of Groq-hosted models keyed by model ID.
 * NOTE(review): speed (tokens/s) and per-1M-token cost figures are
 * hard-coded snapshots — verify against Groq's current model/pricing
 * pages before relying on them for billing estimates.
 */
export const MODEL_REGISTRY: Record<string, ModelInfo> = {
  [GROQ_MODELS.LLAMA_3_1_8B_INSTANT]: {
    id: GROQ_MODELS.LLAMA_3_1_8B_INSTANT,
    name: "Llama 3.1 8B Instant",
    capabilities: {
      streaming: true,
      functionCalling: true,
      vision: false,
      maxContextLength: 131072,
      systemInstructions: true,
    },
    temperatureRange: [0.0, 2.0],
    speed: 560,
    costPerInput: 0.05,
    costPerOutput: 0.08,
  },
  [GROQ_MODELS.LLAMA_3_3_70B_VERSATILE]: {
    id: GROQ_MODELS.LLAMA_3_3_70B_VERSATILE,
    name: "Llama 3.3 70B Versatile",
    capabilities: {
      streaming: true,
      functionCalling: true,
      vision: false,
      maxContextLength: 131072,
      systemInstructions: true,
    },
    temperatureRange: [0.0, 2.0],
    speed: 280,
    costPerInput: 0.59,
    costPerOutput: 0.79,
  },
  [GROQ_MODELS.LLAMA_3_1_70B_VERSATILE]: {
    id: GROQ_MODELS.LLAMA_3_1_70B_VERSATILE,
    name: "Llama 3.1 70B Versatile",
    capabilities: {
      streaming: true,
      functionCalling: true,
      vision: false,
      maxContextLength: 131072,
      systemInstructions: true,
    },
    temperatureRange: [0.0, 2.0],
    speed: 280,
    costPerInput: 0.59,
    costPerOutput: 0.79,
  },
  [GROQ_MODELS.GPT_OSS_20B]: {
    id: GROQ_MODELS.GPT_OSS_20B,
    name: "GPT-OSS 20B",
    capabilities: {
      streaming: true,
      functionCalling: true,
      vision: false,
      maxContextLength: 131072,
      systemInstructions: true,
    },
    temperatureRange: [0.0, 2.0],
    speed: 1000,
    costPerInput: 0.075,
    costPerOutput: 0.30,
  },
  [GROQ_MODELS.GPT_OSS_120B]: {
    id: GROQ_MODELS.GPT_OSS_120B,
    name: "GPT-OSS 120B",
    capabilities: {
      streaming: true,
      functionCalling: true,
      vision: false,
      maxContextLength: 131072,
      systemInstructions: true,
    },
    temperatureRange: [0.0, 2.0],
    speed: 400,
    costPerInput: 0.40,
    costPerOutput: 0.40,
  },
  [GROQ_MODELS.MIXTRAL_8X7B]: {
    id: GROQ_MODELS.MIXTRAL_8X7B,
    name: "Mixtral 8x7B",
    capabilities: {
      streaming: true,
      functionCalling: true,
      vision: false,
      // Only model in the registry with a 32K (not 128K) context window.
      maxContextLength: 32768,
      systemInstructions: true,
    },
    temperatureRange: [0.0, 2.0],
    speed: 250,
    costPerInput: 0.27,
    costPerOutput: 0.27,
  },
  [GROQ_MODELS.GEMMA_2_9B]: {
    id: GROQ_MODELS.GEMMA_2_9B,
    name: "Gemma 2 9B IT",
    capabilities: {
      streaming: true,
      functionCalling: true,
      vision: false,
      maxContextLength: 131072,
      systemInstructions: true,
    },
    temperatureRange: [0.0, 2.0],
    speed: 450,
    costPerInput: 0.20,
    costPerOutput: 0.20,
  },
  [GROQ_MODELS.LLAMA_4_SCOUT_17B]: {
    id: GROQ_MODELS.LLAMA_4_SCOUT_17B,
    name: "Llama 4 Scout 17B",
    capabilities: {
      streaming: true,
      functionCalling: true,
      vision: false,
      maxContextLength: 131072,
      systemInstructions: true,
    },
    temperatureRange: [0.0, 2.0],
    speed: 30,
    costPerInput: 0.15,
    costPerOutput: 0.15,
  },
  [GROQ_MODELS.KIMI_K2_INSTRUCT]: {
    id: GROQ_MODELS.KIMI_K2_INSTRUCT,
    name: "Kimi K2 Instruct",
    capabilities: {
      streaming: true,
      functionCalling: true,
      vision: false,
      maxContextLength: 131072,
      systemInstructions: true,
    },
    temperatureRange: [0.0, 2.0],
    speed: 60,
    costPerInput: 0.12,
    costPerOutput: 0.12,
  },
  [GROQ_MODELS.QWEN3_32B]: {
    id: GROQ_MODELS.QWEN3_32B,
    name: "Qwen 3 32B",
    capabilities: {
      streaming: true,
      functionCalling: true,
      vision: false,
      maxContextLength: 131072,
      systemInstructions: true,
    },
    temperatureRange: [0.0, 2.0],
    speed: 60,
    costPerInput: 0.10,
    costPerOutput: 0.10,
  },
};
198
+
199
+ /**
200
+ * Get model information
201
+ */
202
+ export function getModelInfo(modelId: string): ModelInfo | undefined {
203
+ return MODEL_REGISTRY[modelId];
204
+ }
205
+
206
+ /**
207
+ * Check if model supports a capability
208
+ */
209
+ export function modelSupports(modelId: string, capability: keyof ModelCapabilities): boolean {
210
+ const info = getModelInfo(modelId);
211
+ return Boolean(info?.capabilities[capability]);
212
+ }
package/src/index.ts ADDED
@@ -0,0 +1,113 @@
1
+ /**
2
+ * @umituz/react-native-ai-groq-provider
3
+ * Groq text generation provider for React Native applications
4
+ *
5
+ * @author umituz
6
+ * @license MIT
7
+ */
8
+
9
// Domain Types — request/response/message shapes for the Groq chat API
export type {
  GroqConfig,
  GroqGenerationConfig,
  GroqMessageRole,
  GroqMessage,
  GroqChatRequest,
  GroqChatResponse,
  GroqChoice,
  GroqFinishReason,
  GroqUsage,
  GroqChatChunk,
  GroqChunkChoice,
  GroqErrorResponse,
  GroqChatConfig,
} from "./domain/entities";

// Model ID constants and per-task defaults
export {
  GROQ_MODELS,
  DEFAULT_MODELS,
} from "./domain/entities";

// Typed error class, error categories, and HTTP-status mapping
export {
  GroqError,
  GroqErrorType,
  mapHttpStatusToErrorType,
} from "./domain/entities/error.types";

// Model capability registry and lookup helpers
export {
  MODEL_REGISTRY,
  getModelInfo,
  modelSupports,
  type ModelCapabilities,
  type ModelInfo,
} from "./domain/entities/models";

// Services
export { groqHttpClient } from "./infrastructure/services/GroqClient";
export { textGeneration, chatGeneration } from "./infrastructure/services/TextGeneration";
export { structuredText, structuredChat } from "./infrastructure/services/StructuredText";
export { streaming, streamingChat } from "./infrastructure/services/Streaming";
// Multi-turn chat session management (in-memory sessions)
export {
  chatSessionService,
  createChatSession,
  sendChatMessage,
  buildChatHistory,
  trimChatHistory,
  type ChatSession,
  type SendChatMessageOptions,
  type ChatSendResult,
  type ChatHistoryMessage,
} from "./infrastructure/services";

export type { StreamingCallbacks, StreamingOptions } from "./infrastructure/services/Streaming";

// React Hooks
export { useGroq } from "./presentation/hooks/useGroq";
export type { UseGroqOptions, UseGroqReturn } from "./presentation/hooks/useGroq";

export { useOperationManager } from "./presentation/hooks/useOperationManager";

// Provider Configuration & Factory
export {
  ConfigBuilder,
  GenerationConfigBuilder,
  providerFactory,
  initializeProvider,
  configureProvider,
  resetProvider,
} from "./providers/ProviderFactory";

export type {
  ProviderConfig,
  ProviderFactoryOptions,
} from "./providers/ProviderFactory";

// Utilities
export {
  createUserMessage,
  createAssistantMessage,
  createSystemMessage,
  createTextMessage,
  promptToMessages,
  extractTextFromMessages,
  formatMessagesForDisplay,
} from "./infrastructure/utils/content-mapper.util";

export {
  getUserFriendlyError,
  isRetryableError,
  isAuthError,
  formatErrorForLogging,
} from "./infrastructure/utils/error-mapper.util";

// Async execution helpers (state-setter wiring, retry wrapper)
export {
  executeWithState,
  executeWithRetry,
  type AsyncStateSetters,
  type AsyncCallbacks,
} from "./infrastructure/utils/async";

export {
  telemetry,
  useTelemetry,
} from "./infrastructure/telemetry";
@@ -0,0 +1,265 @@
1
+ /**
2
+ * Chat Session Service
3
+ * Manages multi-turn chat conversations with Groq API
4
+ */
5
+
6
+ import type {
7
+ GroqMessage,
8
+ GroqChatConfig,
9
+ GroqGenerationConfig,
10
+ } from "../../domain/entities";
11
+ import { groqHttpClient } from "./GroqClient";
12
+ import { DEFAULT_MODELS } from "../../domain/entities";
13
+ import { GroqError, GroqErrorType } from "../../domain/entities/error.types";
14
+
15
/**
 * Chat session state
 *
 * Mutable record of one multi-turn conversation: its model, optional
 * system instruction, accumulated messages, and timestamps.
 */
export interface ChatSession {
  /** Unique session identifier (generated by createChatSession) */
  id: string;
  /** Groq model ID used for this session's requests */
  model: string;
  /** Optional system instruction, prepended to each API request by buildChatHistory */
  systemInstruction?: string;
  /** Conversation turns (the system instruction is stored separately above) */
  messages: GroqMessage[];
  /** Optional generation parameters (temperature, maxTokens, topP) */
  generationConfig?: GroqGenerationConfig;
  /** When the session was created */
  createdAt: Date;
  /** When the session was last mutated (send, reset, or config update) */
  updatedAt: Date;
}

/**
 * Result of sending a chat message
 */
export interface ChatSendResult {
  /** Assistant reply text */
  response: string;
  /** Token accounting reported by the API (zeros when the API omits usage) */
  usage: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  };
  /** Why generation stopped; "unknown" when the API omits it */
  finishReason: string;
}

/**
 * Options for sending a chat message
 */
export interface SendChatMessageOptions {
  /** Stream the response (not yet implemented) */
  stream?: boolean;
}

/**
 * Message format for chat history (simplified)
 */
export type ChatHistoryMessage = {
  role: "system" | "user" | "assistant";
  content: string;
};
56
+
57
+ /**
58
+ * Create a new chat session
59
+ */
60
+ export function createChatSession(config: GroqChatConfig = {}): ChatSession {
61
+ return {
62
+ id: `groq-chat-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
63
+ model: config.model || DEFAULT_MODELS.TEXT,
64
+ systemInstruction: config.systemInstruction,
65
+ messages: config.history ? [...config.history] : [],
66
+ generationConfig: config.generationConfig,
67
+ createdAt: new Date(),
68
+ updatedAt: new Date(),
69
+ };
70
+ }
71
+
72
+ /**
73
+ * Chat session service
74
+ */
75
+ class ChatSessionService {
76
+ private sessions = new Map<string, ChatSession>();
77
+
78
+ /**
79
+ * Create a new chat session
80
+ */
81
+ create(config: GroqChatConfig = {}): ChatSession {
82
+ const session = createChatSession(config);
83
+ this.sessions.set(session.id, session);
84
+ return session;
85
+ }
86
+
87
+ /**
88
+ * Get a session by ID
89
+ */
90
+ get(sessionId: string): ChatSession | undefined {
91
+ return this.sessions.get(sessionId);
92
+ }
93
+
94
+ /**
95
+ * Delete a session
96
+ */
97
+ delete(sessionId: string): boolean {
98
+ return this.sessions.delete(sessionId);
99
+ }
100
+
101
+ /**
102
+ * Send a message in a chat session
103
+ */
104
+ async send(
105
+ sessionId: string,
106
+ content: string,
107
+ options: SendChatMessageOptions = {}
108
+ ): Promise<ChatSendResult> {
109
+ const session = this.sessions.get(sessionId);
110
+ if (!session) {
111
+ throw new GroqError(
112
+ GroqErrorType.MISSING_CONFIG,
113
+ `Chat session ${sessionId} not found`
114
+ );
115
+ }
116
+
117
+ // Add user message to history
118
+ const userMessage: GroqMessage = {
119
+ role: "user",
120
+ content,
121
+ };
122
+ session.messages.push(userMessage);
123
+
124
+ // Build messages array for API
125
+ const messagesForApi = buildChatHistory(session);
126
+
127
+ // Call API
128
+ const response = await groqHttpClient.chatCompletion({
129
+ model: session.model,
130
+ messages: messagesForApi,
131
+ temperature: session.generationConfig?.temperature || 0.7,
132
+ max_tokens: session.generationConfig?.maxTokens || 1024,
133
+ top_p: session.generationConfig?.topP,
134
+ });
135
+
136
+ // Extract assistant response
137
+ const assistantContent = response.choices[0]?.message?.content;
138
+ if (!assistantContent) {
139
+ throw new GroqError(
140
+ GroqErrorType.UNKNOWN_ERROR,
141
+ "No content generated from Groq API"
142
+ );
143
+ }
144
+
145
+ // Add assistant message to history
146
+ const assistantMessage: GroqMessage = {
147
+ role: "assistant",
148
+ content: assistantContent,
149
+ };
150
+ session.messages.push(assistantMessage);
151
+
152
+ // Update timestamp
153
+ session.updatedAt = new Date();
154
+
155
+ return {
156
+ response: assistantContent,
157
+ usage: {
158
+ promptTokens: response.usage?.prompt_tokens || 0,
159
+ completionTokens: response.usage?.completion_tokens || 0,
160
+ totalTokens: response.usage?.total_tokens || 0,
161
+ },
162
+ finishReason: response.choices[0]?.finish_reason || "unknown",
163
+ };
164
+ }
165
+
166
+ /**
167
+ * Reset a chat session (clear messages except system instruction)
168
+ */
169
+ reset(sessionId: string): ChatSession {
170
+ const session = this.sessions.get(sessionId);
171
+ if (!session) {
172
+ throw new GroqError(
173
+ GroqErrorType.MISSING_CONFIG,
174
+ `Chat session ${sessionId} not found`
175
+ );
176
+ }
177
+
178
+ session.messages = [];
179
+ session.updatedAt = new Date();
180
+
181
+ return session;
182
+ }
183
+
184
+ /**
185
+ * Update a chat session's config
186
+ */
187
+ updateConfig(sessionId: string, config: Partial<GroqChatConfig>): ChatSession {
188
+ const session = this.sessions.get(sessionId);
189
+ if (!session) {
190
+ throw new GroqError(
191
+ GroqErrorType.MISSING_CONFIG,
192
+ `Chat session ${sessionId} not found`
193
+ );
194
+ }
195
+
196
+ if (config.model !== undefined) session.model = config.model;
197
+ if (config.systemInstruction !== undefined) session.systemInstruction = config.systemInstruction;
198
+ if (config.generationConfig !== undefined) session.generationConfig = config.generationConfig;
199
+ if (config.history) session.messages = [...config.history];
200
+
201
+ session.updatedAt = new Date();
202
+
203
+ return session;
204
+ }
205
+ }
206
+
207
/**
 * Singleton instance
 *
 * Module-level shared registry; the convenience functions below route
 * through this instance.
 */
export const chatSessionService = new ChatSessionService();
211
+
212
+ /**
213
+ * Convenience function to send a chat message
214
+ */
215
+ export async function sendChatMessage(
216
+ sessionId: string,
217
+ content: string,
218
+ options?: SendChatMessageOptions
219
+ ): Promise<ChatSendResult> {
220
+ return chatSessionService.send(sessionId, content, options);
221
+ }
222
+
223
+ /**
224
+ * Build chat history for API request
225
+ */
226
+ export function buildChatHistory(session: ChatSession): GroqMessage[] {
227
+ const messages: GroqMessage[] = [];
228
+
229
+ // Add system instruction if present
230
+ if (session.systemInstruction) {
231
+ messages.push({
232
+ role: "system",
233
+ content: session.systemInstruction,
234
+ });
235
+ }
236
+
237
+ // Add conversation history
238
+ messages.push(...session.messages);
239
+
240
+ return messages;
241
+ }
242
+
243
+ /**
244
+ * Trim chat history to fit within token limit
245
+ */
246
+ export function trimChatHistory(
247
+ messages: GroqMessage[],
248
+ maxTokens: number = 4000
249
+ ): GroqMessage[] {
250
+ // Simple heuristic: assume average of 4 tokens per message
251
+ // For production, use a proper token counter
252
+ const maxMessages = Math.floor(maxTokens / 100);
253
+
254
+ if (messages.length <= maxMessages) {
255
+ return messages;
256
+ }
257
+
258
+ // Keep system messages and trim from oldest to newest
259
+ const systemMessages = messages.filter((m) => m.role === "system");
260
+ const nonSystemMessages = messages.filter((m) => m.role !== "system");
261
+
262
+ const trimmed = nonSystemMessages.slice(-maxMessages);
263
+
264
+ return [...systemMessages, ...trimmed];
265
+ }