@fencyai/react 0.1.53 → 0.1.55

This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
@@ -1,6 +1,7 @@
 import { createChatCompletion } from '@fencyai/js';
 import { useCallback, useMemo, useState } from 'react';
 import { useFencyContext } from '../../provider/useFencyContext';
+import { toSpecificChatCompletionParams } from '../../types/CreateGenericChatCompletionParams';
 export const useBasicChatCompletions = () => {
     const context = useFencyContext();
     const [chatCompletions, setChatCompletions] = useState([]);
@@ -15,38 +16,12 @@ export const useBasicChatCompletions = () => {
                 loading: true,
             },
         ]);
+        const specificParams = toSpecificChatCompletionParams(params.openai, params.gemini, params.claude, params.generic);
         const chatCompletion = await createChatCompletion({
             pk: context.fency.publishableKey,
             baseUrl: context.fency.baseUrl,
             request: {
-                openai: params.openai
-                    ? {
-                        model: params.openai.model,
-                        messages: params.openai.messages,
-                        temperature: params.openai.temperature,
-                        topP: params.openai.topP,
-                    }
-                    : undefined,
-                gemini: params.gemini
-                    ? {
-                        model: params.gemini.model,
-                        messages: params.gemini.messages,
-                        systemPrompt: params.gemini.systemPrompt,
-                        temperature: params.gemini.temperature,
-                        topP: params.gemini.topP,
-                        topK: params.gemini.topK,
-                    }
-                    : undefined,
-                claude: params.claude
-                    ? {
-                        model: params.claude.model,
-                        messages: params.claude.messages,
-                        systemPrompt: params.claude.systemPrompt,
-                        temperature: params.claude.temperature,
-                        topP: params.claude.topP,
-                        topK: params.claude.topK,
-                    }
-                    : undefined,
+                ...specificParams,
             },
         });
         if (chatCompletion.type === 'success' &&
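
The provider branching now lives in toSpecificChatCompletionParams, and the params interfaces later in this diff gain a generic field. A hedged consumer sketch in TypeScript; the trigger name createBasicChatCompletion is an assumption, since the hook's return object is truncated in this diff:

    import { useBasicChatCompletions } from '@fencyai/react';

    const Demo = () => {
        // Assumed trigger name; not visible in the diff above.
        const { createBasicChatCompletion } = useBasicChatCompletions();
        const ask = () =>
            createBasicChatCompletion({
                // One generic block replaces the openai/gemini/claude variants.
                generic: {
                    model: 'gpt-4o-mini',
                    messages: [{ role: 'user', content: 'Hello' }],
                },
            });
        return null; // render omitted
    };
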
@@ -105,7 +80,8 @@ export const useBasicChatCompletions = () => {
     }, [context]);
     const latest = useMemo(() => {
         return chatCompletions.sort((a, b) => {
-            return new Date(b.triggeredAt).getTime() - new Date(a.triggeredAt).getTime();
+            return (new Date(b.triggeredAt).getTime() -
+                new Date(a.triggeredAt).getTime());
         })[0];
     }, [chatCompletions]);
     return {
@@ -1,2 +1,3 @@
 import { UseStreamingChatCompletions } from '../../types/UseStreamingChatCompletions';
-export declare const useStreamingChatCompletions: () => UseStreamingChatCompletions;
+import { UseStreamingChatCompletionsProps } from '../../types/UseStreamingChatCompletionsProps';
+export declare const useStreamingChatCompletions: (props?: UseStreamingChatCompletionsProps) => UseStreamingChatCompletions;
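
Since props is optional, the widened hook signature stays source compatible. An illustrative sketch; both forms should type-check against the declaration above:

    // Existing zero-argument call sites keep compiling unchanged.
    const completions = useStreamingChatCompletions();

    // New call sites may pass event callbacks (full props interface later in this diff).
    const observed = useStreamingChatCompletions({
        onStreamError: (error) => console.error(error),
    });
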
@@ -1,20 +1,24 @@
 import { createChatCompletion } from '@fencyai/js';
 import { useCallback, useEffect, useMemo, useState } from 'react';
 import { useFencyContext } from '../../provider/useFencyContext';
+import { toSpecificChatCompletionParams } from '../../types/CreateGenericChatCompletionParams';
 import { useStream } from '../useStream';
-export const useStreamingChatCompletions = () => {
+export const useStreamingChatCompletions = (props) => {
     const context = useFencyContext();
     const [chatCompletions, setChatCompletions] = useState([]);
     const [completedStreamIds, setCompletedStreamIds] = useState([]);
     const [chunks, setChunks] = useState([]);
     const { createStream } = useStream({
         onNewChatCompletionStreamChunk: (streamData) => {
+            props?.onChatCompletionStreamChunk?.(streamData);
             setChunks((prev) => [...prev, streamData]);
         },
         onChatCompletionStreamCompleted: (stream) => {
+            props?.onChatCompletionStreamCompleted?.(stream);
             setCompletedStreamIds((prev) => [...prev, stream.streamId]);
         },
         onStreamError: (error) => {
+            props?.onStreamError?.(error);
             setChatCompletions((prev) => {
                 const existing = prev.find((c) => c.streamId === error.streamId);
                 if (!existing)
@@ -29,6 +33,12 @@ export const useStreamingChatCompletions = () => {
                 ];
             });
         },
+        onStreamNotFound: (error) => {
+            props?.onStreamNotFound?.(error);
+        },
+        onStreamTimeout: (error) => {
+            props?.onStreamTimeout?.(error);
+        },
     });
     useEffect(() => {
         setChatCompletions((prev) => {
@@ -65,8 +75,8 @@ export const useStreamingChatCompletions = () => {
                 }
                 : null,
             streamId: chatCompletion.streamId,
-            params: chatCompletion.params,
             error: chatCompletion.error,
+            prompt: chatCompletion.prompt,
             loading: chatCompletion.loading,
             doneStreaming: chatCompletion.doneStreaming,
             response: fullMessage,
@@ -88,69 +98,42 @@ export const useStreamingChatCompletions = () => {
                 triggeredAt: new Date().toISOString(),
                 data: null,
                 error: null,
-                params: params,
+                prompt: params,
                 response: '',
                 chunks: [],
                 loading: true,
                 doneStreaming: false,
             },
         ]);
+        const specificParams = toSpecificChatCompletionParams(params.openai, params.gemini, params.claude, params.generic);
         // Step 2: Send chat completion
         const chatCompletion = await createChatCompletion({
             pk: context.fency.publishableKey,
             baseUrl: context.fency.baseUrl,
             request: {
                 streamId: streamResponse.stream.id,
-                openai: params.openai
-                    ? {
-                        model: params.openai.model,
-                        messages: params.openai.messages,
-                        temperature: params.openai.temperature,
-                        topP: params.openai.topP,
-                    }
-                    : undefined,
-                gemini: params.gemini
-                    ? {
-                        model: params.gemini.model,
-                        messages: params.gemini.messages,
-                        systemPrompt: params.gemini.systemPrompt,
-                        temperature: params.gemini.temperature,
-                        topP: params.gemini.topP,
-                        topK: params.gemini.topK,
-                    }
-                    : undefined,
-                claude: params.claude
-                    ? {
-                        model: params.claude.model,
-                        messages: params.claude.messages,
-                        systemPrompt: params.claude.systemPrompt,
-                        temperature: params.claude.temperature,
-                        topP: params.claude.topP,
-                        topK: params.claude.topK,
-                    }
-                    : undefined,
+                ...specificParams,
             },
         });
         if (chatCompletion.type === 'success' &&
             chatCompletion.completion) {
-            const newCompletion = {
-                streamId: streamResponse.stream.id,
-                triggeredAt: new Date().toISOString(),
-                data: {
-                    id: chatCompletion.completion.id,
-                    createdAt: chatCompletion.completion.createdAt,
-                    streamId: streamResponse.stream.id,
-                },
-                error: null,
-                params: params,
-                response: '',
-                chunks: [],
-                doneStreaming: false,
-                loading: true,
-            };
             setChatCompletions((prev) => [
                 ...prev.filter((c) => c.streamId !== streamResponse.stream.id),
-                newCompletion,
+                {
+                    streamId: streamResponse.stream.id,
+                    triggeredAt: new Date().toISOString(),
+                    data: {
+                        id: chatCompletion.completion.id,
+                        createdAt: chatCompletion.completion.createdAt,
+                        streamId: streamResponse.stream.id,
+                    },
+                    error: null,
+                    prompt: params,
+                    response: '',
+                    chunks: [],
+                    doneStreaming: false,
+                    loading: true,
+                },
             ]);
             return {
                 type: 'success',
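
The streaming trigger above is a two-step protocol: a stream is created first, then the completion request carries its id so chunks can be correlated back to the matching chatCompletions entry. A reduced sketch of that sequencing; the shape of createStream's response is inferred from the streamResponse.stream.id usage above:

    // Step 1 (not shown in this hunk): obtain a stream id.
    const streamResponse = await createStream(); // assumed to resolve to { stream: { id: string } }
    // Step 2: send the completion request referencing that id.
    const chatCompletion = await createChatCompletion({
        pk: context.fency.publishableKey,
        baseUrl: context.fency.baseUrl,
        request: { streamId: streamResponse.stream.id, ...specificParams },
    });
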
@@ -164,7 +147,7 @@ export const useStreamingChatCompletions = () => {
             {
                 triggeredAt: new Date().toISOString(),
                 streamId: streamResponse.stream.id,
-                params: params,
+                prompt: params,
                 error: chatCompletion.error,
                 response: '',
                 chunks: [],
@@ -188,8 +171,8 @@ export const useStreamingChatCompletions = () => {
             {
                 triggeredAt: new Date().toISOString(),
                 streamId: streamResponse.stream.id,
-                params: params,
                 error: error,
+                prompt: params,
                 response: '',
                 chunks: [],
                 loading: false,
@@ -2,6 +2,7 @@ import { createChatCompletion } from '@fencyai/js';
 import { useCallback, useMemo, useState } from 'react';
 import { z } from 'zod';
 import { useFencyContext } from '../../provider/useFencyContext';
+import { toSpecificChatCompletionParams } from '../../types/CreateGenericChatCompletionParams';
 export const useStructuredChatCompletions = () => {
     const context = useFencyContext();
     const [chatCompletions, setChatCompletions] = useState([]);
@@ -28,38 +29,26 @@ export const useStructuredChatCompletions = () => {
             }
         };
         const parsedJsonSchema = JSON.stringify(jsonSchema);
+        const specificParams = toSpecificChatCompletionParams(params.openai, params.gemini, params.claude, params.generic);
         const response = await createChatCompletion({
             pk: context.fency.publishableKey,
             baseUrl: context.fency.baseUrl,
             request: {
-                openai: params.openai
+                openai: specificParams.openai
                     ? {
-                        model: params.openai.model,
                         responseJsonSchema: parsedJsonSchema,
-                        messages: params.openai.messages,
-                        temperature: params.openai.temperature,
-                        topP: params.openai.topP,
+                        ...specificParams.openai,
                     }
                     : undefined,
-                gemini: params.gemini
+                gemini: specificParams.gemini
                     ? {
-                        model: params.gemini.model,
                         responseJsonSchema: parsedJsonSchema,
-                        messages: params.gemini.messages,
-                        systemPrompt: params.gemini.systemPrompt,
-                        temperature: params.gemini.temperature,
-                        topP: params.gemini.topP,
-                        topK: params.gemini.topK,
+                        ...specificParams.gemini,
                     }
                     : undefined,
-                claude: params.claude
+                claude: specificParams.claude
                     ? {
-                        model: params.claude.model,
-                        messages: params.claude.messages,
-                        systemPrompt: params.claude.systemPrompt,
-                        temperature: params.claude.temperature,
-                        topP: params.claude.topP,
-                        topK: params.claude.topK,
+                        ...specificParams.claude,
                     }
                     : undefined,
             },
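
Note the merge order inside each branch above: responseJsonSchema is written before the spread, so a same-named key arriving via specificParams would win. The current param types never carry that key, so behavior is unchanged; a generic TypeScript illustration of the rule:

    const merged = { responseJsonSchema: '{"type":"object"}', ...{ model: 'gpt-4o' } };
    // Later spread entries win on key collisions; with disjoint keys, both survive:
    // merged -> { responseJsonSchema: '{"type":"object"}', model: 'gpt-4o' }
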
@@ -0,0 +1,4 @@
+import { OpenAiModel } from '@fencyai/js';
+import { GeminiModel } from '@fencyai/js';
+import { AnthropicModel } from '@fencyai/js';
+export type AiModel = OpenAiModel | GeminiModel | AnthropicModel;
@@ -0,0 +1 @@
+export {};
@@ -1,8 +1,10 @@
-import { CreateOpenAiChatCompletionParams } from './CreateOpenAiChatCompletionParams';
-import { CreateGeminiChatCompletionParams } from './CreateGeminiChatCompletionParams';
 import { CreateClaudeChatCompletionParams } from './CreateClaudeChatCompletionParams';
+import { CreateGeminiChatCompletionParams } from './CreateGeminiChatCompletionParams';
+import { CreateOpenAiChatCompletionParams } from './CreateOpenAiChatCompletionParams';
+import { CreateGenericChatCompletionParams } from './CreateGenericChatCompletionParams';
 export interface CreateBasicChatCompletionParams {
     openai?: CreateOpenAiChatCompletionParams;
     gemini?: CreateGeminiChatCompletionParams;
     claude?: CreateClaudeChatCompletionParams;
+    generic?: CreateGenericChatCompletionParams;
 }
@@ -0,0 +1,14 @@
+import { ChatCompletionMessage, CreateClaudeChatCompletionRequestParams, CreateGeminiChatCompletionRequestParams, CreateOpenAiChatCompletionRequestParams } from '@fencyai/js';
+import { AiModel } from './AiModel';
+export interface CreateGenericChatCompletionParams {
+    model: AiModel;
+    messages: Array<ChatCompletionMessage>;
+    temperature?: number;
+    topP?: number;
+    topK?: number;
+}
+export declare const toSpecificChatCompletionParams: (openai?: CreateOpenAiChatCompletionRequestParams, gemini?: CreateGeminiChatCompletionRequestParams, claude?: CreateClaudeChatCompletionRequestParams, generic?: CreateGenericChatCompletionParams) => {
+    openai?: CreateOpenAiChatCompletionRequestParams;
+    gemini?: CreateGeminiChatCompletionRequestParams;
+    claude?: CreateClaudeChatCompletionRequestParams;
+};
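
Per the implementation that follows, provider-specific params short-circuit before generic is consulted. A hedged illustration with made-up values:

    const result = toSpecificChatCompletionParams(
        { model: 'gpt-4o', messages: [{ role: 'user', content: 'Hi' }] }, // openai
        undefined, // gemini
        undefined, // claude
        { model: 'claude-sonnet-4-0', messages: [] }, // generic is ignored here
    );
    // result -> { openai: { model: 'gpt-4o', messages: [...] } }
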
@@ -0,0 +1,169 @@
+export const toSpecificChatCompletionParams = (openai, gemini, claude, generic) => {
+    if (openai) {
+        return {
+            openai,
+        };
+    }
+    if (gemini) {
+        return {
+            gemini,
+        };
+    }
+    if (claude) {
+        return {
+            claude,
+        };
+    }
+    if (generic) {
+        if (isOpenAiModel(generic.model)) {
+            return {
+                openai: {
+                    model: generic.model,
+                    messages: generic.messages,
+                    temperature: generic.temperature,
+                    topP: generic.topP,
+                },
+            };
+        }
+        if (isGeminiModel(generic.model)) {
+            return {
+                gemini: {
+                    model: generic.model,
+                    messages: toGeminiChatCompletionMessages(generic.messages),
+                    systemPrompt: getSystemPromptFromMessages(generic.messages),
+                    temperature: generic.temperature,
+                    topP: generic.topP,
+                    topK: generic.topK,
+                },
+            };
+        }
+        if (isAnthropicModel(generic.model)) {
+            return {
+                claude: {
+                    model: generic.model,
+                    messages: toClaudeChatCompletionMessages(generic.messages),
+                    systemPrompt: getSystemPromptFromMessages(generic.messages),
+                    temperature: generic.temperature,
+                    topP: generic.topP,
+                    topK: generic.topK,
+                },
+            };
+        }
+    }
+    throw new Error('No model provided');
+};
+const getSystemPromptFromMessages = (messages) => {
+    const allSystemPrompts = messages
+        .filter((message) => message.role === 'system')
+        .map((message) => message.content);
+    return allSystemPrompts.join('\n');
+};
+const toGeminiChatCompletionMessages = (messages) => {
+    const convertedMessages = messages.flatMap((message) => {
+        if (message.role === 'system') {
+            return [];
+        }
+        if (message.role === 'user') {
+            return [{ role: 'user', content: message.content }];
+        }
+        if (message.role === 'assistant') {
+            return [{ role: 'model', content: message.content }];
+        }
+        return [];
+    });
+    return convertedMessages;
+};
+const toClaudeChatCompletionMessages = (messages) => {
+    const convertedMessages = messages.flatMap((message) => {
+        if (message.role === 'system') {
+            return [];
+        }
+        if (message.role === 'user') {
+            return [{ role: 'user', content: message.content }];
+        }
+        if (message.role === 'assistant') {
+            return [{ role: 'assistant', content: message.content }];
+        }
+        return [];
+    });
+    return convertedMessages;
+};
+const isOpenAiModel = (model) => {
+    switch (model) {
+        // OpenAI models
+        case 'gpt-4.1':
+            return true;
+        case 'gpt-4.1-mini':
+            return true;
+        case 'gpt-4.1-nano':
+            return true;
+        case 'gpt-4o':
+            return true;
+        case 'gpt-4o-mini':
+            return true;
+        // Gemini models
+        case 'gemini-2.5-pro':
+            return true;
+        case 'gemini-2.5-flash':
+            return true;
+        case 'gemini-2.5-flash-lite-preview-06-17':
+            return true;
+        // Claude models
+        case 'claude-opus-4-0':
+            return true;
+        case 'claude-sonnet-4-0':
+            return true;
+    }
+};
+const isGeminiModel = (model) => {
+    switch (model) {
+        case 'gpt-4.1':
+            return false;
+        case 'gpt-4.1-mini':
+            return false;
+        case 'gpt-4.1-nano':
+            return false;
+        case 'gpt-4o':
+            return false;
+        case 'gpt-4o-mini':
+            return false;
+        // Gemini models
+        case 'gemini-2.5-pro':
+            return true;
+        case 'gemini-2.5-flash':
+            return true;
+        case 'gemini-2.5-flash-lite-preview-06-17':
+            return true;
+        // Claude models
+        case 'claude-opus-4-0':
+            return false;
+        case 'claude-sonnet-4-0':
+            return false;
+    }
+};
+const isAnthropicModel = (model) => {
+    switch (model) {
+        case 'gpt-4.1':
+            return false;
+        case 'gpt-4.1-mini':
+            return false;
+        case 'gpt-4.1-nano':
+            return false;
+        case 'gpt-4o':
+            return false;
+        case 'gpt-4o-mini':
+            return false;
+        // Gemini models
+        case 'gemini-2.5-pro':
+            return false;
+        case 'gemini-2.5-flash':
+            return false;
+        case 'gemini-2.5-flash-lite-preview-06-17':
+            return false;
+        // Claude models
+        case 'claude-opus-4-0':
+            return true;
+        case 'claude-sonnet-4-0':
+            return true;
+    }
+};
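
For the generic path, the helpers above lift system messages out into a joined systemPrompt and rename assistant turns per provider. An illustration with hypothetical messages:

    const messages = [
        { role: 'system', content: 'Be terse.' },
        { role: 'user', content: 'Hi' },
        { role: 'assistant', content: 'Hello!' },
    ];
    // getSystemPromptFromMessages(messages) -> 'Be terse.'
    // toGeminiChatCompletionMessages(messages) ->
    //     [{ role: 'user', content: 'Hi' }, { role: 'model', content: 'Hello!' }]
    // toClaudeChatCompletionMessages(messages) ->
    //     [{ role: 'user', content: 'Hi' }, { role: 'assistant', content: 'Hello!' }]
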
@@ -1,8 +1,10 @@
 import { CreateOpenAiChatCompletionParams } from './CreateOpenAiChatCompletionParams';
 import { CreateGeminiChatCompletionParams } from './CreateGeminiChatCompletionParams';
 import { CreateClaudeChatCompletionParams } from './CreateClaudeChatCompletionParams';
+import { CreateGenericChatCompletionParams } from './CreateGenericChatCompletionParams';
 export interface CreateStreamingChatCompletionParams {
     openai?: CreateOpenAiChatCompletionParams;
     gemini?: CreateGeminiChatCompletionParams;
     claude?: CreateClaudeChatCompletionParams;
+    generic?: CreateGenericChatCompletionParams;
 }
@@ -2,9 +2,11 @@ import { ZodTypeAny } from 'zod';
 import { CreateOpenAiChatCompletionParams } from './CreateOpenAiChatCompletionParams';
 import { CreateGeminiChatCompletionParams } from './CreateGeminiChatCompletionParams';
 import { CreateClaudeChatCompletionParams } from './CreateClaudeChatCompletionParams';
+import { CreateGenericChatCompletionParams } from './CreateGenericChatCompletionParams';
 export interface CreateStructuredChatCompletionParams<T extends ZodTypeAny> {
     openai?: CreateOpenAiChatCompletionParams;
     gemini?: CreateGeminiChatCompletionParams;
     claude?: CreateClaudeChatCompletionParams;
+    generic?: CreateGenericChatCompletionParams;
     responseFormat: T;
 }
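
generic composes with responseFormat in the structured hook. A hedged consumer sketch; the trigger name createStructuredChatCompletion is an assumption, as the hook's return object is not shown in this diff:

    import { z } from 'zod';

    const City = z.object({ name: z.string(), population: z.number() });

    const { createStructuredChatCompletion } = useStructuredChatCompletions(); // assumed trigger name
    await createStructuredChatCompletion({
        generic: {
            model: 'gpt-4o',
            messages: [{ role: 'user', content: 'Largest city in Norway?' }],
        },
        responseFormat: City,
    });
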
@@ -1,11 +1,19 @@
 import { ApiError } from '@fencyai/js';
-import { StreamingChatCompletionData } from './StreamingChatCompletionData';
+import { CreateClaudeChatCompletionParams } from './CreateClaudeChatCompletionParams';
+import { CreateGenericChatCompletionParams } from './CreateGenericChatCompletionParams';
+import { CreateGeminiChatCompletionParams } from './CreateGeminiChatCompletionParams';
+import { CreateOpenAiChatCompletionParams } from './CreateOpenAiChatCompletionParams';
 import { NewChatCompletionStreamChunk } from './StreamData';
-import { CreateStreamingChatCompletionParams } from './CreateStreamingChatCompletionParams';
+import { StreamingChatCompletionData } from './StreamingChatCompletionData';
 export interface StreamingChatCompletion {
     triggeredAt: string;
     streamId: string;
-    params: CreateStreamingChatCompletionParams;
+    prompt: {
+        openai?: CreateOpenAiChatCompletionParams;
+        gemini?: CreateGeminiChatCompletionParams;
+        claude?: CreateClaudeChatCompletionParams;
+        generic?: CreateGenericChatCompletionParams;
+    };
     data: StreamingChatCompletionData | null;
     error: ApiError | null;
     response: string;
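
The rename from params to prompt is a breaking change for consumers that inspected completion entries; an illustrative before/after:

    // 0.1.53: const model = completion.params.openai?.model;
    // 0.1.55:
    const model = completion.prompt.openai?.model ?? completion.prompt.generic?.model;
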
@@ -0,0 +1,9 @@
+import { ChatCompletionStreamCompleted, NewChatCompletionStreamChunk, StreamNotFound, StreamTimeout } from './StreamData';
+import { StreamError } from './StreamError';
+export interface UseStreamingChatCompletionsProps {
+    onChatCompletionStreamCompleted?: (stream: ChatCompletionStreamCompleted) => void;
+    onChatCompletionStreamChunk?: (chunk: NewChatCompletionStreamChunk) => void;
+    onStreamNotFound?: (error: StreamNotFound) => void;
+    onStreamTimeout?: (error: StreamTimeout) => void;
+    onStreamError?: (error: StreamError) => void;
+}
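
All five callbacks are optional, and the wiring earlier in this diff invokes each user callback before the hook's own state updates. A sketch with hypothetical handlers:

    const completions = useStreamingChatCompletions({
        onChatCompletionStreamChunk: (chunk) => appendToUi(chunk), // appendToUi is hypothetical
        onChatCompletionStreamCompleted: (stream) => markDone(stream.streamId), // markDone is hypothetical
        onStreamNotFound: (error) => reportError(error), // reportError is hypothetical
        onStreamTimeout: (error) => reportError(error),
        onStreamError: (error) => reportError(error),
    });
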
@@ -0,0 +1 @@
+export {};
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@fencyai/react",
-  "version": "0.1.53",
+  "version": "0.1.55",
   "description": "> TODO: description",
   "author": "staklau <steinaageklaussen@gmail.com>",
   "homepage": "",
@@ -33,7 +33,7 @@
     "prepublishOnly": "npm run build"
   },
   "devDependencies": {
-    "@fencyai/js": "^0.1.53",
+    "@fencyai/js": "^0.1.55",
     "@types/jest": "^29.5.11",
     "@types/node": "^20.10.5",
     "@types/react": "^18.2.45",
@@ -42,7 +42,7 @@
     "typescript": "^5.3.3"
   },
   "peerDependencies": {
-    "@fencyai/js": "^0.1.53",
+    "@fencyai/js": "^0.1.55",
     "react": ">=16.8.0",
     "zod": "^4.0.5"
   },
@@ -51,5 +51,5 @@
       "optional": false
     }
   },
-  "gitHead": "c06951326631299d64fb298760a0b6ba19d573ee"
+  "gitHead": "b67b6f7fe71f8595f9c15ea85f365c1dd69ac76f"
 }