@fencyai/react 0.1.52 → 0.1.54
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/hooks/useBasicChatCompletions/index.js +5 -27
- package/lib/hooks/useStreamingChatCompletions/index.js +24 -35
- package/lib/hooks/useStructuredChatCompletions/index.js +11 -6
- package/lib/types/AiModel.d.ts +4 -0
- package/lib/types/CreateBasicChatCompletionParams.d.ts +5 -3
- package/lib/types/CreateClaudeChatCompletionParams.d.ts +9 -0
- package/lib/types/CreateClaudeChatCompletionParams.js +1 -0
- package/lib/types/CreateGeminiChatCompletionParams.d.ts +4 -2
- package/lib/types/CreateGenericChatCompletionParams.d.ts +14 -0
- package/lib/types/CreateGenericChatCompletionParams.js +169 -0
- package/lib/types/CreateOpenAiChatCompletionParams.d.ts +2 -2
- package/lib/types/CreateStreamingChatCompletionParams.d.ts +4 -2
- package/lib/types/CreateStructuredChatCompletionParams.d.ts +4 -0
- package/lib/types/StreamingChatCompletion.d.ts +11 -1
- package/lib/types/index.d.ts +1 -1
- package/lib/types/index.js +1 -1
- package/package.json +11 -8
- package/lib/types/CreateAnthropicChatCompletionParams.d.ts +0 -9
- package/lib/types/{CreateAnthropicChatCompletionParams.js → AiModel.js} +0 -0

package/lib/hooks/useBasicChatCompletions/index.js
CHANGED

@@ -1,6 +1,7 @@
 import { createChatCompletion } from '@fencyai/js';
 import { useCallback, useMemo, useState } from 'react';
 import { useFencyContext } from '../../provider/useFencyContext';
+import { toSpecificChatCompletionParams } from '../../types/CreateGenericChatCompletionParams';
 export const useBasicChatCompletions = () => {
     const context = useFencyContext();
     const [chatCompletions, setChatCompletions] = useState([]);
@@ -15,36 +16,12 @@ export const useBasicChatCompletions = () => {
                 loading: true,
             },
         ]);
+        const specificParams = toSpecificChatCompletionParams(params.openai, params.gemini, params.claude, params.generic);
         const chatCompletion = await createChatCompletion({
             pk: context.fency.publishableKey,
             baseUrl: context.fency.baseUrl,
             request: {
-                openai: params.openai
-                    ? {
-                        model: params.openai.model,
-                        messages: params.openai.messages,
-                        temperature: params.openai.temperature,
-                        topP: params.openai.topP,
-                    }
-                    : undefined,
-                gemini: params.gemini
-                    ? {
-                        model: params.gemini.model,
-                        content: params.gemini.content,
-                        temperature: params.gemini.temperature,
-                        topP: params.gemini.topP,
-                        topK: params.gemini.topK,
-                    }
-                    : undefined,
-                anthropic: params.anthropic
-                    ? {
-                        model: params.anthropic.model,
-                        messages: params.anthropic.messages,
-                        temperature: params.anthropic.temperature,
-                        topP: params.anthropic.topP,
-                        topK: params.anthropic.topK,
-                    }
-                    : undefined,
+                ...specificParams,
             },
         });
         if (chatCompletion.type === 'success' &&
@@ -103,7 +80,8 @@ export const useBasicChatCompletions = () => {
     }, [context]);
     const latest = useMemo(() => {
         return chatCompletions.sort((a, b) => {
-            return new Date(b.triggeredAt).getTime() - new Date(a.triggeredAt).getTime();
+            return (new Date(b.triggeredAt).getTime() -
+                new Date(a.triggeredAt).getTime());
         })[0];
     }, [chatCompletions]);
     return {
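
For orientation, a minimal sketch of how a consumer might call the reworked hook with the new provider-agnostic params. The `generic` shape comes straight from this diff; the trigger-function name `createBasicChatCompletion` and the returned fields are assumptions, since the hook's public return value is not shown here.

    import { useBasicChatCompletions } from '@fencyai/react';

    function Demo() {
        // NOTE: `createBasicChatCompletion` is a hypothetical name for the callback
        // this hook returns; the diff only shows how the params are consumed.
        const { createBasicChatCompletion, latest } = useBasicChatCompletions();

        const ask = () =>
            createBasicChatCompletion({
                // New in 0.1.54: one generic params object instead of per-provider
                // variants; toSpecificChatCompletionParams maps it to a specific shape.
                generic: {
                    model: 'gpt-4o-mini',
                    messages: [{ role: 'user', content: 'Hello!' }],
                    temperature: 0.2,
                },
            });

        return null; // rendering omitted
    }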

package/lib/hooks/useStreamingChatCompletions/index.js
CHANGED

@@ -1,13 +1,14 @@
 import { createChatCompletion } from '@fencyai/js';
 import { useCallback, useEffect, useMemo, useState } from 'react';
 import { useFencyContext } from '../../provider/useFencyContext';
+import { toSpecificChatCompletionParams } from '../../types/CreateGenericChatCompletionParams';
 import { useStream } from '../useStream';
 export const useStreamingChatCompletions = () => {
     const context = useFencyContext();
     const [chatCompletions, setChatCompletions] = useState([]);
     const [completedStreamIds, setCompletedStreamIds] = useState([]);
     const [chunks, setChunks] = useState([]);
-    const { … } = useStream({
+    const { createStream } = useStream({
         onNewChatCompletionStreamChunk: (streamData) => {
             setChunks((prev) => [...prev, streamData]);
         },
@@ -66,6 +67,7 @@ export const useStreamingChatCompletions = () => {
                 : null,
             streamId: chatCompletion.streamId,
             error: chatCompletion.error,
+            prompt: chatCompletion.prompt,
             loading: chatCompletion.loading,
             doneStreaming: chatCompletion.doneStreaming,
             response: fullMessage,
@@ -87,57 +89,42 @@ export const useStreamingChatCompletions = () => {
                 triggeredAt: new Date().toISOString(),
                 data: null,
                 error: null,
+                prompt: params,
                 response: '',
                 chunks: [],
                 loading: true,
                 doneStreaming: false,
             },
         ]);
+        const specificParams = toSpecificChatCompletionParams(params.openai, params.gemini, params.claude, params.generic);
         // Step 2: Send chat completion
         const chatCompletion = await createChatCompletion({
             pk: context.fency.publishableKey,
             baseUrl: context.fency.baseUrl,
             request: {
                 streamId: streamResponse.stream.id,
-                openai: params.openai
-                    ? {
-                        model: params.openai.model,
-                        messages: params.openai.messages,
-                    }
-                    : undefined,
-                gemini: params.gemini
-                    ? {
-                        model: params.gemini.model,
-                        content: params.gemini.content,
-                    }
-                    : undefined,
-                anthropic: params.anthropic
-                    ? {
-                        model: params.anthropic.model,
-                        messages: params.anthropic.messages,
-                    }
-                    : undefined,
+                ...specificParams,
             },
         });
         if (chatCompletion.type === 'success' &&
             chatCompletion.completion) {
-            const newCompletion = {
-                streamId: streamResponse.stream.id,
-                triggeredAt: new Date().toISOString(),
-                data: {
-                    id: chatCompletion.completion.id,
-                    createdAt: chatCompletion.completion.createdAt,
-                    streamId: streamResponse.stream.id,
-                },
-                error: null,
-                response: '',
-                chunks: [],
-                doneStreaming: false,
-                loading: true,
-            };
             setChatCompletions((prev) => [
                 ...prev.filter((c) => c.streamId !== streamResponse.stream.id),
-                newCompletion,
+                {
+                    streamId: streamResponse.stream.id,
+                    triggeredAt: new Date().toISOString(),
+                    data: {
+                        id: chatCompletion.completion.id,
+                        createdAt: chatCompletion.completion.createdAt,
+                        streamId: streamResponse.stream.id,
+                    },
+                    error: null,
+                    prompt: params,
+                    response: '',
+                    chunks: [],
+                    doneStreaming: false,
+                    loading: true,
+                },
             ]);
             return {
                 type: 'success',
@@ -151,6 +138,7 @@ export const useStreamingChatCompletions = () => {
             {
                 triggeredAt: new Date().toISOString(),
                 streamId: streamResponse.stream.id,
+                prompt: params,
                 error: chatCompletion.error,
                 response: '',
                 chunks: [],
@@ -175,6 +163,7 @@ export const useStreamingChatCompletions = () => {
                 triggeredAt: new Date().toISOString(),
                 streamId: streamResponse.stream.id,
                 error: error,
+                prompt: params,
                 response: '',
                 chunks: [],
                 loading: false,
@@ -192,7 +181,7 @@ export const useStreamingChatCompletions = () => {
             console.error(streamResponse.error);
             return streamResponse;
         }
-    }, [context]);
+    }, [context, chatCompletions]);
     const latest = useMemo(() => {
         return chatCompletions.sort((a, b) => {
             return (new Date(b.triggeredAt).getTime() -
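
The notable additions here: each streaming completion now records the params that triggered it as `prompt`, and the callback's dependency array gains `chatCompletions`. A small sketch of what the echoed prompt enables (return-value names assumed, as above):

    const { createStreamingChatCompletion, latest } = useStreamingChatCompletions(); // names assumed

    // `prompt` mirrors the request params, so a UI can render the question next
    // to the streamed answer without tracking it separately.
    if (latest) {
        const asked = latest.prompt.generic ?? latest.prompt.openai ?? latest.prompt.claude;
        console.log(asked?.messages, '->', latest.response);
    }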

package/lib/hooks/useStructuredChatCompletions/index.js
CHANGED

@@ -2,6 +2,7 @@ import { createChatCompletion } from '@fencyai/js';
 import { useCallback, useMemo, useState } from 'react';
 import { z } from 'zod';
 import { useFencyContext } from '../../provider/useFencyContext';
+import { toSpecificChatCompletionParams } from '../../types/CreateGenericChatCompletionParams';
 export const useStructuredChatCompletions = () => {
     const context = useFencyContext();
     const [chatCompletions, setChatCompletions] = useState([]);
@@ -28,22 +29,26 @@ export const useStructuredChatCompletions = () => {
         }
     };
     const parsedJsonSchema = JSON.stringify(jsonSchema);
+    const specificParams = toSpecificChatCompletionParams(params.openai, params.gemini, params.claude, params.generic);
     const response = await createChatCompletion({
         pk: context.fency.publishableKey,
         baseUrl: context.fency.baseUrl,
         request: {
-            openai: params.openai
+            openai: specificParams.openai
                 ? {
-                    model: params.openai.model,
                     responseJsonSchema: parsedJsonSchema,
-                    messages: params.openai.messages,
+                    ...specificParams.openai,
                 }
                 : undefined,
-            gemini: params.gemini
+            gemini: specificParams.gemini
                 ? {
-                    model: params.gemini.model,
                     responseJsonSchema: parsedJsonSchema,
-                    content: params.gemini.content,
+                    ...specificParams.gemini,
+                }
+                : undefined,
+            claude: specificParams.claude
+                ? {
+                    ...specificParams.claude,
                 }
                 : undefined,
         },
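
Structured completions now accept `claude` and `generic` params as well; note that, as published, only the openai and gemini branches attach `responseJsonSchema` to the request, while the claude branch forwards its params unchanged. A sketch (trigger name hypothetical; the zod schema follows the hook's existing zod import):

    import { z } from 'zod';

    const Recipe = z.object({
        title: z.string(),
        steps: z.array(z.string()),
    });

    // `createStructuredChatCompletion` is a hypothetical trigger name; the diff
    // shows only the request wiring.
    createStructuredChatCompletion({
        claude: {
            model: 'claude-sonnet-4-0',
            messages: [{ role: 'user', content: 'Give me a pancake recipe.' }],
        },
        responseFormat: Recipe,
    });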

package/lib/types/CreateBasicChatCompletionParams.d.ts
CHANGED

@@ -1,8 +1,10 @@
-import { CreateOpenAiChatCompletionParams } from './CreateOpenAiChatCompletionParams';
+import { CreateClaudeChatCompletionParams } from './CreateClaudeChatCompletionParams';
 import { CreateGeminiChatCompletionParams } from './CreateGeminiChatCompletionParams';
-import { CreateAnthropicChatCompletionParams } from './CreateAnthropicChatCompletionParams';
+import { CreateOpenAiChatCompletionParams } from './CreateOpenAiChatCompletionParams';
+import { CreateGenericChatCompletionParams } from './CreateGenericChatCompletionParams';
 export interface CreateBasicChatCompletionParams {
     openai?: CreateOpenAiChatCompletionParams;
     gemini?: CreateGeminiChatCompletionParams;
-    anthropic?: CreateAnthropicChatCompletionParams;
+    claude?: CreateClaudeChatCompletionParams;
+    generic?: CreateGenericChatCompletionParams;
 }

package/lib/types/CreateClaudeChatCompletionParams.d.ts
ADDED

@@ -0,0 +1,9 @@
+import { AnthropicModel, ClaudeChatCompletionMessage } from '@fencyai/js';
+export interface CreateClaudeChatCompletionParams {
+    model: AnthropicModel;
+    messages: Array<ClaudeChatCompletionMessage>;
+    systemPrompt?: string;
+    temperature?: number;
+    topP?: number;
+    topK?: number;
+}

package/lib/types/CreateClaudeChatCompletionParams.js
ADDED

@@ -0,0 +1 @@
+export {};

package/lib/types/CreateGeminiChatCompletionParams.d.ts
CHANGED

@@ -1,7 +1,9 @@
-import { GeminiModel } from '@fencyai/js/lib/types/GeminiModel';
+import { GeminiModel } from '@fencyai/js';
+import { GeminiChatCompletionMessage } from '@fencyai/js';
 export interface CreateGeminiChatCompletionParams {
     model: GeminiModel;
-    content: string;
+    messages: Array<GeminiChatCompletionMessage>;
+    systemPrompt?: string;
     temperature?: number;
     topP?: number;
     topK?: number;

package/lib/types/CreateGenericChatCompletionParams.d.ts
ADDED

@@ -0,0 +1,14 @@
+import { ChatCompletionMessage, CreateClaudeChatCompletionRequestParams, CreateGeminiChatCompletionRequestParams, CreateOpenAiChatCompletionRequestParams } from '@fencyai/js';
+import { AiModel } from './AiModel';
+export interface CreateGenericChatCompletionParams {
+    model: AiModel;
+    messages: Array<ChatCompletionMessage>;
+    temperature?: number;
+    topP?: number;
+    topK?: number;
+}
+export declare const toSpecificChatCompletionParams: (openai?: CreateOpenAiChatCompletionRequestParams, gemini?: CreateGeminiChatCompletionRequestParams, claude?: CreateClaudeChatCompletionRequestParams, generic?: CreateGenericChatCompletionParams) => {
+    openai?: CreateOpenAiChatCompletionRequestParams;
+    gemini?: CreateGeminiChatCompletionRequestParams;
+    claude?: CreateClaudeChatCompletionRequestParams;
+};
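
For illustration, a value satisfying the new interface. The import path is an assumption — this diff does not show whether the type is re-exported from the package root.

    import { CreateGenericChatCompletionParams } from '@fencyai/react'; // path assumed

    const params: CreateGenericChatCompletionParams = {
        model: 'gemini-2.5-flash', // must be one of the AiModel literals handled below
        messages: [
            { role: 'system', content: 'Answer tersely.' }, // lifted into systemPrompt on the gemini/claude paths
            { role: 'user', content: 'What is a monad?' },
        ],
        topK: 40,
    };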

package/lib/types/CreateGenericChatCompletionParams.js
ADDED

@@ -0,0 +1,169 @@
+export const toSpecificChatCompletionParams = (openai, gemini, claude, generic) => {
+    if (openai) {
+        return {
+            openai,
+        };
+    }
+    if (gemini) {
+        return {
+            gemini,
+        };
+    }
+    if (claude) {
+        return {
+            claude,
+        };
+    }
+    if (generic) {
+        if (isOpenAiModel(generic.model)) {
+            return {
+                openai: {
+                    model: generic.model,
+                    messages: generic.messages,
+                    temperature: generic.temperature,
+                    topP: generic.topP,
+                },
+            };
+        }
+        if (isGeminiModel(generic.model)) {
+            return {
+                gemini: {
+                    model: generic.model,
+                    messages: toGeminiChatCompletionMessages(generic.messages),
+                    systemPrompt: getSystemPromptFromMessages(generic.messages),
+                    temperature: generic.temperature,
+                    topP: generic.topP,
+                    topK: generic.topK,
+                },
+            };
+        }
+        if (isAnthropicModel(generic.model)) {
+            return {
+                claude: {
+                    model: generic.model,
+                    messages: toClaudeChatCompletionMessages(generic.messages),
+                    systemPrompt: getSystemPromptFromMessages(generic.messages),
+                    temperature: generic.temperature,
+                    topP: generic.topP,
+                    topK: generic.topK,
+                },
+            };
+        }
+    }
+    throw new Error('No model provided');
+};
+const getSystemPromptFromMessages = (messages) => {
+    const allSystemPrompts = messages
+        .filter((message) => message.role === 'system')
+        .map((message) => message.content);
+    return allSystemPrompts.join('\n');
+};
+const toGeminiChatCompletionMessages = (messages) => {
+    const convertedMessages = messages.flatMap((message) => {
+        if (message.role === 'system') {
+            return [];
+        }
+        if (message.role === 'user') {
+            return [{ role: 'user', content: message.content }];
+        }
+        if (message.role === 'assistant') {
+            return [{ role: 'model', content: message.content }];
+        }
+        return [];
+    });
+    return convertedMessages;
+};
+const toClaudeChatCompletionMessages = (messages) => {
+    const convertedMessages = messages.flatMap((message) => {
+        if (message.role === 'system') {
+            return [];
+        }
+        if (message.role === 'user') {
+            return [{ role: 'user', content: message.content }];
+        }
+        if (message.role === 'assistant') {
+            return [{ role: 'assistant', content: message.content }];
+        }
+        return [];
+    });
+    return convertedMessages;
+};
+const isOpenAiModel = (model) => {
+    switch (model) {
+        // OpenAI models
+        case 'gpt-4.1':
+            return true;
+        case 'gpt-4.1-mini':
+            return true;
+        case 'gpt-4.1-nano':
+            return true;
+        case 'gpt-4o':
+            return true;
+        case 'gpt-4o-mini':
+            return true;
+        // Gemini models
+        case 'gemini-2.5-pro':
+            return true;
+        case 'gemini-2.5-flash':
+            return true;
+        case 'gemini-2.5-flash-lite-preview-06-17':
+            return true;
+        // Claude models
+        case 'claude-opus-4-0':
+            return true;
+        case 'claude-sonnet-4-0':
+            return true;
+    }
+};
+const isGeminiModel = (model) => {
+    switch (model) {
+        case 'gpt-4.1':
+            return false;
+        case 'gpt-4.1-mini':
+            return false;
+        case 'gpt-4.1-nano':
+            return false;
+        case 'gpt-4o':
+            return false;
+        case 'gpt-4o-mini':
+            return false;
+        // Gemini models
+        case 'gemini-2.5-pro':
+            return true;
+        case 'gemini-2.5-flash':
+            return true;
+        case 'gemini-2.5-flash-lite-preview-06-17':
+            return true;
+        // Claude models
+        case 'claude-opus-4-0':
+            return false;
+        case 'claude-sonnet-4-0':
+            return false;
+    }
+};
+const isAnthropicModel = (model) => {
+    switch (model) {
+        case 'gpt-4.1':
+            return false;
+        case 'gpt-4.1-mini':
+            return false;
+        case 'gpt-4.1-nano':
+            return false;
+        case 'gpt-4o':
+            return false;
+        case 'gpt-4o-mini':
+            return false;
+        // Gemini models
+        case 'gemini-2.5-pro':
+            return false;
+        case 'gemini-2.5-flash':
+            return false;
+        case 'gemini-2.5-flash-lite-preview-06-17':
+            return false;
+        // Claude models
+        case 'claude-opus-4-0':
+            return true;
+        case 'claude-sonnet-4-0':
+            return true;
+    }
+};
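
Restating the helper's contract: explicit provider params short-circuit in order (openai, then gemini, then claude), and only a `generic` argument is converted, with system messages filtered out of the message list and joined into `systemPrompt` on the gemini and claude paths. One caveat worth flagging: as captured in this diff, `isOpenAiModel` returns true for every model in its table — gemini and claude models included — so a generic-only call appears to always take the openai branch in this version. A sketch of the observable behavior (results inferred from the code above):

    // Explicit openai params win; the generic argument is ignored entirely.
    toSpecificChatCompletionParams(
        { model: 'gpt-4o', messages: [] },            // openai
        undefined,                                    // gemini
        undefined,                                    // claude
        { model: 'claude-sonnet-4-0', messages: [] }, // generic (ignored)
    );
    // => { openai: { model: 'gpt-4o', messages: [] } }

    // Generic-only: routed through the is*Model checks, openai first.
    toSpecificChatCompletionParams(undefined, undefined, undefined, {
        model: 'gpt-4.1-mini',
        messages: [{ role: 'user', content: 'Hi' }],
    });
    // => { openai: { model: 'gpt-4.1-mini', messages: [...] } }

    // No params at all: throws Error('No model provided').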

package/lib/types/CreateOpenAiChatCompletionParams.d.ts
CHANGED

@@ -1,5 +1,5 @@
-import { OpenAiModel } from '@fencyai/js/lib/types/OpenAiModel';
-import { ChatCompletionMessage } from '@fencyai/js/lib/types/ChatCompletionMessage';
+import { OpenAiModel } from '@fencyai/js';
+import { ChatCompletionMessage } from '@fencyai/js';
 export interface CreateOpenAiChatCompletionParams {
     model: OpenAiModel;
     messages: Array<ChatCompletionMessage>;

package/lib/types/CreateStreamingChatCompletionParams.d.ts
CHANGED

@@ -1,8 +1,10 @@
 import { CreateOpenAiChatCompletionParams } from './CreateOpenAiChatCompletionParams';
 import { CreateGeminiChatCompletionParams } from './CreateGeminiChatCompletionParams';
-import { CreateAnthropicChatCompletionParams } from './CreateAnthropicChatCompletionParams';
+import { CreateClaudeChatCompletionParams } from './CreateClaudeChatCompletionParams';
+import { CreateGenericChatCompletionParams } from './CreateGenericChatCompletionParams';
 export interface CreateStreamingChatCompletionParams {
     openai?: CreateOpenAiChatCompletionParams;
     gemini?: CreateGeminiChatCompletionParams;
-    anthropic?: CreateAnthropicChatCompletionParams;
+    claude?: CreateClaudeChatCompletionParams;
+    generic?: CreateGenericChatCompletionParams;
 }

package/lib/types/CreateStructuredChatCompletionParams.d.ts
CHANGED

@@ -1,8 +1,12 @@
 import { ZodTypeAny } from 'zod';
 import { CreateOpenAiChatCompletionParams } from './CreateOpenAiChatCompletionParams';
 import { CreateGeminiChatCompletionParams } from './CreateGeminiChatCompletionParams';
+import { CreateClaudeChatCompletionParams } from './CreateClaudeChatCompletionParams';
+import { CreateGenericChatCompletionParams } from './CreateGenericChatCompletionParams';
 export interface CreateStructuredChatCompletionParams<T extends ZodTypeAny> {
     openai?: CreateOpenAiChatCompletionParams;
     gemini?: CreateGeminiChatCompletionParams;
+    claude?: CreateClaudeChatCompletionParams;
+    generic?: CreateGenericChatCompletionParams;
     responseFormat: T;
 }

package/lib/types/StreamingChatCompletion.d.ts
CHANGED

@@ -1,9 +1,19 @@
 import { ApiError } from '@fencyai/js';
-import { StreamingChatCompletionData } from './StreamingChatCompletionData';
+import { CreateClaudeChatCompletionParams } from './CreateClaudeChatCompletionParams';
+import { CreateGenericChatCompletionParams } from './CreateGenericChatCompletionParams';
+import { CreateGeminiChatCompletionParams } from './CreateGeminiChatCompletionParams';
+import { CreateOpenAiChatCompletionParams } from './CreateOpenAiChatCompletionParams';
 import { NewChatCompletionStreamChunk } from './StreamData';
+import { StreamingChatCompletionData } from './StreamingChatCompletionData';
 export interface StreamingChatCompletion {
     triggeredAt: string;
     streamId: string;
+    prompt: {
+        openai?: CreateOpenAiChatCompletionParams;
+        gemini?: CreateGeminiChatCompletionParams;
+        claude?: CreateClaudeChatCompletionParams;
+        generic?: CreateGenericChatCompletionParams;
+    };
     data: StreamingChatCompletionData | null;
     error: ApiError | null;
     response: string;

package/lib/types/index.d.ts
CHANGED

@@ -2,7 +2,7 @@ export * from './FencyProviderProps';
 export * from './FencyContext';
 export * from './CreateOpenAiChatCompletionParams';
 export * from './CreateGeminiChatCompletionParams';
-export * from './CreateAnthropicChatCompletionParams';
+export * from './CreateClaudeChatCompletionParams';
 export * from './BasicChatCompletionData';
 export * from './BasicChatCompletion';
 export * from './BasicChatCompletionResponse';

package/lib/types/index.js
CHANGED

@@ -4,7 +4,7 @@ export * from './FencyContext';
 // Chat completion parameter types
 export * from './CreateOpenAiChatCompletionParams';
 export * from './CreateGeminiChatCompletionParams';
-export * from './CreateAnthropicChatCompletionParams';
+export * from './CreateClaudeChatCompletionParams';
 // Basic chat completion types
 export * from './BasicChatCompletionData';
 export * from './BasicChatCompletion';

package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@fencyai/react",
-  "version": "0.1.52",
+  "version": "0.1.54",
   "description": "> TODO: description",
   "author": "staklau <steinaageklaussen@gmail.com>",
   "homepage": "",
@@ -32,11 +32,8 @@
     "dev": "tsc --watch",
     "prepublishOnly": "npm run build"
   },
-  "dependencies": {
-    "zod": "^4.0.5"
-  },
   "devDependencies": {
-    "@fencyai/js": "^0.1.…",
+    "@fencyai/js": "^0.1.54",
     "@types/jest": "^29.5.11",
     "@types/node": "^20.10.5",
     "@types/react": "^18.2.45",
@@ -45,8 +42,14 @@
     "typescript": "^5.3.3"
   },
   "peerDependencies": {
-    "@fencyai/js": "^0.1.…",
-    "react": ">=16.8.0"
+    "@fencyai/js": "^0.1.54",
+    "react": ">=16.8.0",
+    "zod": "^4.0.5"
+  },
+  "peerDependenciesMeta": {
+    "zod": {
+      "optional": false
+    }
   },
-  "gitHead": "…"
+  "gitHead": "42e5ede5f912c388e8ba5bd37e0d04a539771d8e"
 }
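
One consumer-facing consequence of this hunk: zod moves from a bundled dependency to a required peer dependency (peerDependenciesMeta marks it non-optional), so applications upgrading to 0.1.54 must install zod ^4.0.5 themselves, alongside @fencyai/js ^0.1.54 and react.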

package/lib/types/CreateAnthropicChatCompletionParams.d.ts
DELETED

@@ -1,9 +0,0 @@
-import { AnthropicModel } from '@fencyai/js/lib/types/AnthropicModel';
-import { ChatCompletionMessage } from '@fencyai/js/lib/types/ChatCompletionMessage';
-export interface CreateAnthropicChatCompletionParams {
-    model: AnthropicModel;
-    messages: Array<ChatCompletionMessage>;
-    temperature?: number;
-    topP?: number;
-    topK?: number;
-}

package/lib/types/{CreateAnthropicChatCompletionParams.js → AiModel.js}
RENAMED
File without changes