@fencyai/react 0.1.51 → 0.1.53

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -30,19 +30,21 @@ export const useBasicChatCompletions = () => {
         gemini: params.gemini
             ? {
                 model: params.gemini.model,
-                content: params.gemini.content,
+                messages: params.gemini.messages,
+                systemPrompt: params.gemini.systemPrompt,
                 temperature: params.gemini.temperature,
                 topP: params.gemini.topP,
                 topK: params.gemini.topK,
             }
             : undefined,
-        anthropic: params.anthropic
+        claude: params.claude
             ? {
-                model: params.anthropic.model,
-                messages: params.anthropic.messages,
-                temperature: params.anthropic.temperature,
-                topP: params.anthropic.topP,
-                topK: params.anthropic.topK,
+                model: params.claude.model,
+                messages: params.claude.messages,
+                systemPrompt: params.claude.systemPrompt,
+                temperature: params.claude.temperature,
+                topP: params.claude.topP,
+                topK: params.claude.topK,
             }
             : undefined,
     },
@@ -7,7 +7,7 @@ export const useStreamingChatCompletions = () => {
     const [chatCompletions, setChatCompletions] = useState([]);
     const [completedStreamIds, setCompletedStreamIds] = useState([]);
     const [chunks, setChunks] = useState([]);
-    const { stream, createStream } = useStream({
+    const { createStream } = useStream({
         onNewChatCompletionStreamChunk: (streamData) => {
             setChunks((prev) => [...prev, streamData]);
         },
@@ -65,6 +65,7 @@ export const useStreamingChatCompletions = () => {
             }
             : null,
         streamId: chatCompletion.streamId,
+        params: chatCompletion.params,
         error: chatCompletion.error,
         loading: chatCompletion.loading,
         doneStreaming: chatCompletion.doneStreaming,
@@ -87,6 +88,7 @@ export const useStreamingChatCompletions = () => {
         triggeredAt: new Date().toISOString(),
         data: null,
         error: null,
+        params: params,
         response: '',
         chunks: [],
         loading: true,
@@ -103,18 +105,28 @@ export const useStreamingChatCompletions = () => {
             ? {
                 model: params.openai.model,
                 messages: params.openai.messages,
+                temperature: params.openai.temperature,
+                topP: params.openai.topP,
             }
             : undefined,
         gemini: params.gemini
             ? {
                 model: params.gemini.model,
-                content: params.gemini.content,
+                messages: params.gemini.messages,
+                systemPrompt: params.gemini.systemPrompt,
+                temperature: params.gemini.temperature,
+                topP: params.gemini.topP,
+                topK: params.gemini.topK,
             }
             : undefined,
-        anthropic: params.anthropic
+        claude: params.claude
             ? {
-                model: params.anthropic.model,
-                messages: params.anthropic.messages,
+                model: params.claude.model,
+                messages: params.claude.messages,
+                systemPrompt: params.claude.systemPrompt,
+                temperature: params.claude.temperature,
+                topP: params.claude.topP,
+                topK: params.claude.topK,
             }
             : undefined,
     },
@@ -130,6 +142,7 @@ export const useStreamingChatCompletions = () => {
             streamId: streamResponse.stream.id,
         },
         error: null,
+        params: params,
         response: '',
         chunks: [],
         doneStreaming: false,
@@ -151,6 +164,7 @@ export const useStreamingChatCompletions = () => {
     {
         triggeredAt: new Date().toISOString(),
         streamId: streamResponse.stream.id,
+        params: params,
         error: chatCompletion.error,
         response: '',
         chunks: [],
@@ -174,6 +188,7 @@ export const useStreamingChatCompletions = () => {
     {
         triggeredAt: new Date().toISOString(),
         streamId: streamResponse.stream.id,
+        params: params,
         error: error,
         response: '',
         chunks: [],
@@ -192,7 +207,7 @@ export const useStreamingChatCompletions = () => {
             console.error(streamResponse.error);
             return streamResponse;
         }
-    }, [context]);
+    }, [context, chatCompletions]);
     const latest = useMemo(() => {
         return chatCompletions.sort((a, b) => {
             return (new Date(b.triggeredAt).getTime() -
@@ -37,13 +37,29 @@ export const useStructuredChatCompletions = () => {
                 model: params.openai.model,
                 responseJsonSchema: parsedJsonSchema,
                 messages: params.openai.messages,
+                temperature: params.openai.temperature,
+                topP: params.openai.topP,
             }
             : undefined,
         gemini: params.gemini
             ? {
                 model: params.gemini.model,
                 responseJsonSchema: parsedJsonSchema,
-                content: params.gemini.content,
+                messages: params.gemini.messages,
+                systemPrompt: params.gemini.systemPrompt,
+                temperature: params.gemini.temperature,
+                topP: params.gemini.topP,
+                topK: params.gemini.topK,
+            }
+            : undefined,
+        claude: params.claude
+            ? {
+                model: params.claude.model,
+                messages: params.claude.messages,
+                systemPrompt: params.claude.systemPrompt,
+                temperature: params.claude.temperature,
+                topP: params.claude.topP,
+                topK: params.claude.topK,
             }
             : undefined,
     },
@@ -1,8 +1,8 @@
 import { CreateOpenAiChatCompletionParams } from './CreateOpenAiChatCompletionParams';
 import { CreateGeminiChatCompletionParams } from './CreateGeminiChatCompletionParams';
-import { CreateAnthropicChatCompletionParams } from './CreateAnthropicChatCompletionParams';
+import { CreateClaudeChatCompletionParams } from './CreateClaudeChatCompletionParams';
 export interface CreateBasicChatCompletionParams {
     openai?: CreateOpenAiChatCompletionParams;
     gemini?: CreateGeminiChatCompletionParams;
-    anthropic?: CreateAnthropicChatCompletionParams;
+    claude?: CreateClaudeChatCompletionParams;
 }
@@ -0,0 +1,9 @@
+import { AnthropicModel, ClaudeChatCompletionMessage } from '@fencyai/js';
+export interface CreateClaudeChatCompletionParams {
+    model: AnthropicModel;
+    messages: Array<ClaudeChatCompletionMessage>;
+    systemPrompt?: string;
+    temperature?: number;
+    topP?: number;
+    topK?: number;
+}
@@ -1,7 +1,9 @@
-import { GeminiModel } from '@fencyai/js/lib/types/GeminiModel';
+import { GeminiModel } from '@fencyai/js';
+import { GeminiChatCompletionMessage } from '@fencyai/js';
 export interface CreateGeminiChatCompletionParams {
     model: GeminiModel;
-    content: string;
+    messages: Array<GeminiChatCompletionMessage>;
+    systemPrompt?: string;
     temperature?: number;
     topP?: number;
     topK?: number;
@@ -1,5 +1,5 @@
-import { OpenAiModel } from '@fencyai/js/lib/types/OpenAiModel';
-import { ChatCompletionMessage } from '@fencyai/js/lib/types/ChatCompletionMessage';
+import { OpenAiModel } from '@fencyai/js';
+import { ChatCompletionMessage } from '@fencyai/js';
 export interface CreateOpenAiChatCompletionParams {
     model: OpenAiModel;
     messages: Array<ChatCompletionMessage>;
@@ -1,8 +1,8 @@
 import { CreateOpenAiChatCompletionParams } from './CreateOpenAiChatCompletionParams';
 import { CreateGeminiChatCompletionParams } from './CreateGeminiChatCompletionParams';
-import { CreateAnthropicChatCompletionParams } from './CreateAnthropicChatCompletionParams';
+import { CreateClaudeChatCompletionParams } from './CreateClaudeChatCompletionParams';
 export interface CreateStreamingChatCompletionParams {
     openai?: CreateOpenAiChatCompletionParams;
     gemini?: CreateGeminiChatCompletionParams;
-    anthropic?: CreateAnthropicChatCompletionParams;
+    claude?: CreateClaudeChatCompletionParams;
 }
@@ -1,8 +1,10 @@
 import { ZodTypeAny } from 'zod';
 import { CreateOpenAiChatCompletionParams } from './CreateOpenAiChatCompletionParams';
 import { CreateGeminiChatCompletionParams } from './CreateGeminiChatCompletionParams';
+import { CreateClaudeChatCompletionParams } from './CreateClaudeChatCompletionParams';
 export interface CreateStructuredChatCompletionParams<T extends ZodTypeAny> {
     openai?: CreateOpenAiChatCompletionParams;
     gemini?: CreateGeminiChatCompletionParams;
+    claude?: CreateClaudeChatCompletionParams;
     responseFormat: T;
 }
@@ -1,9 +1,11 @@
 import { ApiError } from '@fencyai/js';
 import { StreamingChatCompletionData } from './StreamingChatCompletionData';
 import { NewChatCompletionStreamChunk } from './StreamData';
+import { CreateStreamingChatCompletionParams } from './CreateStreamingChatCompletionParams';
 export interface StreamingChatCompletion {
     triggeredAt: string;
     streamId: string;
+    params: CreateStreamingChatCompletionParams;
     data: StreamingChatCompletionData | null;
     error: ApiError | null;
     response: string;
@@ -2,7 +2,7 @@ export * from './FencyProviderProps';
 export * from './FencyContext';
 export * from './CreateOpenAiChatCompletionParams';
 export * from './CreateGeminiChatCompletionParams';
-export * from './CreateAnthropicChatCompletionParams';
+export * from './CreateClaudeChatCompletionParams';
 export * from './BasicChatCompletionData';
 export * from './BasicChatCompletion';
 export * from './BasicChatCompletionResponse';
@@ -4,7 +4,7 @@ export * from './FencyContext';
 // Chat completion parameter types
 export * from './CreateOpenAiChatCompletionParams';
 export * from './CreateGeminiChatCompletionParams';
-export * from './CreateAnthropicChatCompletionParams';
+export * from './CreateClaudeChatCompletionParams';
 // Basic chat completion types
 export * from './BasicChatCompletionData';
 export * from './BasicChatCompletion';
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@fencyai/react",
-  "version": "0.1.51",
+  "version": "0.1.53",
   "description": "> TODO: description",
   "author": "staklau <steinaageklaussen@gmail.com>",
   "homepage": "",
@@ -32,11 +32,8 @@
     "dev": "tsc --watch",
     "prepublishOnly": "npm run build"
   },
-  "dependencies": {
-    "zod": "^4.0.5"
-  },
   "devDependencies": {
-    "@fencyai/js": "^0.1.51",
+    "@fencyai/js": "^0.1.53",
     "@types/jest": "^29.5.11",
     "@types/node": "^20.10.5",
     "@types/react": "^18.2.45",
@@ -45,8 +42,14 @@
     "typescript": "^5.3.3"
   },
   "peerDependencies": {
-    "@fencyai/js": "^0.1.51",
-    "react": ">=16.8.0"
+    "@fencyai/js": "^0.1.53",
+    "react": ">=16.8.0",
+    "zod": "^4.0.5"
+  },
+  "peerDependenciesMeta": {
+    "zod": {
+      "optional": false
+    }
   },
-  "gitHead": "78ae5a655bece0599efe1e207ebda23e2e6b0260"
+  "gitHead": "c06951326631299d64fb298760a0b6ba19d573ee"
 }
@@ -1,9 +0,0 @@
-import { AnthropicModel } from '@fencyai/js/lib/types/AnthropicModel';
-import { ChatCompletionMessage } from '@fencyai/js/lib/types/ChatCompletionMessage';
-export interface CreateAnthropicChatCompletionParams {
-    model: AnthropicModel;
-    messages: Array<ChatCompletionMessage>;
-    temperature?: number;
-    topP?: number;
-    topK?: number;
-}