@ahoo-wang/fetcher-openai 2.9.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +201 -0
- package/README.md +1223 -0
- package/README.zh-CN.md +1217 -0
- package/dist/chat/chatClient.d.ts +225 -0
- package/dist/chat/chatClient.d.ts.map +1 -0
- package/dist/chat/completionStreamResultExtractor.d.ts +67 -0
- package/dist/chat/completionStreamResultExtractor.d.ts.map +1 -0
- package/dist/chat/index.d.ts +4 -0
- package/dist/chat/index.d.ts.map +1 -0
- package/dist/chat/types.d.ts +114 -0
- package/dist/chat/types.d.ts.map +1 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.es.js +128 -0
- package/dist/index.es.js.map +1 -0
- package/dist/index.umd.js +2 -0
- package/dist/index.umd.js.map +1 -0
- package/dist/openai.d.ts +82 -0
- package/dist/openai.d.ts.map +1 -0
- package/package.json +80 -0
|
@@ -0,0 +1,225 @@
|
|
|
1
|
+
import { ApiMetadata, ApiMetadataCapable, ExecuteLifeCycle } from '@ahoo-wang/fetcher-decorator';
|
|
2
|
+
import { JsonServerSentEventStream } from '@ahoo-wang/fetcher-eventstream';
|
|
3
|
+
import { FetchExchange } from '@ahoo-wang/fetcher';
|
|
4
|
+
import { ChatRequest, ChatResponse } from './types';
|
|
5
|
+
/**
 * Type-safe client for OpenAI's Chat Completions API.
 *
 * Supports both streaming and non-streaming completions through a single
 * `completions` method whose return type is conditional on the request's
 * `stream` flag. Declarative request handling is provided by the
 * fetcher-decorator framework.
 *
 * @implements {ApiMetadataCapable} - accepts API metadata configuration
 * @implements {ExecuteLifeCycle} - hooks into request preprocessing
 *
 * @example
 * ```typescript
 * import { ChatClient } from '@ahoo-wang/fetcher-openai';
 *
 * const client = new ChatClient({
 *   baseURL: 'https://api.openai.com/v1',
 *   headers: { 'Authorization': `Bearer ${process.env.OPENAI_API_KEY}` }
 * });
 *
 * // Non-streaming completion
 * const response = await client.completions({
 *   model: 'gpt-3.5-turbo',
 *   messages: [{ role: 'user', content: 'Hello, world!' }]
 * });
 *
 * // Streaming completion
 * const stream = await client.completions({
 *   model: 'gpt-3.5-turbo',
 *   messages: [{ role: 'user', content: 'Tell me a story' }],
 *   stream: true
 * });
 * for await (const chunk of stream) {
 *   console.log(chunk.choices[0]?.delta?.content || '');
 * }
 * ```
 */
export declare class ChatClient implements ApiMetadataCapable, ExecuteLifeCycle {
    readonly apiMetadata?: ApiMetadata | undefined;
    /**
     * Creates a new ChatClient.
     *
     * @param apiMetadata - Optional configuration (baseURL, headers, timeout,
     *        interceptors, …) customizing request behavior. When omitted, the
     *        default fetcher configuration is used.
     */
    constructor(apiMetadata?: ApiMetadata | undefined);
    /**
     * Lifecycle hook invoked automatically by the fetcher-decorator framework
     * before each request. When the request body has `stream: true`, it
     * assigns the streaming result extractor (`CompletionStreamResultExtractor`)
     * so the response is exposed as a server-sent event stream; otherwise the
     * default JSON extractor is used. This is what enables the conditional
     * return type of {@link ChatClient.completions}.
     *
     * @param exchange - Mutable exchange describing the pending request;
     *        its `resultExtractor` may be reassigned here.
     */
    beforeExecute(exchange: FetchExchange): void;
    /**
     * Creates a chat completion via OpenAI's Chat Completions API.
     *
     * The return type is conditional on the request's `stream` flag:
     * - `stream: true` → `JsonServerSentEventStream<ChatResponse>`; the
     *   stream terminates automatically when the API sends '[DONE]'.
     * - `stream: false` or omitted → a single `ChatResponse`.
     *
     * @template T - The chat request type, constrained to ChatRequest.
     * @param chatRequest - Model, messages, and optional sampling parameters
     *        (`temperature`, `max_tokens`, `top_p`, `frequency_penalty`,
     *        `presence_penalty`, `stop`, …).
     * @returns Promise resolving to the completion response or event stream.
     * @throws {Error} Network errors, authentication failures, or API errors.
     * @throws {EventStreamConvertError} When a streaming response cannot be
     *         converted to an event stream.
     */
    completions<T extends ChatRequest = ChatRequest>(chatRequest: T): Promise<T['stream'] extends true ? JsonServerSentEventStream<ChatResponse> : ChatResponse>;
}
|
|
225
|
+
//# sourceMappingURL=chatClient.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"chatClient.d.ts","sourceRoot":"","sources":["../../src/chat/chatClient.ts"],"names":[],"mappings":"AAaA,OAAO,EAEL,KAAK,WAAW,EAChB,kBAAkB,EAGlB,gBAAgB,EAEjB,MAAM,8BAA8B,CAAC;AACtC,OAAO,EAAE,yBAAyB,EAAE,MAAM,gCAAgC,CAAC;AAC3E,OAAO,EAAE,aAAa,EAAE,MAAM,oBAAoB,CAAC;AACnD,OAAO,EAAE,WAAW,EAAE,YAAY,EAAE,MAAM,SAAS,CAAC;AAGpD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA8CG;AACH,qBACa,UAAW,YAAW,kBAAkB,EAAE,gBAAgB;aAkCzC,WAAW,CAAC,EAAE,WAAW;IAjCrD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;OAgCG;gBACyB,WAAW,CAAC,EAAE,WAAW,YAAA;IAErD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;OA+BG;IACH,aAAa,CAAC,QAAQ,EAAE,aAAa,GAAG,IAAI;IAO5C;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;OAqGG;IAEH,WAAW,CAAC,CAAC,SAAS,WAAW,GAAG,WAAW,EACrC,WAAW,EAAE,CAAC,GACrB,OAAO,CACR,CAAC,CAAC,QAAQ,CAAC,SAAS,IAAI,GACpB,yBAAyB,CAAC,YAAY,CAAC,GACvC,YAAY,CACjB;CAGF"}
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
import { ResultExtractor } from '@ahoo-wang/fetcher';
|
|
2
|
+
import { JsonServerSentEventStream, TerminateDetector } from '@ahoo-wang/fetcher-eventstream';
|
|
3
|
+
import { ChatResponse } from './types';
|
|
4
|
+
/**
 * Termination detector for OpenAI chat completion streams.
 *
 * Returns true when a server-sent event's `data` equals '[DONE]' — the
 * standard end-of-stream sentinel emitted by OpenAI's streaming API.
 *
 * @param event - The server-sent event to inspect.
 * @returns true when the event marks stream completion, false otherwise.
 *
 * @example
 * ```typescript
 * const event: ServerSentEvent = { data: '[DONE]', event: 'done' };
 * const isDone = DoneDetector(event); // returns true
 * ```
 */
export declare const DoneDetector: TerminateDetector;
|
|
21
|
+
/**
 * Result extractor for OpenAI chat completion streaming responses.
 *
 * Converts the HTTP response held by a fetch exchange into a JSON
 * server-sent event stream of ChatResponse objects. The stream terminates
 * automatically when {@link DoneDetector} observes the '[DONE]' sentinel,
 * so callers never need to manage stream termination themselves.
 *
 * @param exchange - The fetch exchange holding OpenAI's streaming response.
 * @returns A JSON server-sent event stream of ChatResponse objects that
 *          terminates on completion.
 * @throws {EventStreamConvertError} If the response is not a valid event
 *         stream or has an incorrect content type.
 *
 * @example
 * ```typescript
 * import { fetcher } from '@ahoo-wang/fetcher';
 * import { CompletionStreamResultExtractor } from '@ahoo-wang/fetcher-openai';
 *
 * const response = await fetcher.post('/chat/completions', {
 *   model: 'gpt-3.5-turbo',
 *   messages: [{ role: 'user', content: 'Hello!' }],
 *   stream: true
 * });
 *
 * const stream = CompletionStreamResultExtractor(response);
 * for await (const event of stream) {
 *   console.log('Received:', event.data);
 *   // Stream automatically terminates when '[DONE]' is received
 * }
 * ```
 */
export declare const CompletionStreamResultExtractor: ResultExtractor<JsonServerSentEventStream<ChatResponse>>;
|
|
67
|
+
//# sourceMappingURL=completionStreamResultExtractor.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"completionStreamResultExtractor.d.ts","sourceRoot":"","sources":["../../src/chat/completionStreamResultExtractor.ts"],"names":[],"mappings":"AAaA,OAAO,EAAiB,eAAe,EAAE,MAAM,oBAAoB,CAAC;AACpE,OAAO,gCAAgC,CAAC;AACxC,OAAO,EACL,yBAAyB,EAEzB,iBAAiB,EAClB,MAAM,gCAAgC,CAAC;AACxC,OAAO,EAAE,YAAY,EAAE,MAAM,SAAS,CAAC;AAEvC;;;;;;;;;;;;;;;GAeG;AACH,eAAO,MAAM,YAAY,EAAE,iBAE1B,CAAC;AAEF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA4CG;AACH,eAAO,MAAM,+BAA+B,EAAE,eAAe,CAC3D,yBAAyB,CAAC,YAAY,CAAC,CAGxC,CAAC"}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/chat/index.ts"],"names":[],"mappings":"AAaA,cAAc,cAAc,CAAC;AAC7B,cAAc,mCAAmC,CAAC;AAClD,cAAc,SAAS,CAAC"}
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
export interface ChatRequest {
|
|
2
|
+
/**
|
|
3
|
+
* 默认为 0 -2.0 到 2.0 之间的数字。正值根据文本目前的存在频率惩罚新标记,降低模型重复相同行的可能性。 有关频率和存在惩罚的更多信息。
|
|
4
|
+
*/
|
|
5
|
+
frequency_penalty?: number;
|
|
6
|
+
/**
|
|
7
|
+
* 修改指定标记出现在补全中的可能性。
|
|
8
|
+
*
|
|
9
|
+
* 接受一个 JSON 对象,该对象将标记(由标记器指定的标记 ID)映射到相关的偏差值(-100 到 100)。从数学上讲,偏差在对模型进行采样之前添加到模型生成的 logit
|
|
10
|
+
* 中。确切效果因模型而异,但-1 和 1 之间的值应减少或增加相关标记的选择可能性;如-100 或 100 这样的值应导致相关标记的禁用或独占选择。
|
|
11
|
+
*/
|
|
12
|
+
logit_bias?: null;
|
|
13
|
+
/**
|
|
14
|
+
* 默认为 inf
|
|
15
|
+
* 在聊天补全中生成的最大标记数。
|
|
16
|
+
*
|
|
17
|
+
* 输入标记和生成标记的总长度受模型的上下文长度限制。计算标记的 Python 代码示例。
|
|
18
|
+
*/
|
|
19
|
+
max_tokens?: number;
|
|
20
|
+
/**
|
|
21
|
+
* 至今为止对话所包含的消息列表。Python 代码示例。
|
|
22
|
+
*/
|
|
23
|
+
messages: Message[];
|
|
24
|
+
/**
|
|
25
|
+
* 要使用的模型的 ID。有关哪些模型可与聊天 API 一起使用的详细信息,请参阅模型端点兼容性表。
|
|
26
|
+
*/
|
|
27
|
+
model?: string;
|
|
28
|
+
/**
|
|
29
|
+
* 默认为 1
|
|
30
|
+
* 为每个输入消息生成多少个聊天补全选择。
|
|
31
|
+
*/
|
|
32
|
+
n?: number;
|
|
33
|
+
/**
|
|
34
|
+
* -2.0 和 2.0 之间的数字。正值会根据到目前为止是否出现在文本中来惩罚新标记,从而增加模型谈论新主题的可能性。
|
|
35
|
+
* [查看有关频率和存在惩罚的更多信息。](https://platform.openai.com/docs/api-reference/parameter-details)
|
|
36
|
+
*/
|
|
37
|
+
presence_penalty?: number;
|
|
38
|
+
/**
|
|
39
|
+
* 指定模型必须输出的格式的对象。 将 { "type": "json_object" } 启用 JSON 模式,这可以确保模型生成的消息是有效的 JSON。 重要提示:使用
|
|
40
|
+
* JSON 模式时,还必须通过系统或用户消息指示模型生成
|
|
41
|
+
* JSON。如果不这样做,模型可能会生成无休止的空白流,直到生成达到令牌限制,从而导致延迟增加和请求“卡住”的外观。另请注意,如果
|
|
42
|
+
* finish_reason="length",则消息内容可能会被部分切断,这表示生成超过了 max_tokens 或对话超过了最大上下文长度。 显示属性
|
|
43
|
+
*/
|
|
44
|
+
response_format?: {
|
|
45
|
+
[key: string]: any;
|
|
46
|
+
};
|
|
47
|
+
/**
|
|
48
|
+
* 此功能处于测试阶段。如果指定,我们的系统将尽最大努力确定性地进行采样,以便使用相同的种子和参数进行重复请求应返回相同的结果。不能保证确定性,您应该参考
|
|
49
|
+
* system_fingerprint 响应参数来监控后端的更改。
|
|
50
|
+
*/
|
|
51
|
+
seen?: number;
|
|
52
|
+
/**
|
|
53
|
+
* 默认为 null 最多 4 个序列,API 将停止进一步生成标记。
|
|
54
|
+
*/
|
|
55
|
+
stop?: string;
|
|
56
|
+
/**
|
|
57
|
+
* 默认为 false 如果设置,则像在 ChatGPT 中一样会发送部分消息增量。标记将以仅数据的服务器发送事件的形式发送,这些事件在可用时,并在 data: [DONE]
|
|
58
|
+
* 消息终止流。Python 代码示例。
|
|
59
|
+
*/
|
|
60
|
+
stream?: boolean;
|
|
61
|
+
/**
|
|
62
|
+
* 使用什么采样温度,介于 0 和 2 之间。较高的值(如 0.8)将使输出更加随机,而较低的值(如 0.2)将使输出更加集中和确定。
|
|
63
|
+
* 我们通常建议改变这个或`top_p`但不是两者。
|
|
64
|
+
*/
|
|
65
|
+
temperature?: number;
|
|
66
|
+
/**
|
|
67
|
+
* 控制模型调用哪个函数(如果有的话)。none 表示模型不会调用函数,而是生成消息。auto 表示模型可以在生成消息和调用函数之间进行选择。通过 {"type":
|
|
68
|
+
* "function", "function": {"name": "my_function"}} 强制模型调用该函数。 如果没有函数存在,默认为
|
|
69
|
+
* none。如果有函数存在,默认为 auto。 显示可能的类型
|
|
70
|
+
*/
|
|
71
|
+
tool_choice?: {
|
|
72
|
+
[key: string]: any;
|
|
73
|
+
};
|
|
74
|
+
/**
|
|
75
|
+
* 模型可以调用的一组工具列表。目前,只支持作为工具的函数。使用此功能来提供模型可以为之生成 JSON 输入的函数列表。
|
|
76
|
+
*/
|
|
77
|
+
tools?: string[];
|
|
78
|
+
/**
|
|
79
|
+
* 一种替代温度采样的方法,称为核采样,其中模型考虑具有 top_p 概率质量的标记的结果。所以 0.1 意味着只考虑构成前 10% 概率质量的标记。
|
|
80
|
+
* 我们通常建议改变这个或`temperature`但不是两者。
|
|
81
|
+
*/
|
|
82
|
+
top_p?: number;
|
|
83
|
+
/**
|
|
84
|
+
* 代表您的最终用户的唯一标识符,可以帮助 OpenAI
|
|
85
|
+
* 监控和检测滥用行为。[了解更多](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids)。
|
|
86
|
+
*/
|
|
87
|
+
user?: string;
|
|
88
|
+
}
|
|
89
|
+
export interface Message {
|
|
90
|
+
content?: string;
|
|
91
|
+
role?: string;
|
|
92
|
+
[property: string]: any;
|
|
93
|
+
}
|
|
94
|
+
export interface ChatResponse {
|
|
95
|
+
choices: Choice[];
|
|
96
|
+
created: number;
|
|
97
|
+
id: string;
|
|
98
|
+
object: string;
|
|
99
|
+
usage: Usage;
|
|
100
|
+
[property: string]: any;
|
|
101
|
+
}
|
|
102
|
+
export interface Choice {
|
|
103
|
+
finish_reason?: string;
|
|
104
|
+
index?: number;
|
|
105
|
+
message?: Message;
|
|
106
|
+
[property: string]: any;
|
|
107
|
+
}
|
|
108
|
+
export interface Usage {
|
|
109
|
+
completion_tokens: number;
|
|
110
|
+
prompt_tokens: number;
|
|
111
|
+
total_tokens: number;
|
|
112
|
+
[property: string]: any;
|
|
113
|
+
}
|
|
114
|
+
//# sourceMappingURL=types.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/chat/types.ts"],"names":[],"mappings":"AAaA,MAAM,WAAW,WAAW;IAC1B;;OAEG;IACH,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B;;;;;OAKG;IACH,UAAU,CAAC,EAAE,IAAI,CAAC;IAClB;;;;;OAKG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,OAAO,EAAE,CAAC;IACpB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,CAAC,CAAC,EAAE,MAAM,CAAC;IACX;;;OAGG;IACH,gBAAgB,CAAC,EAAE,MAAM,CAAC;IAC1B;;;;;OAKG;IACH,eAAe,CAAC,EAAE;QAAE,CAAC,GAAG,EAAE,MAAM,GAAG,GAAG,CAAA;KAAE,CAAC;IACzC;;;OAGG;IACH,IAAI,CAAC,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,IAAI,CAAC,EAAE,MAAM,CAAC;IACd;;;OAGG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;IACjB;;;OAGG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IACrB;;;;OAIG;IACH,WAAW,CAAC,EAAE;QAAE,CAAC,GAAG,EAAE,MAAM,GAAG,GAAG,CAAA;KAAE,CAAC;IACrC;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,EAAE,CAAC;IACjB;;;OAGG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,IAAI,CAAC,EAAE,MAAM,CAAC;CACf;AAED,MAAM,WAAW,OAAO;IACtB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,IAAI,CAAC,EAAE,MAAM,CAAC;IAEd,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG,CAAC;CACzB;AAED,MAAM,WAAW,YAAY;IAC3B,OAAO,EAAE,MAAM,EAAE,CAAC;IAClB,OAAO,EAAE,MAAM,CAAC;IAChB,EAAE,EAAE,MAAM,CAAC;IACX,MAAM,EAAE,MAAM,CAAC;IACf,KAAK,EAAE,KAAK,CAAC;IAEb,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG,CAAC;CACzB;AAED,MAAM,WAAW,MAAM;IACrB,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,OAAO,CAAC,EAAE,OAAO,CAAC;IAElB,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG,CAAC;CACzB;AAED,MAAM,WAAW,KAAK;IACpB,iBAAiB,EAAE,MAAM,CAAC;IAC1B,aAAa,EAAE,MAAM,CAAC;IACtB,YAAY,EAAE,MAAM,CAAC;IAErB,CAAC,QAAQ,EAAE,MAAM,GAAG,GAAG,CAAC;CACzB"}
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAaA,cAAc,QAAQ,CAAC;AACvB,cAAc,UAAU,CAAC"}
|
package/dist/index.es.js
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
import { post as i, body as h, api as u, autoGeneratedError as l } from "@ahoo-wang/fetcher-decorator";
|
|
2
|
+
import "@ahoo-wang/fetcher-eventstream";
|
|
3
|
+
import { Fetcher as m } from "@ahoo-wang/fetcher";
|
|
4
|
+
// f = DoneDetector (minified): a server-sent event terminates the stream when
//     its data is the '[DONE]' sentinel.
// v = CompletionStreamResultExtractor (minified): exposes the exchange's
//     response as a JSON server-sent event stream terminated by f.
const f = (e) => e.data === "[DONE]", v = (e) => e.requiredResponse.requiredJsonEventStream(f);
|
|
5
|
+
// Compiler-emitted decorator plumbing (minified):
// p = __decorate: applies the decorator list e right-to-left to a class
//     (o < 1) or to a member descriptor of t[s] (o > 1), re-defining the
//     property when a decorator returns a new descriptor.
// d = __param: wraps a parameter decorator t, binding it to parameter index e.
var _ = Object.defineProperty, b = Object.getOwnPropertyDescriptor, p = (e, t, s, o) => {
  for (var r = o > 1 ? void 0 : o ? b(t, s) : t, c = e.length - 1, n; c >= 0; c--)
    (n = e[c]) && (r = (o ? n(t, s, r) : n(r)) || r);
  return o && r && _(t, s, r), r;
}, d = (e, t) => (s, o) => t(s, o, e);
|
|
10
|
+
// a = ChatClient (minified). Decorated below with api('chat') on the class
// and post('/completions') + body() on `completions`.
let a = class {
  /**
   * Creates a new ChatClient.
   * @param e - Optional API metadata (baseURL, headers, timeout, …) stored
   *        for the decorator framework to use when executing requests.
   */
  constructor(e) {
    this.apiMetadata = e;
  }
  /**
   * Lifecycle hook run before each request: when the request body has
   * `stream: true`, switches the result extractor to the streaming
   * CompletionStreamResultExtractor (v) so the caller receives a server-sent
   * event stream; otherwise the default extractor is left in place.
   * @param e - The fetch exchange about to be executed.
   */
  beforeExecute(e) {
    e.request.body.stream && (e.resultExtractor = v);
  }
  // Placeholder body: the decorator framework supplies the real request
  // logic; invoking the undecorated method throws an auto-generated error (l).
  completions(e) {
    throw l(e);
  }
};
p([
  i("/completions"),
  d(0, h())
], a.prototype, "completions", 1);
a = p([
  u("chat")
], a);
|
|
93
|
+
// q = OpenAI (minified): root client that wires up a Fetcher with
// bearer-token authentication and exposes the chat sub-client.
class q {
  /**
   * Creates an OpenAI client.
   * @param t - Options with `baseURL` and `apiKey`; the key is sent as a
   *        `Bearer` Authorization header on every request made by the
   *        underlying fetcher.
   */
  constructor(t) {
    this.fetcher = new m({
      baseURL: t.baseURL,
      headers: {
        Authorization: `Bearer ${t.apiKey}`
      }
    }), this.chat = new a({ fetcher: this.fetcher });
  }
}
|
|
122
|
+
// Re-export the minified bindings under their public package names.
export {
  a as ChatClient,
  v as CompletionStreamResultExtractor,
  f as DoneDetector,
  q as OpenAI
};
|
|
128
|
+
//# sourceMappingURL=index.es.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.es.js","sources":["../src/chat/completionStreamResultExtractor.ts","../src/chat/chatClient.ts","../src/openai.ts"],"sourcesContent":["/*\n * Copyright [2021-present] [ahoo wang <ahoowang@qq.com> (https://github.com/Ahoo-Wang)].\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n * http://www.apache.org/licenses/LICENSE-2.0\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { FetchExchange, ResultExtractor } from '@ahoo-wang/fetcher';\nimport '@ahoo-wang/fetcher-eventstream';\nimport {\n JsonServerSentEventStream,\n ServerSentEvent,\n TerminateDetector,\n} from '@ahoo-wang/fetcher-eventstream';\nimport { ChatResponse } from './types';\n\n/**\n * A termination detector for OpenAI chat completion streams.\n *\n * This detector identifies when a chat completion stream has finished by checking\n * if the server-sent event data equals '[DONE]'. 
This is the standard completion\n * signal used by OpenAI's API for streaming chat completions.\n *\n * @param event - The server-sent event to evaluate for termination\n * @returns true if the event indicates stream completion, false otherwise\n *\n * @example\n * ```typescript\n * const event: ServerSentEvent = { data: '[DONE]', event: 'done' };\n * const isDone = DoneDetector(event); // returns true\n * ```\n */\nexport const DoneDetector: TerminateDetector = (event: ServerSentEvent) => {\n return event.data === '[DONE]';\n};\n\n/**\n * Result extractor for OpenAI chat completion streaming responses.\n *\n * This extractor processes HTTP responses from OpenAI's chat completion API endpoints\n * that return streaming responses. It converts the response into a JSON server-sent\n * event stream that automatically terminates when the completion is finished.\n *\n * The extractor uses the DoneDetector to identify completion signals and ensures\n * the response is properly formatted as a server-sent event stream with JSON data.\n *\n * @param exchange - The fetch exchange containing the HTTP response from OpenAI's API\n * @returns A JSON server-sent event stream of ChatResponse objects that terminates on completion\n * @throws {EventStreamConvertError} If the response is not a valid event stream or has incorrect content type\n *\n * @example\n * ```typescript\n * import { fetcher } from '@ahoo-wang/fetcher';\n * import { CompletionStreamResultExtractor } from '@ahoo-wang/fetcher-openai';\n *\n * const response = await fetcher.post('/chat/completions', {\n * model: 'gpt-3.5-turbo',\n * messages: [{ role: 'user', content: 'Hello!' 
}],\n * stream: true\n * });\n *\n * const stream = CompletionStreamResultExtractor(response);\n *\n * for await (const event of stream) {\n * console.log('Received:', event.data);\n * // Stream automatically terminates when '[DONE]' is received\n * }\n * ```\n *\n * @example\n * ```typescript\n * // Using with fetcher configuration\n * const fetcherWithExtractor = fetcher.extend({\n * resultExtractor: CompletionStreamResultExtractor\n * });\n *\n * const stream = await fetcherWithExtractor.post('/chat/completions', {\n * // ... request options\n * });\n * ```\n */\nexport const CompletionStreamResultExtractor: ResultExtractor<\n JsonServerSentEventStream<ChatResponse>\n> = (exchange: FetchExchange) => {\n return exchange.requiredResponse.requiredJsonEventStream(DoneDetector);\n};\n","/*\n * Copyright [2021-present] [ahoo wang <ahoowang@qq.com> (https://github.com/Ahoo-Wang)].\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n * http://www.apache.org/licenses/LICENSE-2.0\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n api,\n type ApiMetadata,\n ApiMetadataCapable,\n autoGeneratedError,\n body,\n ExecuteLifeCycle,\n post,\n} from '@ahoo-wang/fetcher-decorator';\nimport { JsonServerSentEventStream } from '@ahoo-wang/fetcher-eventstream';\nimport { FetchExchange } from '@ahoo-wang/fetcher';\nimport { ChatRequest, ChatResponse } from './types';\nimport { CompletionStreamResultExtractor } from './completionStreamResultExtractor';\n\n/**\n * OpenAI Chat API Client\n *\n * A comprehensive client for OpenAI's Chat Completions API that provides 
type-safe integration\n * with both streaming and non-streaming chat completion endpoints. This client leverages the\n * fetcher-decorator pattern to enable declarative API definitions and automatic request/response\n * handling.\n *\n * Key Features:\n * - Type-safe request/response handling with TypeScript generics\n * - Automatic streaming response processing for server-sent events\n * - Lifecycle hooks for request preprocessing and response transformation\n * - Support for all OpenAI Chat Completion API parameters\n * - Conditional return types based on streaming configuration\n *\n * @implements {ApiMetadataCapable} - Supports API metadata configuration\n * @implements {ExecuteLifeCycle} - Provides lifecycle hooks for request processing\n *\n * @example\n * ```typescript\n * import { ChatClient } from '@ahoo-wang/fetcher-openai';\n *\n * const client = new ChatClient({\n * baseURL: 'https://api.openai.com/v1',\n * headers: {\n * 'Authorization': `Bearer ${process.env.OPENAI_API_KEY}`\n * }\n * });\n *\n * // Non-streaming completion\n * const response = await client.completions({\n * model: 'gpt-3.5-turbo',\n * messages: [{ role: 'user', content: 'Hello, world!' 
}]\n * });\n *\n * // Streaming completion\n * const stream = await client.completions({\n * model: 'gpt-3.5-turbo',\n * messages: [{ role: 'user', content: 'Tell me a story' }],\n * stream: true\n * });\n *\n * for await (const chunk of stream) {\n * console.log(chunk.choices[0]?.delta?.content || '');\n * }\n * ```\n */\n@api('chat')\nexport class ChatClient implements ApiMetadataCapable, ExecuteLifeCycle {\n /**\n * Creates a new ChatClient instance with optional API configuration.\n *\n * The API metadata allows customization of request behavior such as base URLs,\n * default headers, timeout settings, and other fetcher configuration options.\n * This enables flexible deployment scenarios (e.g., custom API endpoints, proxy configurations).\n *\n * @param apiMetadata - Optional configuration object for customizing API behavior.\n * Includes baseURL, headers, timeout, and other fetcher options.\n * If not provided, uses default fetcher configuration.\n *\n * @example\n * ```typescript\n * // Basic usage with default configuration\n * const client = new ChatClient();\n *\n * // With custom API endpoint and authentication\n * const client = new ChatClient({\n * baseURL: 'https://api.openai.com/v1',\n * headers: {\n * 'Authorization': `Bearer ${process.env.OPENAI_API_KEY}`,\n * 'Content-Type': 'application/json'\n * },\n * timeout: 30000\n * });\n *\n * // With custom fetcher configuration\n * const client = new ChatClient({\n * baseURL: 'https://custom-openai-proxy.com/api',\n * interceptors: [customAuthInterceptor]\n * });\n * ```\n */\n constructor(public readonly apiMetadata?: ApiMetadata) {}\n\n /**\n * Lifecycle hook executed before request processing.\n *\n * This method implements the ExecuteLifeCycle interface and is automatically called by the\n * fetcher-decorator framework before each API request. 
It inspects the chat request to determine\n * whether streaming is requested and configures the appropriate result extractor accordingly.\n *\n * The method performs dynamic result extractor assignment:\n * - For streaming requests (`stream: true`): Assigns `CompletionStreamResultExtractor` to handle\n * server-sent event streams with automatic termination detection\n * - For non-streaming requests (`stream: false` or undefined): Uses the default JSON extractor\n * for standard JSON response parsing\n *\n * This approach enables type-safe conditional return types while maintaining clean API design.\n *\n * @param exchange - The fetch exchange object containing request details, response information,\n * and configuration that can be modified before execution\n *\n * @example\n * ```typescript\n * // This method is called automatically by the framework\n * // No manual invocation needed - shown for illustration:\n *\n * const exchange = {\n * request: { body: { stream: true, messages: [...] 
} },\n * resultExtractor: undefined // Will be set by beforeExecute\n * };\n *\n * client.beforeExecute(exchange);\n * // exchange.resultExtractor is now CompletionStreamResultExtractor\n * ```\n */\n beforeExecute(exchange: FetchExchange): void {\n const chatRequest = exchange.request.body as ChatRequest;\n if (chatRequest.stream) {\n exchange.resultExtractor = CompletionStreamResultExtractor;\n }\n }\n\n /**\n * Creates a chat completion using OpenAI's Chat Completions API.\n *\n * This is the primary method for interacting with OpenAI's chat completion models.\n * It supports both synchronous and streaming response modes, automatically handling\n * the appropriate response processing based on the `stream` parameter.\n *\n * The method uses advanced TypeScript conditional types to provide type-safe return values:\n * - When `stream: true`, returns a `JsonServerSentEventStream<ChatResponse>` for real-time streaming\n * - When `stream: false` or omitted, returns a standard `ChatResponse` object\n *\n * Streaming responses automatically terminate when the API sends a '[DONE]' signal,\n * eliminating the need for manual stream management.\n *\n * @template T - The chat request type, constrained to ChatRequest for type safety\n * @param chatRequest - Complete chat request configuration including:\n * - `model`: The model to use (e.g., 'gpt-3.5-turbo', 'gpt-4')\n * - `messages`: Array of chat messages with role/content pairs\n * - `stream`: Optional boolean to enable streaming responses\n * - `temperature`: Optional creativity control (0.0 to 2.0)\n * - `max_tokens`: Optional maximum response length\n * - `top_p`: Optional nucleus sampling parameter\n * - `frequency_penalty`: Optional repetition control (-2.0 to 2.0)\n * - `presence_penalty`: Optional topic diversity control (-2.0 to 2.0)\n * - `stop`: Optional stop sequences (string or string array)\n * - Additional OpenAI API parameters as needed\n * @returns Promise resolving to:\n * - 
`JsonServerSentEventStream<ChatResponse>` when `T['stream'] extends true`\n * - `ChatResponse` when streaming is disabled or undefined\n * @throws {Error} Network errors, authentication failures, or API errors\n * @throws {EventStreamConvertError} When streaming response cannot be processed\n *\n * @example\n * ```typescript\n * // Basic non-streaming completion\n * const response = await client.completions({\n * model: 'gpt-3.5-turbo',\n * messages: [\n * { role: 'system', content: 'You are a helpful assistant.' },\n * { role: 'user', content: 'What is TypeScript?' }\n * ],\n * temperature: 0.7,\n * max_tokens: 150\n * });\n *\n * console.log(response.choices[0].message.content);\n * // Output: TypeScript is a programming language developed by Microsoft...\n * ```\n *\n * @example\n * ```typescript\n * // Streaming completion with real-time output\n * const stream = await client.completions({\n * model: 'gpt-4',\n * messages: [{ role: 'user', content: 'Write a short story' }],\n * stream: true,\n * temperature: 0.8\n * });\n *\n * for await (const chunk of stream) {\n * const content = chunk.choices[0]?.delta?.content;\n * if (content) {\n * process.stdout.write(content); // Real-time streaming output\n * }\n * }\n * ```\n *\n * @example\n * ```typescript\n * // Advanced configuration with multiple parameters\n * const response = await client.completions({\n * model: 'gpt-3.5-turbo',\n * messages: [{ role: 'user', content: 'Explain quantum computing' }],\n * temperature: 0.3, // Lower temperature for more focused responses\n * max_tokens: 500, // Limit response length\n * top_p: 0.9, // Nucleus sampling\n * frequency_penalty: 0.1, // Reduce repetition\n * presence_penalty: 0.1, // Encourage topic diversity\n * stop: ['###', 'END'] // Custom stop sequences\n * });\n * ```\n *\n * @example\n * ```typescript\n * // Error handling\n * try {\n * const response = await client.completions({\n * model: 'gpt-3.5-turbo',\n * messages: [{ role: 'user', content: 'Hello!' 
}]\n * });\n * console.log('Success:', response);\n * } catch (error) {\n * if (error.response?.status === 401) {\n * console.error('Authentication failed - check API key');\n * } else if (error.response?.status === 429) {\n * console.error('Rate limit exceeded - retry later');\n * } else {\n * console.error('API error:', error.message);\n * }\n * }\n * ```\n */\n @post('/completions')\n completions<T extends ChatRequest = ChatRequest>(\n @body() chatRequest: T,\n ): Promise<\n T['stream'] extends true\n ? JsonServerSentEventStream<ChatResponse>\n : ChatResponse\n > {\n throw autoGeneratedError(chatRequest);\n }\n}\n","/*\n * Copyright [2021-present] [ahoo wang <ahoowang@qq.com> (https://github.com/Ahoo-Wang)].\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n * http://www.apache.org/licenses/LICENSE-2.0\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { ChatClient } from './chat';\nimport { BaseURLCapable, Fetcher } from '@ahoo-wang/fetcher';\n\n/**\n * Configuration options for the OpenAI client.\n *\n * This interface defines the required settings to initialize an OpenAI client instance,\n * including the API endpoint and authentication credentials.\n */\nexport interface OpenAIOptions extends BaseURLCapable {\n /**\n * The base URL for the OpenAI API.\n *\n * This should be the root URL of the OpenAI API service (e.g., 'https://api.openai.com/v1').\n * It is used as the base for all API requests made by the client.\n */\n baseURL: string;\n\n /**\n * The API key for authenticating requests to the OpenAI API.\n *\n * This key must be a valid 
OpenAI API key obtained from the OpenAI platform.\n * It is included in the Authorization header as a Bearer token for all requests.\n */\n apiKey: string;\n}\n\n/**\n * OpenAI client class for interacting with the OpenAI API.\n *\n * This class provides a high-level interface to the OpenAI API, encapsulating HTTP client\n * functionality and specialized clients for different API features. It serves as the main entry\n * point for applications needing to integrate with OpenAI services.\n *\n * @example\n * ```typescript\n * const client = new OpenAI({\n * baseURL: 'https://api.openai.com/v1',\n * apiKey: 'your-api-key-here'\n * });\n *\n * // Use the chat client for completions\n * const response = await client.chat.completions.create({\n * model: 'gpt-3.5-turbo',\n * messages: [{ role: 'user', content: 'Hello!' }]\n * });\n * ```\n */\nexport class OpenAI {\n /**\n * HTTP client instance for making requests to the OpenAI API.\n *\n * This Fetcher instance is configured with the base URL and authentication headers\n * provided during initialization. 
It handles all underlying HTTP communication.\n */\n public readonly fetcher: Fetcher;\n\n /**\n * Chat completion client for interacting with OpenAI's chat models.\n *\n * Provides methods to create chat completions, manage conversations, and handle\n * streaming responses from chat-based models like GPT-3.5 and GPT-4.\n */\n public readonly chat: ChatClient;\n\n /**\n * Creates an instance of the OpenAI client.\n *\n * Initializes the client with the provided configuration options, setting up the\n * HTTP client with proper authentication and creating specialized sub-clients\n * for different API features.\n *\n * @param options - Configuration options for the OpenAI client.\n * @throws {Error} If the provided options are invalid or missing required fields.\n * @throws {TypeError} If the apiKey or baseURL are not strings.\n *\n * @example\n * ```typescript\n * const openai = new OpenAI({\n * baseURL: 'https://api.openai.com/v1',\n * apiKey: process.env.OPENAI_API_KEY!\n * });\n * ```\n */\n constructor(options: OpenAIOptions) {\n this.fetcher = new Fetcher({\n baseURL: options.baseURL,\n headers: {\n Authorization: `Bearer ${options.apiKey}`,\n },\n });\n this.chat = new ChatClient({ fetcher: this.fetcher });\n 
}\n}\n"],"names":["DoneDetector","event","CompletionStreamResultExtractor","exchange","ChatClient","apiMetadata","chatRequest","autoGeneratedError","__decorateClass","post","__decorateParam","body","api","OpenAI","options","Fetcher"],"mappings":";;;AAsCO,MAAMA,IAAkC,CAACC,MACvCA,EAAM,SAAS,UAgDXC,IAET,CAACC,MACIA,EAAS,iBAAiB,wBAAwBH,CAAY;;;;;;ACfhE,IAAMI,IAAN,MAAiE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkCtE,YAA4BC,GAA2B;AAA3B,SAAA,cAAAA;AAAA,EAA4B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkCxD,cAAcF,GAA+B;AAE3C,IADoBA,EAAS,QAAQ,KACrB,WACdA,EAAS,kBAAkBD;AAAA,EAE/B;AAAA,EAyGA,YACUI,GAKR;AACA,UAAMC,EAAmBD,CAAW;AAAA,EACtC;AACF;AATEE,EAAA;AAAA,EADCC,EAAK,cAAc;AAAA,EAEjBC,EAAA,GAAAC,EAAA,CAAK;AAAA,GAnLGP,EAkLX,WAAA,eAAA,CAAA;AAlLWA,IAANI,EAAA;AAAA,EADNI,EAAI,MAAM;AAAA,GACER,CAAA;ACdN,MAAMS,EAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoClB,YAAYC,GAAwB;AAClC,SAAK,UAAU,IAAIC,EAAQ;AAAA,MACzB,SAASD,EAAQ;AAAA,MACjB,SAAS;AAAA,QACP,eAAe,UAAUA,EAAQ,MAAM;AAAA,MAAA;AAAA,IACzC,CACD,GACD,KAAK,OAAO,IAAIV,EAAW,EAAE,SAAS,KAAK,SAAS;AAAA,EACtD;AACF;"}
|
|
@@ -0,0 +1,2 @@
|
|
|
1
|
+
(function(e,n){typeof exports=="object"&&typeof module<"u"?n(exports,require("@ahoo-wang/fetcher-decorator"),require("@ahoo-wang/fetcher-eventstream"),require("@ahoo-wang/fetcher")):typeof define=="function"&&define.amd?define(["exports","@ahoo-wang/fetcher-decorator","@ahoo-wang/fetcher-eventstream","@ahoo-wang/fetcher"],n):(e=typeof globalThis<"u"?globalThis:e||self,n(e.FetcherOpenAI={},e.FetcherDecorator,e.FetcherEventStream,e.Fetcher))})(this,(function(e,n,v,d){"use strict";const s=r=>r.data==="[DONE]",u=r=>r.requiredResponse.requiredJsonEventStream(s);var p=Object.defineProperty,l=Object.getOwnPropertyDescriptor,f=(r,t,a,c)=>{for(var o=c>1?void 0:c?l(t,a):t,i=r.length-1,h;i>=0;i--)(h=r[i])&&(o=(c?h(t,a,o):h(o))||o);return c&&o&&p(t,a,o),o},m=(r,t)=>(a,c)=>t(a,c,r);e.ChatClient=class{constructor(t){this.apiMetadata=t}beforeExecute(t){t.request.body.stream&&(t.resultExtractor=u)}completions(t){throw n.autoGeneratedError(t)}},f([n.post("/completions"),m(0,n.body())],e.ChatClient.prototype,"completions",1),e.ChatClient=f([n.api("chat")],e.ChatClient);class C{constructor(t){this.fetcher=new d.Fetcher({baseURL:t.baseURL,headers:{Authorization:`Bearer ${t.apiKey}`}}),this.chat=new e.ChatClient({fetcher:this.fetcher})}}e.CompletionStreamResultExtractor=u,e.DoneDetector=s,e.OpenAI=C,Object.defineProperty(e,Symbol.toStringTag,{value:"Module"})}));
|
|
2
|
+
//# sourceMappingURL=index.umd.js.map
|