llmjs2 1.0.1 → 1.0.6
This diff shows the content changes between publicly available package versions, as published to one of the supported registries, and is provided for informational purposes only.
- package/README.md +74 -417
- package/grapes.jpg +0 -0
- package/index.d.ts +43 -0
- package/index.js +465 -0
- package/package.json +10 -42
- package/spec.txt +73 -0
- package/test-generate-tools-suite.js +100 -0
- package/test-generate-tools.js +57 -0
- package/test-generate.js +31 -0
- package/test.js +33 -0
- package/LICENSE +0 -21
- package/dist/agent.d.ts +0 -80
- package/dist/agent.d.ts.map +0 -1
- package/dist/agent.js +0 -199
- package/dist/agent.js.map +0 -1
- package/dist/index.d.ts +0 -74
- package/dist/index.d.ts.map +0 -1
- package/dist/index.js +0 -191
- package/dist/index.js.map +0 -1
- package/dist/providers/base.d.ts +0 -58
- package/dist/providers/base.d.ts.map +0 -1
- package/dist/providers/base.js +0 -149
- package/dist/providers/base.js.map +0 -1
- package/dist/providers/index.d.ts +0 -8
- package/dist/providers/index.d.ts.map +0 -1
- package/dist/providers/index.js +0 -7
- package/dist/providers/index.js.map +0 -1
- package/dist/providers/ollama.d.ts +0 -42
- package/dist/providers/ollama.d.ts.map +0 -1
- package/dist/providers/ollama.js +0 -260
- package/dist/providers/ollama.js.map +0 -1
- package/dist/providers/openai.d.ts +0 -38
- package/dist/providers/openai.d.ts.map +0 -1
- package/dist/providers/openai.js +0 -322
- package/dist/providers/openai.js.map +0 -1
- package/dist/types.d.ts +0 -191
- package/dist/types.d.ts.map +0 -1
- package/dist/types.js +0 -6
- package/dist/types.js.map +0 -1
- package/src/agent.ts +0 -295
- package/src/index.ts +0 -268
- package/src/providers/base.ts +0 -216
- package/src/providers/index.ts +0 -8
- package/src/providers/ollama.ts +0 -429
- package/src/providers/openai.ts +0 -521
- package/src/types.ts +0 -243
package/src/index.ts
DELETED
@@ -1,268 +0,0 @@

/**
 * llmjs2 - Enterprise-grade LLM abstraction layer
 * Unified API for OpenAI and Ollama
 */

import {
  CompletionRequest,
  CompletionResponse,
  CompletionChunk,
  CompletionOptions,
  ProviderType,
  IProvider,
} from './types.js';
import { OpenAIProvider } from './providers/openai.js';
import { OllamaProvider } from './providers/ollama.js';
import { LLMError } from './providers/base.js';

/**
 * Global configuration for completion
 */
let globalOptions: CompletionOptions = {};

/**
 * Provider cache
 */
const providerCache: Map<string, IProvider> = new Map();

/**
 * Configure global settings
 */
export function configure(options: CompletionOptions): void {
  globalOptions = { ...globalOptions, ...options };
}

/**
 * Get provider type from model string
 */
function getProviderType(model: string): ProviderType {
  if (model.startsWith('openai/')) {
    return 'openai';
  }
  if (model.startsWith('ollama/')) {
    return 'ollama';
  }
  // Default to openai if no prefix specified
  return 'openai';
}

/**
 * Create appropriate provider instance
 */
function createProvider(
  providerType: ProviderType,
  request: CompletionRequest
): IProvider {
  const cacheKey = `${providerType}:${request.apiKey || 'default'}:${request.baseUrl || 'default'}`;

  // Return cached provider if available
  if (providerCache.has(cacheKey)) {
    return providerCache.get(cacheKey)!;
  }

  let provider: IProvider;

  switch (providerType) {
    case 'openai': {
      const apiKey = request.apiKey || process.env.OPENAI_API_KEY;
      if (!apiKey) {
        throw new LLMError(
          'OpenAI API key is required. Pass it as apiKey in the request or set OPENAI_API_KEY environment variable.',
          'MISSING_API_KEY'
        );
      }
      provider = new OpenAIProvider({
        type: 'openai',
        apiKey: apiKey,
        baseUrl: request.baseUrl,
        timeout: request.timeout ?? globalOptions.globalTimeout,
        retry: request.retry ?? globalOptions.globalRetry,
      });
      break;
    }

    case 'ollama': {
      provider = new OllamaProvider({
        type: 'ollama',
        baseUrl: request.baseUrl,
        timeout: request.timeout ?? globalOptions.globalTimeout,
        retry: request.retry ?? globalOptions.globalRetry,
      });
      break;
    }

    default:
      throw new LLMError(`Unsupported provider: ${providerType}`, 'UNKNOWN_PROVIDER');
  }

  // Apply global settings
  if (globalOptions.debug) {
    provider.setDebug(true);
  }

  if (globalOptions.logger) {
    provider.setLogger(globalOptions.logger);
  }

  // Cache the provider
  providerCache.set(cacheKey, provider);

  return provider;
}

/**
 * Create a completion request
 *
 * @example
 * import { completion } from 'llmjs2';
 *
 * const result = await completion({
 *   model: 'openai/gpt-4',
 *   apiKey: 'sk-...',
 *   messages: [
 *     { role: 'user', content: 'Hello!' }
 *   ]
 * });
 *
 * console.log(result.content);
 */
export async function completion(
  request: CompletionRequest
): Promise<CompletionResponse> {
  if (!request.model) {
    throw new LLMError('Model is required', 'MISSING_MODEL');
  }

  const providerType = getProviderType(request.model);
  const provider = createProvider(providerType, request);

  try {
    return await provider.complete(request);
  } catch (error) {
    if (error instanceof LLMError) {
      throw error;
    }
    throw new LLMError(
      `Completion failed: ${error instanceof Error ? error.message : String(error)}`,
      'COMPLETION_FAILED',
      undefined,
      error
    );
  }
}

/**
 * Stream a completion request
 *
 * @example
 * import { streamCompletion } from 'llmjs2';
 *
 * const stream = streamCompletion({
 *   model: 'openai/gpt-4',
 *   apiKey: 'sk-...',
 *   messages: [
 *     { role: 'user', content: 'Write a poem' }
 *   ]
 * });
 *
 * for await (const chunk of stream) {
 *   process.stdout.write(chunk.delta);
 * }
 */
export async function* streamCompletion(
  request: CompletionRequest
): AsyncIterable<CompletionChunk> {
  if (!request.model) {
    throw new LLMError('Model is required', 'MISSING_MODEL');
  }

  const providerType = getProviderType(request.model);
  const provider = createProvider(providerType, request);

  try {
    yield* provider.completeStream(request);
  } catch (error) {
    if (error instanceof LLMError) {
      throw error;
    }
    throw new LLMError(
      `Stream failed: ${error instanceof Error ? error.message : String(error)}`,
      'STREAM_FAILED',
      undefined,
      error
    );
  }
}

/**
 * Validate provider connectivity and configuration
 */
export async function validateProvider(
  model: string,
  apiKey?: string,
  baseUrl?: string
): Promise<void> {
  const providerType = getProviderType(model);

  const request: CompletionRequest = {
    model,
    apiKey,
    baseUrl,
    messages: [{ role: 'user', content: 'test' }],
  };

  const provider = createProvider(providerType, request);

  try {
    await provider.validate();
  } catch (error) {
    if (error instanceof LLMError) {
      throw error;
    }
    throw new LLMError(
      `Validation failed: ${error instanceof Error ? error.message : String(error)}`,
      'VALIDATION_FAILED'
    );
  }
}

/**
 * Clear provider cache
 */
export function clearProviderCache(): void {
  providerCache.clear();
}

/**
 * Export types for consumers
 */
export type {
  CompletionRequest,
  CompletionResponse,
  CompletionChunk,
  CompletionOptions,
  Message,
  MessageRole,
  Tool,
  ProviderType,
  ProviderConfig,
  ProviderError,
} from './types.js';

/**
 * Export error class
 */
export { LLMError };

/**
 * Export provider classes for advanced use cases
 */
export { OpenAIProvider } from './providers/openai.js';
export { OllamaProvider } from './providers/ollama.js';
export { BaseProvider } from './providers/base.js';

/**
 * Export Agent for stateful conversations
 */
export { Agent } from './agent.js';
export type { AgentConfig, AgentGenerateRequest, AgentGenerateResponse } from './agent.js';
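For context, the removed 1.0.1 entry point was consumed roughly as sketched below. This is reconstructed from the @example blocks in the file above; the `ollama/llama3` model id, the localhost base URL, and the specific configure() options are illustrative assumptions, not documented values.

import { configure, completion, streamCompletion, LLMError } from 'llmjs2';

// Global options are merged into every provider created afterwards.
configure({ debug: true, globalTimeout: 30000 });

// Non-streaming call; the 'openai/' model prefix selects the provider.
const result = await completion({
  model: 'openai/gpt-4',
  apiKey: process.env.OPENAI_API_KEY,
  messages: [{ role: 'user', content: 'Hello!' }],
});
console.log(result.content);

// Streaming call; each chunk carries a `delta` string.
try {
  const stream = streamCompletion({
    model: 'ollama/llama3', // hypothetical local model id
    baseUrl: 'http://localhost:11434', // assumed Ollama default port
    messages: [{ role: 'user', content: 'Write a poem' }],
  });
  for await (const chunk of stream) {
    process.stdout.write(chunk.delta);
  }
} catch (error) {
  if (error instanceof LLMError) {
    console.error(error.code, error.message);
  }
}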
package/src/providers/base.ts
DELETED
@@ -1,216 +0,0 @@

/**
 * Base provider class with common functionality
 */

import {
  IProvider,
  CompletionRequest,
  CompletionResponse,
  CompletionChunk,
  ProviderConfig,
  ProviderError,
} from '../types.js';

/**
 * Validation helper for requests
 */
export function validateCompletionRequest(request: CompletionRequest): void {
  if (!request.model) {
    throw new Error('Model is required');
  }

  if (!Array.isArray(request.messages) || request.messages.length === 0) {
    throw new Error('Messages array is required and must not be empty');
  }

  // Validate message structure
  for (const msg of request.messages) {
    if (!msg.role || msg.content === undefined || msg.content === null) {
      throw new Error('Each message must have role and content');
    }
    if (!['system', 'user', 'assistant'].includes(msg.role)) {
      throw new Error(`Invalid role: ${msg.role}`);
    }
    if (typeof msg.content !== 'string') {
      throw new Error('Message content must be a string');
    }
  }

  // Validate numeric ranges
  if (request.temperature !== undefined && request.temperature !== null) {
    if (typeof request.temperature !== 'number' || request.temperature < 0 || request.temperature > 2) {
      throw new Error('Temperature must be a number between 0 and 2');
    }
  }

  if (request.topP !== undefined && request.topP !== null) {
    if (typeof request.topP !== 'number' || request.topP < 0 || request.topP > 1) {
      throw new Error('topP must be a number between 0 and 1');
    }
  }

  if (request.maxTokens !== undefined && request.maxTokens !== null) {
    if (typeof request.maxTokens !== 'number' || request.maxTokens < 1) {
      throw new Error('maxTokens must be a number greater than 0');
    }
  }

  if (request.frequencyPenalty !== undefined && request.frequencyPenalty !== null) {
    if (typeof request.frequencyPenalty !== 'number' || request.frequencyPenalty < -2 || request.frequencyPenalty > 2) {
      throw new Error('frequencyPenalty must be a number between -2 and 2');
    }
  }

  if (request.presencePenalty !== undefined && request.presencePenalty !== null) {
    if (typeof request.presencePenalty !== 'number' || request.presencePenalty < -2 || request.presencePenalty > 2) {
      throw new Error('presencePenalty must be a number between -2 and 2');
    }
  }
}

/**
 * Custom error class for provider errors
 */
export class LLMError extends Error implements ProviderError {
  code?: string;
  statusCode?: number;
  details?: unknown;
  retryable?: boolean;

  constructor(
    message: string,
    code?: string,
    statusCode?: number,
    details?: unknown,
    retryable?: boolean
  ) {
    super(message);
    this.name = 'LLMError';
    this.code = code;
    this.statusCode = statusCode;
    this.details = details;
    this.retryable = retryable ?? false;
  }
}

/**
 * Retry helper with exponential backoff
 */
export async function withRetry<T>(
  fn: () => Promise<T>,
  options: {
    maxRetries?: number;
    backoffMultiplier?: number;
    initialDelayMs?: number;
  } = {}
): Promise<T> {
  const maxRetries = options.maxRetries ?? 3;
  const backoffMultiplier = options.backoffMultiplier ?? 2;
  const initialDelayMs = options.initialDelayMs ?? 1000;

  let lastError: Error | undefined;

  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      return await fn();
    } catch (error) {
      lastError = error as Error;

      // Check if error is retryable
      const isRetryable =
        error instanceof LLMError
          ? error.retryable
          : error instanceof Error &&
            (error.message.includes('timeout') ||
              error.message.includes('ECONNREFUSED') ||
              error.message.includes('ENOTFOUND'));

      if (!isRetryable || attempt === maxRetries - 1) {
        throw error;
      }

      // Exponential backoff
      const delay = initialDelayMs * Math.pow(backoffMultiplier, attempt);
      await new Promise((resolve) => setTimeout(resolve, delay));
    }
  }

  throw lastError || new Error('Retry failed');
}

/**
 * Base provider class
 */
export abstract class BaseProvider implements IProvider {
  protected config: ProviderConfig;
  protected debug: boolean;
  protected logger: (level: string, message: string, data?: unknown) => void;

  constructor(config: ProviderConfig) {
    this.config = config;
    this.debug = false;
    this.logger = (level: string, message: string, data?: unknown) => {
      if (this.debug) {
        console.log(`[${level}] ${message}`, data ?? '');
      }
    };
  }

  setDebug(debug: boolean): void {
    this.debug = debug;
  }

  setLogger(
    logger: (level: string, message: string, data?: unknown) => void
  ): void {
    this.logger = logger;
  }

  abstract complete(request: CompletionRequest): Promise<CompletionResponse>;

  abstract completeStream(
    request: CompletionRequest
  ): AsyncIterable<CompletionChunk>;

  abstract validate(): Promise<void>;

  abstract parseModel(model: string): string;

  /**
   * Get the timeout for a request
   */
  protected getTimeout(request?: CompletionRequest): number {
    return (
      request?.timeout ??
      this.config.timeout ??
      30000 // Default 30 seconds
    );
  }

  /**
   * Get retry config
   */
  protected getRetryConfig(request?: CompletionRequest) {
    return {
      maxRetries: request?.retry?.maxRetries ?? this.config.retry?.maxRetries ?? 3,
      backoffMultiplier:
        request?.retry?.backoffMultiplier ??
        this.config.retry?.backoffMultiplier ??
        2,
      initialDelayMs:
        request?.retry?.initialDelayMs ??
        this.config.retry?.initialDelayMs ??
        1000,
    };
  }

  /**
   * Get headers including custom headers
   */
  protected getHeaders(request?: CompletionRequest): Record<string, string> {
    return {
      ...(this.config.headers ?? {}),
      ...(request?.headers ?? {}),
    };
  }
}
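The abstract class above defines four extension points (complete, completeStream, validate, parseModel) plus protected helpers for timeouts, retries, and headers. Below is a minimal sketch of how a custom provider could have plugged into them in the deleted 1.0.1 source tree; the EchoProvider name and its echo behavior are hypothetical, used only to show where validateCompletionRequest, withRetry, and getRetryConfig slot in.

import {
  CompletionRequest,
  CompletionResponse,
  CompletionChunk,
} from '../types.js';
import { BaseProvider, validateCompletionRequest, withRetry } from './base.js';

// Hypothetical provider: echoes the last message instead of calling a real API.
class EchoProvider extends BaseProvider {
  parseModel(model: string): string {
    // Mirror the 'openai/' and 'ollama/' prefix convention with an assumed 'echo/' prefix.
    return model.replace(/^echo\//, '');
  }

  async validate(): Promise<void> {
    // A real provider would ping its endpoint here.
  }

  async complete(request: CompletionRequest): Promise<CompletionResponse> {
    validateCompletionRequest(request);
    // withRetry re-invokes the callback with exponential backoff when the
    // error is an LLMError marked retryable, or looks like a network timeout.
    return withRetry(async () => {
      const last = request.messages[request.messages.length - 1];
      return { content: last.content } as CompletionResponse;
    }, this.getRetryConfig(request));
  }

  async *completeStream(request: CompletionRequest): AsyncIterable<CompletionChunk> {
    const { content } = await this.complete(request);
    for (const word of content.split(' ')) {
      yield { delta: `${word} ` } as CompletionChunk;
    }
  }
}

The `as` casts hedge over fields of CompletionResponse and CompletionChunk that the deleted types.ts may have required beyond the `content` and `delta` properties shown in the JSDoc examples.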