llmflow 0.3.1
This diff shows the content of publicly available package versions released to one of the supported registries, as it appears in their respective public registries; it is provided for informational purposes only.
- package/README.md +142 -0
- package/bin/llmflow.js +91 -0
- package/db.js +857 -0
- package/logger.js +122 -0
- package/otlp-export.js +564 -0
- package/otlp-logs.js +238 -0
- package/otlp-metrics.js +300 -0
- package/otlp.js +398 -0
- package/package.json +62 -0
- package/pricing.fallback.json +58 -0
- package/pricing.js +154 -0
- package/providers/anthropic.js +195 -0
- package/providers/azure.js +159 -0
- package/providers/base.js +145 -0
- package/providers/cohere.js +225 -0
- package/providers/gemini.js +278 -0
- package/providers/index.js +130 -0
- package/providers/ollama.js +36 -0
- package/providers/openai-compatible.js +77 -0
- package/providers/openai.js +217 -0
- package/providers/passthrough.js +573 -0
- package/public/app.js +1484 -0
- package/public/index.html +367 -0
- package/public/style.css +1152 -0
- package/server.js +1222 -0

package/providers/anthropic.js
@@ -0,0 +1,195 @@
const BaseProvider = require('./base');

/**
 * Anthropic Claude provider.
 * Handles request/response transformation and different streaming format.
 */
class AnthropicProvider extends BaseProvider {
  constructor(config = {}) {
    super();
    this.name = 'anthropic';
    this.displayName = 'Anthropic Claude';
    this.hostname = config.hostname || 'api.anthropic.com';
    this.apiVersion = config.apiVersion || '2023-06-01';
  }

  getTarget(req) {
    let path = req.path;

    // Map OpenAI-style paths to Anthropic paths
    if (path === '/v1/chat/completions') {
      path = '/v1/messages';
    }

    return {
      hostname: this.hostname,
      port: 443,
      path: path,
      protocol: 'https'
    };
  }

  transformRequestHeaders(headers, req) {
    // Anthropic uses x-api-key instead of Authorization Bearer
    let apiKey = headers.authorization;
    if (apiKey && apiKey.startsWith('Bearer ')) {
      apiKey = apiKey.slice(7);
    }

    // Also check for x-api-key header directly
    apiKey = headers['x-api-key'] || apiKey;

    return {
      'Content-Type': 'application/json',
      'x-api-key': apiKey,
      'anthropic-version': this.apiVersion
    };
  }

  transformRequestBody(body, req) {
    if (!body || !body.messages) {
      return body;
    }

    const transformed = {
      model: body.model,
      max_tokens: body.max_tokens || 4096, // Required field for Anthropic
      stream: body.stream || false
    };

    // Extract system message
    const systemMessages = body.messages.filter(m => m.role === 'system');
    const otherMessages = body.messages.filter(m => m.role !== 'system');

    if (systemMessages.length > 0) {
      transformed.system = systemMessages.map(m => m.content).join('\n');
    }

    // Transform messages (Anthropic expects role to be 'user' or 'assistant')
    transformed.messages = otherMessages.map(msg => ({
      role: msg.role === 'assistant' ? 'assistant' : 'user',
      content: msg.content
    }));

    // Copy over optional parameters
    if (body.temperature !== undefined) transformed.temperature = body.temperature;
    if (body.top_p !== undefined) transformed.top_p = body.top_p;
    if (body.stop) transformed.stop_sequences = Array.isArray(body.stop) ? body.stop : [body.stop];

    return transformed;
  }

  normalizeResponse(body, req) {
    if (!body || body.error) {
      return { data: body, usage: null, model: req.body?.model };
    }

    // Extract text content from content blocks
    let textContent = '';
    if (Array.isArray(body.content)) {
      textContent = body.content
        .filter(block => block.type === 'text')
        .map(block => block.text)
        .join('');
    }

    // Map stop_reason to finish_reason
    const finishReasonMap = {
      'end_turn': 'stop',
      'stop_sequence': 'stop',
      'max_tokens': 'length'
    };

    const normalized = {
      id: body.id,
      object: 'chat.completion',
      model: body.model,
      choices: [{
        index: 0,
        message: {
          role: 'assistant',
          content: textContent
        },
        finish_reason: finishReasonMap[body.stop_reason] || body.stop_reason
      }],
      usage: {
        prompt_tokens: body.usage?.input_tokens || 0,
        completion_tokens: body.usage?.output_tokens || 0,
        total_tokens: (body.usage?.input_tokens || 0) + (body.usage?.output_tokens || 0)
      }
    };

    return {
      data: normalized,
      usage: normalized.usage,
      model: body.model
    };
  }

  parseStreamChunk(chunk) {
    const lines = chunk.split('\n');
    let content = '';
    let usage = null;
    let done = false;

    for (const line of lines) {
      const trimmed = line.trim();

      // Handle event: lines
      if (trimmed.startsWith('event:')) {
        const eventType = trimmed.slice(6).trim();
        if (eventType === 'message_stop') {
          done = true;
        }
        continue;
      }

      if (!trimmed.startsWith('data:')) continue;

      const payload = trimmed.slice(5).trim();
      if (!payload) continue;

      try {
        const json = JSON.parse(payload);

        // Handle different event types
        if (json.type === 'content_block_delta') {
          if (json.delta?.type === 'text_delta') {
            content += json.delta.text || '';
          }
        } else if (json.type === 'message_delta') {
          if (json.usage) {
            usage = {
              prompt_tokens: 0, // Not provided in delta
              completion_tokens: json.usage.output_tokens || 0,
              total_tokens: json.usage.output_tokens || 0
            };
          }
        } else if (json.type === 'message_start' && json.message?.usage) {
          // Initial usage from message_start
          usage = {
            prompt_tokens: json.message.usage.input_tokens || 0,
            completion_tokens: 0,
            total_tokens: json.message.usage.input_tokens || 0
          };
        }
      } catch {
        // Ignore parse errors
      }
    }

    return { content, usage, done };
  }

  extractUsage(response) {
    const usage = response.usage || {};
    return {
      prompt_tokens: usage.input_tokens || usage.prompt_tokens || 0,
      completion_tokens: usage.output_tokens || usage.completion_tokens || 0,
      total_tokens: (usage.input_tokens || usage.prompt_tokens || 0) +
        (usage.output_tokens || usage.completion_tokens || 0)
    };
  }
}

module.exports = AnthropicProvider;
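
For orientation, a minimal usage sketch of the request transformation above; the model name and messages are hypothetical, and the require path assumes the sketch sits at the package root:

// Sketch only: exercises AnthropicProvider.transformRequestBody as defined
// above with a made-up OpenAI-style body.
const AnthropicProvider = require('./providers/anthropic');

const provider = new AnthropicProvider();
const result = provider.transformRequestBody({
  model: 'claude-3-haiku-20240307',   // hypothetical model name
  messages: [
    { role: 'system', content: 'You are terse.' },
    { role: 'user', content: 'Hello' }
  ],
  temperature: 0.2,
  stop: '\n\n'
}, {});
// result:
// {
//   model: 'claude-3-haiku-20240307',
//   max_tokens: 4096,            // defaulted; required by Anthropic
//   stream: false,
//   system: 'You are terse.',    // system messages lifted to a top-level field
//   messages: [ { role: 'user', content: 'Hello' } ],
//   temperature: 0.2,
//   stop_sequences: [ '\n\n' ]   // string stop normalized to an array
// }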

package/providers/azure.js
@@ -0,0 +1,159 @@
const BaseProvider = require('./base');

/**
 * Azure OpenAI provider.
 *
 * Key differences from OpenAI:
 * - Endpoint: https://{resource}.openai.azure.com/openai/deployments/{deployment}/{endpoint}?api-version={version}
 * - Uses api-key header instead of Authorization Bearer
 * - Model name in request is mapped to deployment name in URL
 * - api-version query parameter is required
 * - Request/response format is same as OpenAI
 */
class AzureOpenAIProvider extends BaseProvider {
  constructor(config = {}) {
    super();
    this.name = 'azure';
    this.displayName = 'Azure OpenAI';

    // Azure configuration from environment or config
    this.resource = config.resource || process.env.AZURE_OPENAI_RESOURCE;
    this.apiVersion = config.apiVersion || process.env.AZURE_OPENAI_API_VERSION || '2024-02-01';

    // Optional: deployment name mapping (model -> deployment)
    this.deploymentMap = config.deploymentMap || {};
  }

  /**
   * Map OpenAI model name to Azure deployment name
   * Azure deployments often have dots removed (gpt-3.5-turbo -> gpt-35-turbo)
   */
  getDeploymentName(model) {
    // Check explicit mapping first
    if (this.deploymentMap[model]) {
      return this.deploymentMap[model];
    }

    // Check environment variable for specific model
    const envKey = `AZURE_DEPLOYMENT_${model.replace(/[.-]/g, '_').toUpperCase()}`;
    if (process.env[envKey]) {
      return process.env[envKey];
    }

    // Default: use model name as deployment (common pattern)
    // Also try removing dots (gpt-3.5-turbo -> gpt-35-turbo)
    return model.replace(/\./g, '');
  }

  /**
   * Extract Azure resource name from headers or use configured default
   */
  getResourceName(headers) {
    // Allow override via header
    const headerResource = headers?.['x-azure-resource'] || headers?.['x-llmflow-azure-resource'];
    if (headerResource) return headerResource;

    // Use configured resource
    if (this.resource) return this.resource;

    // Try environment variable
    return process.env.AZURE_OPENAI_RESOURCE || 'azure-openai';
  }

  getTarget(req) {
    const model = req.body?.model || 'gpt-4';
    const deployment = this.getDeploymentName(model);
    const resource = this.getResourceName(req.headers);

    // Map OpenAI path to Azure path
    let endpoint = req.path;
    if (endpoint.startsWith('/v1/')) {
      endpoint = endpoint.slice(3); // Remove /v1 prefix
    }

    // Build Azure path: /openai/deployments/{deployment}/{endpoint}?api-version={version}
    const path = `/openai/deployments/${deployment}${endpoint}?api-version=${this.apiVersion}`;

    return {
      hostname: `${resource}.openai.azure.com`,
      port: 443,
      path: path,
      protocol: 'https'
    };
  }

  transformRequestHeaders(headers, req) {
    // Azure uses api-key header instead of Authorization Bearer
    let apiKey = headers?.authorization;
    if (apiKey && apiKey.startsWith('Bearer ')) {
      apiKey = apiKey.slice(7);
    }

    // Also check for direct api-key header
    apiKey = headers?.['api-key'] || apiKey;

    return {
      'Content-Type': 'application/json',
      'api-key': apiKey
    };
  }

  // Request body format is same as OpenAI, no transformation needed
  transformRequestBody(body, req) {
    return body;
  }

  // Response format is same as OpenAI, use base implementation
  normalizeResponse(body, req) {
    if (!body || body.error) {
      return { data: body, usage: null, model: req.body?.model };
    }

    return {
      data: body,
      usage: body.usage || null,
      model: body.model || req.body?.model || 'unknown'
    };
  }

  // Streaming format is same as OpenAI
  parseStreamChunk(chunk) {
    const lines = chunk.split('\n');
    let content = '';
    let usage = null;
    let done = false;

    for (const line of lines) {
      const trimmed = line.trim();
      if (!trimmed.startsWith('data:')) continue;

      const payload = trimmed.slice(5).trim();
      if (payload === '[DONE]') {
        done = true;
        continue;
      }

      try {
        const json = JSON.parse(payload);
        const delta = json.choices?.[0]?.delta?.content;
        if (delta) content += delta;
        if (json.usage) usage = json.usage;
      } catch {
        // Ignore parse errors
      }
    }

    return { content, usage, done };
  }

  extractUsage(response) {
    const usage = response.usage || {};
    return {
      prompt_tokens: usage.prompt_tokens || 0,
      completion_tokens: usage.completion_tokens || 0,
      total_tokens: usage.total_tokens || (usage.prompt_tokens || 0) + (usage.completion_tokens || 0)
    };
  }
}

module.exports = AzureOpenAIProvider;
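
To make the endpoint construction in the header comment concrete, a sketch with a hypothetical resource and deployment mapping (actual values depend on your Azure setup, and the require path assumes the package root):

// Sketch only: shows what AzureOpenAIProvider.getTarget() produces for a
// made-up resource/deployment. api-version falls back to '2024-02-01'
// unless config or AZURE_OPENAI_API_VERSION overrides it.
const AzureOpenAIProvider = require('./providers/azure');

const provider = new AzureOpenAIProvider({
  resource: 'my-resource',                    // hypothetical Azure resource name
  deploymentMap: { 'gpt-4': 'gpt-4-prod' }    // explicit model -> deployment
});

const target = provider.getTarget({
  path: '/v1/chat/completions',
  headers: {},
  body: { model: 'gpt-4' }
});
// target:
// {
//   hostname: 'my-resource.openai.azure.com',
//   port: 443,
//   path: '/openai/deployments/gpt-4-prod/chat/completions?api-version=2024-02-01',
//   protocol: 'https'
// }
// Without a mapping (and no AZURE_DEPLOYMENT_* override), 'gpt-3.5-turbo'
// would fall back to the dot-stripped deployment name 'gpt-35-turbo'.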

package/providers/base.js
@@ -0,0 +1,145 @@
/**
 * Base provider class defining the interface for all LLM providers.
 * Each provider must implement these methods to handle request/response transformations.
 */
class BaseProvider {
  constructor() {
    this.name = 'base';
    this.displayName = 'Base Provider';
  }

  /**
   * Get the target configuration for the upstream request
   * @param {Object} req - Express request object
   * @returns {Object} { hostname, port, path, protocol }
   */
  getTarget(req) {
    throw new Error('getTarget() must be implemented by provider');
  }

  /**
   * Transform request headers for the upstream provider
   * @param {Object} headers - Original request headers
   * @param {Object} req - Express request object
   * @returns {Object} Transformed headers
   */
  transformRequestHeaders(headers, req) {
    return {
      'Content-Type': 'application/json',
      'Authorization': headers.authorization
    };
  }

  /**
   * Transform request body for the upstream provider
   * @param {Object} body - Original request body
   * @param {Object} req - Express request object
   * @returns {Object} Transformed body
   */
  transformRequestBody(body, req) {
    return body;
  }

  /**
   * Normalize response body to a common format for logging
   * @param {Object} body - Provider response body
   * @param {Object} req - Original request for context
   * @returns {Object} Normalized response with { data, usage, model }
   */
  normalizeResponse(body, req) {
    return {
      data: body,
      usage: body.usage || null,
      model: body.model || req.body?.model || 'unknown'
    };
  }

  /**
   * Parse a streaming chunk and extract content
   * @param {string} chunk - Raw chunk text
   * @returns {Object} { content, usage, done }
   */
  parseStreamChunk(chunk) {
    const lines = chunk.split('\n');
    let content = '';
    let usage = null;
    let done = false;

    for (const line of lines) {
      const trimmed = line.trim();
      if (!trimmed.startsWith('data:')) continue;

      const payload = trimmed.slice(5).trim();
      if (payload === '[DONE]') {
        done = true;
        continue;
      }

      try {
        const json = JSON.parse(payload);
        const delta = json.choices?.[0]?.delta?.content;
        if (delta) content += delta;
        if (json.usage) usage = json.usage;
      } catch {
        // Ignore parse errors
      }
    }

    return { content, usage, done };
  }

  /**
   * Assemble a complete response from streaming chunks
   * @param {string} fullContent - Accumulated content
   * @param {Object} usage - Token usage info
   * @param {Object} req - Original request
   * @param {string} traceId - Trace ID
   * @returns {Object} Assembled response object
   */
  assembleStreamingResponse(fullContent, usage, req, traceId) {
    return {
      id: traceId,
      object: 'chat.completion',
      model: req.body?.model,
      choices: [{
        message: { role: 'assistant', content: fullContent },
        finish_reason: 'stop'
      }],
      usage: usage,
      _streaming: true
    };
  }

  /**
   * Extract usage information from response
   * @param {Object} response - Provider response
   * @returns {Object} { prompt_tokens, completion_tokens, total_tokens }
   */
  extractUsage(response) {
    const usage = response.usage || {};
    return {
      prompt_tokens: usage.prompt_tokens || 0,
      completion_tokens: usage.completion_tokens || 0,
      total_tokens: usage.total_tokens || (usage.prompt_tokens || 0) + (usage.completion_tokens || 0)
    };
  }

  /**
   * Check if streaming is requested
   * @param {Object} req - Express request object
   * @returns {boolean}
   */
  isStreamingRequest(req) {
    return req.body && req.body.stream === true;
  }

  /**
   * Get the HTTP/HTTPS module to use
   * @returns {Object} http or https module
   */
  getHttpModule() {
    return require('https');
  }
}

module.exports = BaseProvider;
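
Since the base class already supplies OpenAI-style header, body, response, and SSE handling, a new provider can be sketched with very little code. The host below is a placeholder, not part of the package:

// Sketch only: a minimal subclass. Everything except getTarget() is
// inherited from BaseProvider; 'api.example.com' is a hypothetical host,
// and the require path assumes the sketch sits inside providers/.
const BaseProvider = require('./base');

class ExampleProvider extends BaseProvider {
  constructor(config = {}) {
    super();
    this.name = 'example';
    this.displayName = 'Example Provider';
    this.hostname = config.hostname || 'api.example.com';
  }

  // Only required override: where to send the upstream request
  getTarget(req) {
    return {
      hostname: this.hostname,
      port: 443,
      path: req.path,
      protocol: 'https'
    };
  }
}

module.exports = ExampleProvider;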