@revenium/perplexity 1.0.25 → 2.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +443 -568
- package/dist/cjs/core/config/perplexity-config.js +45 -0
- package/dist/cjs/core/config/perplexity-config.js.map +1 -0
- package/dist/cjs/core/config/revenium-config.js +80 -0
- package/dist/cjs/core/config/revenium-config.js.map +1 -0
- package/dist/cjs/core/tracking/metering.js +131 -0
- package/dist/cjs/core/tracking/metering.js.map +1 -0
- package/dist/cjs/core/wrapper/perplexity-client.js +177 -0
- package/dist/cjs/core/wrapper/perplexity-client.js.map +1 -0
- package/dist/cjs/index.js +64 -0
- package/dist/cjs/index.js.map +1 -0
- package/dist/cjs/types/index.js +21 -0
- package/dist/cjs/types/index.js.map +1 -0
- package/dist/cjs/utils/logger.js +23 -0
- package/dist/cjs/utils/logger.js.map +1 -0
- package/dist/esm/core/config/perplexity-config.js +40 -0
- package/dist/esm/core/config/perplexity-config.js.map +1 -0
- package/dist/esm/core/config/revenium-config.js +72 -0
- package/dist/esm/core/config/revenium-config.js.map +1 -0
- package/dist/esm/core/tracking/metering.js +126 -0
- package/dist/esm/core/tracking/metering.js.map +1 -0
- package/dist/esm/core/wrapper/perplexity-client.js +170 -0
- package/dist/esm/core/wrapper/perplexity-client.js.map +1 -0
- package/dist/esm/index.js +44 -0
- package/dist/esm/index.js.map +1 -0
- package/dist/esm/types/index.js +18 -0
- package/dist/esm/types/index.js.map +1 -0
- package/dist/esm/utils/logger.js +20 -0
- package/dist/esm/utils/logger.js.map +1 -0
- package/dist/types/core/config/perplexity-config.d.ts +24 -0
- package/dist/types/core/config/perplexity-config.d.ts.map +1 -0
- package/dist/types/core/config/revenium-config.d.ts +37 -0
- package/dist/types/core/config/revenium-config.d.ts.map +1 -0
- package/dist/types/core/tracking/metering.d.ts +31 -0
- package/dist/types/core/tracking/metering.d.ts.map +1 -0
- package/dist/types/core/wrapper/perplexity-client.d.ts +32 -0
- package/dist/types/core/wrapper/perplexity-client.d.ts.map +1 -0
- package/dist/types/index.d.ts +34 -0
- package/dist/types/index.d.ts.map +1 -0
- package/dist/types/types/index.d.ts +159 -0
- package/dist/types/types/index.d.ts.map +1 -0
- package/dist/types/utils/logger.d.ts +10 -0
- package/dist/types/utils/logger.d.ts.map +1 -0
- package/package.json +36 -31
- package/.env.example +0 -3
- package/dist/interfaces/meteringResponse.d.ts +0 -28
- package/dist/interfaces/meteringResponse.js +0 -2
- package/dist/models/Metering.js +0 -83
- package/dist/v1/perplexityV1.service.js +0 -166
- package/dist/v2/perplexityV2.service.js +0 -178
- package/examples/v1/basic.ts +0 -50
- package/examples/v1/chat.ts +0 -40
- package/examples/v1/metadata.ts +0 -49
- package/examples/v1/streaming.ts +0 -44
- package/examples/v2/basic.ts +0 -49
- package/examples/v2/chat.ts +0 -60
- package/examples/v2/metadata.ts +0 -71
- package/examples/v2/streaming.ts +0 -61
- package/playground/v1/basic.js +0 -50
- package/playground/v1/chat.js +0 -46
- package/playground/v1/metadata.js +0 -50
- package/playground/v1/streaming.js +0 -44
- package/playground/v2/basic.js +0 -49
- package/playground/v2/chat.js +0 -72
- package/playground/v2/metadata.js +0 -76
- package/playground/v2/streaming.js +0 -67
- package/src/index.ts +0 -17
- package/src/interfaces/chatCompletionRequest.ts +0 -15
- package/src/interfaces/credential.ts +0 -4
- package/src/interfaces/meteringRequest.ts +0 -14
- package/src/interfaces/meteringResponse.ts +0 -29
- package/src/interfaces/operation.ts +0 -4
- package/src/interfaces/perplexityResponse.ts +0 -63
- package/src/interfaces/perplexityStreaming.ts +0 -56
- package/src/interfaces/subscriber.ts +0 -8
- package/src/interfaces/tokenCounts.ts +0 -7
- package/src/interfaces/usageMetadata.ts +0 -27
- package/src/models/Logger.ts +0 -38
- package/src/models/Metering.ts +0 -121
- package/src/utils/calculateDurationMs.ts +0 -3
- package/src/utils/constants/constants.ts +0 -10
- package/src/utils/constants/logLevels.ts +0 -1
- package/src/utils/constants/messages.ts +0 -11
- package/src/utils/constants/models.ts +0 -20
- package/src/utils/constants/perplexityModels.ts +0 -71
- package/src/utils/extractTokenCount.ts +0 -26
- package/src/utils/formatTimestamp.ts +0 -3
- package/src/utils/generateTransactionId.ts +0 -5
- package/src/utils/index.ts +0 -39
- package/src/utils/loadEnv.ts +0 -8
- package/src/utils/safeExtract.ts +0 -67
- package/src/v1/perplexityV1.controller.ts +0 -164
- package/src/v1/perplexityV1.service.ts +0 -230
- package/src/v2/perplexityV2.controller.ts +0 -219
- package/src/v2/perplexityV2.service.ts +0 -260
- package/tsconfig.json +0 -15
package/src/models/Logger.ts
DELETED
@@ -1,38 +0,0 @@
-import { LOG_LEVELS } from "../utils";
-
-export class Logger {
-  private static logLevel: string = process.env.REVENIUM_LOG_LEVEL || "INFO";
-
-  static debug(message: string, data?: any): void {
-    if (this.shouldLog(LOG_LEVELS[0])) {
-      console.log(`[${LOG_LEVELS[0]}] ${message}`, data || "");
-    }
-  }
-
-  static info(message: string, data?: any): void {
-    if (this.shouldLog(LOG_LEVELS[1])) {
-      console.log(`[${LOG_LEVELS[1]}] ${message}`, data || "");
-    }
-  }
-
-  static warning(message: string, data?: any): void {
-    if (this.shouldLog(LOG_LEVELS[2])) {
-      console.warn(`[${LOG_LEVELS[2]}] ${message}`, data || "");
-    }
-  }
-
-  static error(message: string, data?: any): void {
-    if (this.shouldLog(LOG_LEVELS[3])) {
-      console.error(`[${LOG_LEVELS[3]}] ${message}`, data || "");
-    }
-  }
-
-  private static shouldLog(level: string): boolean {
-    const levels = LOG_LEVELS;
-    const currentLevel = levels.indexOf(this.logLevel.toUpperCase());
-    const messageLevel = levels.indexOf(level);
-    return messageLevel >= currentLevel;
-  }
-}
-
-export const logger = Logger;
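For context, a minimal sketch of how the removed `Logger` was driven, based only on the deleted source above; the relative import path assumes the old `src/` layout:

```ts
// Sketch only: relies on the deleted src/models/Logger.ts shown above.
// The level is read from REVENIUM_LOG_LEVEL once at module load (default "INFO"),
// so debug() below is a no-op unless that variable was set to "DEBUG" beforehand.
import { logger } from "./models/Logger";

logger.debug("request payload", { model: "sonar-pro" });
logger.info("chat completion finished");
logger.warning("falling back to default model");
logger.error("metering call failed", new Error("timeout"));
```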
package/src/models/Metering.ts
DELETED
@@ -1,121 +0,0 @@
-import { IMeteringRequest } from "../interfaces/meteringRequest";
-import { IMeteringResponse } from "../interfaces/meteringResponse";
-import {
-  COST_TYPE,
-  CURRENT_CREDENTIAL,
-  MIDDLEWARE_SOURCE,
-  PRODUCT_ID_FREE,
-  REVENIUM_METERING_API_KEY,
-  REVENIUM_METERING_BASE_URL,
-} from "../utils";
-import { calculateDurationMs } from "../utils/calculateDurationMs";
-import { formatTimestamp } from "../utils/formatTimestamp";
-import { generateTransactionId } from "../utils/generateTransactionId";
-import { logger } from "./Logger";
-
-export class Metering {
-  private endpoint: string = "";
-  private apiKey: string = "";
-  constructor(clientApiKey: string, clientEndpoint: string) {
-    this.apiKey = clientApiKey;
-    this.endpoint = clientEndpoint;
-  }
-
-  public createMetering(
-    metering: IMeteringRequest,
-    isStreamed: boolean
-  ): IMeteringResponse {
-    const usageMetadata = metering.usageMetadata;
-    const agent: string = "perplexity";
-    return {
-      stopReason: usageMetadata?.stopReason ?? metering.stopReason,
-      costType: COST_TYPE,
-      isStreamed,
-      taskType: usageMetadata?.taskType ?? COST_TYPE,
-      agent: usageMetadata?.agent ?? agent,
-      operationType:
-        usageMetadata?.operationType ?? metering.operationType.toString(),
-      inputTokenCount:
-        usageMetadata?.inputTokenCount ?? metering.tokenCounts.inputTokens,
-      outputTokenCount:
-        usageMetadata?.outputTokenCount ?? metering.tokenCounts.outputTokens,
-      reasoningTokenCount:
-        usageMetadata?.reasoningTokenCount ??
-        metering.tokenCounts.reasoningTokens ??
-        0,
-      cacheCreationTokenCount:
-        usageMetadata?.cacheCreationTokenCount ??
-        metering.tokenCounts.cachedTokens ??
-        0,
-      cacheReadTokenCount: usageMetadata?.cacheReadTokenCount ?? 0,
-      totalTokenCount:
-        usageMetadata?.totalTokenCount ?? metering.tokenCounts.totalTokens,
-      organizationId:
-        usageMetadata?.organizationId ??
-        `my-customer-name-${generateTransactionId()}`,
-      productId: usageMetadata?.productId ?? PRODUCT_ID_FREE,
-      subscriber: {
-        id: usageMetadata?.subscriberId ?? `user-${generateTransactionId()}`,
-        email:
-          usageMetadata?.subscriberEmail ?? `user-@${agent.toLowerCase()}.ai`,
-        credential:
-          usageMetadata?.subscriberCredentialName &&
-          usageMetadata?.subscriberCredential
-            ? {
-                name: usageMetadata.subscriberCredentialName,
-                value: usageMetadata.subscriberCredential,
-              }
-            : CURRENT_CREDENTIAL,
-      },
-      model: metering.modelName,
-      transactionId: usageMetadata?.transactionId ?? generateTransactionId(),
-      responseTime: formatTimestamp(metering.endTime),
-      requestDuration: calculateDurationMs(
-        metering.startTime,
-        metering.endTime
-      ),
-      provider: agent,
-      requestTime:
-        usageMetadata?.requestTime?.toString() ??
-        formatTimestamp(metering.startTime),
-      completionStartTime:
-        usageMetadata?.completionStartTime?.toString() ??
-        formatTimestamp(metering.endTime),
-      timeToFirstToken: usageMetadata?.timeToFirstToken ?? 0,
-      middleware_source: MIDDLEWARE_SOURCE,
-      traceId: usageMetadata?.traceId,
-    };
-  }
-
-  public sendMeteringData = async (
-    metering: IMeteringResponse
-  ): Promise<void> => {
-    try {
-      // Log the metering data being sent for debugging
-      logger.info(
-        "Sending metering data to Revenium:",
-        JSON.stringify(metering, null, 2)
-      );
-
-      const response = await fetch(`${this.endpoint}/v2/ai/completions`, {
-        method: "POST",
-        headers: {
-          "Content-Type": "application/json",
-          "x-api-key": this.apiKey,
-          accept: "application/json",
-        },
-        body: JSON.stringify(metering),
-      });
-      if (!response.ok) {
-        const errorData = await response?.text();
-        logger.error(
-          `Metering API request failed with status ${response.status} - ${errorData}`
-        );
-        return;
-      }
-      logger.info(`Metering data sent successfully to Revenium`);
-    } catch (error: any) {
-      logger.error(`Error to sent metering data ${error}`);
-    }
-  };
-}
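For context, a minimal sketch of how the removed `Metering` class fits together. The request literal below only mirrors the fields `createMetering()` reads; the real `IMeteringRequest` interface is not reproduced in this diff, and the endpoint and key values are placeholders:

```ts
// Sketch only: relies on the deleted src/models/Metering.ts shown above.
import { Metering } from "./models/Metering";

const metering = new Metering("rev_api_key_placeholder", "https://metering.example.invalid");

const record = metering.createMetering(
  {
    modelName: "sonar-pro",
    operationType: "CHAT",
    stopReason: "stop",
    startTime: Date.now() - 1200,
    endTime: Date.now(),
    tokenCounts: { inputTokens: 42, outputTokens: 180, totalTokens: 222 },
    usageMetadata: { subscriberEmail: "dev@example.com" },
  } as any, // cast because IMeteringRequest's exact shape is not shown in this diff
  false // not a streamed completion
);

// Fire-and-forget POST to `${endpoint}/v2/ai/completions` with the x-api-key header.
await metering.sendMeteringData(record);
```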
package/src/utils/constants/constants.ts
DELETED
@@ -1,10 +0,0 @@
-import { ICredential } from "../../interfaces/credential";
-
-export const PERPLEXITY_API_BASE_URL: string = "https://api.perplexity.ai";
-export const COST_TYPE: string = "AI";
-export const MIDDLEWARE_SOURCE: string = "node";
-export const PRODUCT_ID_FREE: string = "free-trial";
-export const CURRENT_CREDENTIAL: ICredential = {
-  name: "apiKey",
-  value: "keyValue",
-};
package/src/utils/constants/logLevels.ts
DELETED
@@ -1 +0,0 @@
-export const LOG_LEVELS: string[] = ["DEBUG", "INFO", "WARNING", "ERROR"];
package/src/utils/constants/messages.ts
DELETED
@@ -1,11 +0,0 @@
-export const PERPLEXITY_REQUIRED_API_KEY_MESSAGE: string =
-  "PERPLEXITY_API_KEY is required. Set it in environment variables or pass it to the constructor.";
-
-export const PERPLEXITY_API_KEY_INVALID_MESSAGE: string =
-  "PERPLEXITY_API_KEY is invalid. Please check your API key.";
-export const PERPLEXITY_METERING_API_KEY_IS_NOT_SET_MESSAGE: string =
-  "REVENIUM_METERING_API_KEY is not set. Metering will not work.";
-export const PERPLEXITY_METERING_BASE_URL_IS_NOT_SET_MESSAGE: string =
-  "REVENIUM_METERING_BASE_URL is not set. Metering will not work.";
-export const PERPLEXITY_CLIENT_INITIALIZED_MESSAGE =
-  "Perplexity AI client initialized with middleware";
package/src/utils/constants/models.ts
DELETED
@@ -1,20 +0,0 @@
-export const models: string[] = [
-  // Perplexity Sonar Models (2025) - Chat Completions
-  "sonar", // Lightweight, cost-effective search model
-  "sonar-pro", // Advanced search with deeper content understanding
-  "sonar-reasoning", // Quick problem-solving with step-by-step logic and search
-  "sonar-reasoning-pro", // Enhanced multi-step reasoning with web search
-  "sonar-deep-research", // Exhaustive research and detailed report generation
-
-  // Legacy models (deprecated - will be removed)
-  "sonar-small",
-  "sonar-small-online",
-  "sonar-medium-online",
-  "sonar-small-chat",
-  "sonar-medium-chat",
-
-  // OpenAI Embedding Models (use with OpenAI client directly)
-  "text-embedding-3-small", // Most capable small embedding model
-  "text-embedding-3-large", // Most capable large embedding model
-  "text-embedding-ada-002", // Legacy embedding model
-];
package/src/utils/constants/perplexityModels.ts
DELETED
@@ -1,71 +0,0 @@
-// Perplexity AI Models (based on current models in utils)
-export const PERPLEXITY_MODELS = {
-  // Sonar Models (Chat Completions with online search)
-  SONAR_SMALL: "sonar-small",
-  SONAR_MEDIUM: "sonar-medium",
-  SONAR_PRO: "sonar-pro",
-
-  // Legacy Models (for V1 compatibility)
-  LLAMA_3_1_SONAR_SMALL: "llama-3.1-sonar-small-128k-online",
-  LLAMA_3_1_SONAR_LARGE: "llama-3.1-sonar-large-128k-online",
-  LLAMA_3_1_SONAR_HUGE: "llama-3.1-sonar-huge-128k-online",
-
-  // Chat Models (without online search)
-  LLAMA_3_1_8B: "llama-3.1-8b-instruct",
-  LLAMA_3_1_70B: "llama-3.1-70b-instruct",
-  LLAMA_3_1_405B: "llama-3.1-405b-instruct",
-} as const;
-
-export type PerplexityModel =
-  (typeof PERPLEXITY_MODELS)[keyof typeof PERPLEXITY_MODELS];
-
-// Default models for different operations
-export const DEFAULT_CHAT_MODEL = PERPLEXITY_MODELS.SONAR_PRO;
-
-// Model capabilities
-export const MODEL_CAPABILITIES: Record<
-  string,
-  {
-    chat: boolean;
-    streaming: boolean;
-    online: boolean;
-    contextWindow: number;
-  }
-> = {
-  [PERPLEXITY_MODELS.SONAR_SMALL]: {
-    chat: true,
-    streaming: true,
-    online: true,
-    contextWindow: 127072,
-  },
-  [PERPLEXITY_MODELS.SONAR_MEDIUM]: {
-    chat: true,
-    streaming: true,
-    online: true,
-    contextWindow: 127072,
-  },
-  [PERPLEXITY_MODELS.SONAR_PRO]: {
-    chat: true,
-    streaming: true,
-    online: true,
-    contextWindow: 127072,
-  },
-  [PERPLEXITY_MODELS.LLAMA_3_1_8B]: {
-    chat: true,
-    streaming: true,
-    online: false,
-    contextWindow: 131072,
-  },
-  [PERPLEXITY_MODELS.LLAMA_3_1_70B]: {
-    chat: true,
-    streaming: true,
-    online: false,
-    contextWindow: 131072,
-  },
-  [PERPLEXITY_MODELS.LLAMA_3_1_405B]: {
-    chat: true,
-    streaming: true,
-    online: false,
-    contextWindow: 131072,
-  },
-} as const;
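For context, a minimal sketch of how the removed model constants and capability table were consumed, using only the identifiers in the deleted source above:

```ts
// Sketch only: relies on the deleted src/utils/constants/perplexityModels.ts shown above.
import {
  DEFAULT_CHAT_MODEL,
  MODEL_CAPABILITIES,
  PERPLEXITY_MODELS,
} from "./utils/constants/perplexityModels";

// DEFAULT_CHAT_MODEL resolves to "sonar-pro".
const caps = MODEL_CAPABILITIES[DEFAULT_CHAT_MODEL];
console.log(caps?.online);        // true  - sonar-pro performs web search
console.log(caps?.contextWindow); // 127072

// The offline instruct models advertise the larger 131072-token window.
console.log(MODEL_CAPABILITIES[PERPLEXITY_MODELS.LLAMA_3_1_8B]?.contextWindow); // 131072
```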
package/src/utils/extractTokenCount.ts
DELETED
@@ -1,26 +0,0 @@
-import { ITokenCounts } from "../interfaces/tokenCounts";
-import { logger } from "../models/Logger";
-import { safeExtract } from "./safeExtract";
-
-export function extractGoogleAITokenCounts(response: any): ITokenCounts {
-  console.log;
-  try {
-    const usageMetadata = response.usage || (response as any).response?.usage;
-    if (!usageMetadata)
-      return { inputTokens: 0, outputTokens: 0, totalTokens: 0 };
-
-    const inputTokens = safeExtract.number(usageMetadata, "prompt_tokens");
-    const outputTokens = safeExtract.number(usageMetadata, "completion_tokens");
-    const totalTokens = safeExtract.number(usageMetadata, "total_tokens");
-    return {
-      inputTokens,
-      outputTokens,
-      totalTokens,
-      cachedTokens: 0,
-      reasoningTokens: 0,
-    };
-  } catch (error) {
-    logger.warning("Failed to extract Google AI token counts:", error);
-    return { inputTokens: 0, outputTokens: 0, totalTokens: 0 };
-  }
-}
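A short sketch of the removed token-count helper in use, with an illustrative response payload; note that despite the "GoogleAI" name it reads OpenAI-style usage fields:

```ts
// Sketch only: relies on the deleted src/utils/extractTokenCount.ts shown above.
import { extractGoogleAITokenCounts } from "./utils/extractTokenCount";

const counts = extractGoogleAITokenCounts({
  usage: { prompt_tokens: 15, completion_tokens: 90, total_tokens: 105 },
});
// counts => { inputTokens: 15, outputTokens: 90, totalTokens: 105, cachedTokens: 0, reasoningTokens: 0 }
```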
package/src/utils/index.ts
DELETED
@@ -1,39 +0,0 @@
-import {
-  PERPLEXITY_API_KEY,
-  REVENIUM_METERING_API_KEY,
-  REVENIUM_METERING_BASE_URL,
-} from "./loadEnv";
-import {
-  PERPLEXITY_REQUIRED_API_KEY_MESSAGE,
-  PERPLEXITY_API_KEY_INVALID_MESSAGE,
-  PERPLEXITY_METERING_API_KEY_IS_NOT_SET_MESSAGE,
-  PERPLEXITY_METERING_BASE_URL_IS_NOT_SET_MESSAGE,
-  PERPLEXITY_CLIENT_INITIALIZED_MESSAGE,
-} from "./constants/messages";
-import { LOG_LEVELS } from "./constants/logLevels";
-import { models } from "./constants/models";
-import {
-  PERPLEXITY_API_BASE_URL,
-  COST_TYPE,
-  MIDDLEWARE_SOURCE,
-  PRODUCT_ID_FREE,
-  CURRENT_CREDENTIAL,
-} from "./constants/constants";
-
-export {
-  PERPLEXITY_API_KEY,
-  REVENIUM_METERING_API_KEY,
-  REVENIUM_METERING_BASE_URL,
-  PERPLEXITY_REQUIRED_API_KEY_MESSAGE,
-  PERPLEXITY_API_KEY_INVALID_MESSAGE,
-  LOG_LEVELS,
-  PERPLEXITY_METERING_API_KEY_IS_NOT_SET_MESSAGE,
-  PERPLEXITY_METERING_BASE_URL_IS_NOT_SET_MESSAGE,
-  PERPLEXITY_CLIENT_INITIALIZED_MESSAGE,
-  models,
-  PERPLEXITY_API_BASE_URL,
-  COST_TYPE,
-  MIDDLEWARE_SOURCE,
-  PRODUCT_ID_FREE,
-  CURRENT_CREDENTIAL,
-};
package/src/utils/loadEnv.ts
DELETED
package/src/utils/safeExtract.ts
DELETED
@@ -1,67 +0,0 @@
-/**
- * Safe extraction utility functions.
- */
-export namespace safeExtract {
-  /**
-   * Safely extract a value from an object, returning undefined if the path doesn't exist.
-   */
-  export function get<T>(
-    obj: any,
-    path: string,
-    defaultValue?: T
-  ): T | undefined {
-    try {
-      const keys: string[] = path.split(".");
-      let result = obj;
-
-      for (const key of keys) {
-        if (result == null || typeof result !== "object") {
-          return defaultValue;
-        }
-        result = result[key];
-      }
-
-      return result ?? defaultValue;
-    } catch {
-      return defaultValue;
-    }
-  }
-
-  /**
-   * Safely extract a string value, returning empty string if not found.
-   */
-  export function string(obj: any, path: string): string {
-    return get(obj, path, "") || "";
-  }
-
-  /**
-   * Safely extract a number value, returning 0 if not found.
-   */
-  export function number(obj: any, path: string): number {
-    const value = get(obj, path, 0);
-    return typeof value === "number" ? value : 0;
-  }
-
-  /**
-   * Safely extract a boolean value, returning false if not found.
-   */
-  export function boolean(obj: any, path: string): boolean {
-    return get(obj, path, false) || false;
-  }
-
-  /**
-   * Safely extract an object value, returning empty object if not found.
-   */
-  export function object(obj: any, path: string): Record<string, any> {
-    const value = get(obj, path, {});
-    return typeof value === "object" && value !== null ? value : {};
-  }
-
-  /**
-   * Safely extract an array value, returning empty array if not found.
-   */
-  export function array(obj: any, path: string): any[] {
-    const value = get(obj, path, []);
-    return Array.isArray(value) ? value : [];
-  }
-}
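A short sketch of the removed `safeExtract` helpers against an illustrative response object; dot paths traverse nested objects, and numeric segments work for array indexes because arrays are objects:

```ts
// Sketch only: relies on the deleted src/utils/safeExtract.ts shown above.
import { safeExtract } from "./utils/safeExtract";

const response = { usage: { prompt_tokens: 12 }, choices: [{ message: { content: "hi" } }] };

safeExtract.number(response, "usage.prompt_tokens");       // 12
safeExtract.number(response, "usage.completion_tokens");    // 0   (missing path falls back to the default)
safeExtract.string(response, "choices.0.message.content");  // "hi"
safeExtract.array(response, "choices");                      // [{ message: { content: "hi" } }]
```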
package/src/v1/perplexityV1.controller.ts
DELETED
@@ -1,164 +0,0 @@
-import { logger } from "../models/Logger";
-import { IUsageMetadata } from "../interfaces/usageMetadata";
-import {
-  IChatCompletionRequest,
-  IPerplexityMessage,
-} from "../interfaces/chatCompletionRequest";
-import { IPerplexityResponseChat } from "../interfaces/perplexityResponse";
-import { IPerplexityStreamingResponse } from "../interfaces/perplexityStreaming";
-import {
-  DEFAULT_CHAT_MODEL,
-  PERPLEXITY_MODELS,
-  MODEL_CAPABILITIES,
-} from "../utils/constants/perplexityModels";
-import { PerplexityV1Service } from "./perplexityV1.service";
-import { models } from "../utils";
-
-export class PerplexityV1Controller {
-  private service: PerplexityV1Service;
-
-  constructor() {
-    this.service = new PerplexityV1Service();
-    logger.info("Perplexity V1 Controller initialized");
-  }
-
-  /**
-   * Create a chat completion using Perplexity V1 API (Compatible with current middleware)
-   * @param messages Array of message strings or OpenAI message objects
-   * @param model Optional model name (defaults to sonar-pro)
-   * @param customMetadata Optional custom metadata for tracking
-   * @returns Promise<IPerplexityResponseChat>
-   */
-  async createChat(
-    messages: string[] | Array<{ role: string; content: string }>,
-    model: string = DEFAULT_CHAT_MODEL,
-    customMetadata?: IUsageMetadata
-  ): Promise<IPerplexityResponseChat> {
-    try {
-      // Normalize messages to OpenAI format
-      const normalizedMessages = this.normalizeMessages(messages);
-
-      // Validate model
-      this.validateModel(model);
-
-      // Create chat completion request
-      const request: IChatCompletionRequest = {
-        messages: normalizedMessages,
-        usageMetadata: customMetadata,
-      };
-
-      logger.info(
-        `Creating Perplexity V1 chat completion with model: ${model}`
-      );
-
-      return await this.service.createChatCompletion(request, model);
-    } catch (error) {
-      logger.error("Error in Perplexity V1 createChat:", error);
-      throw error;
-    }
-  }
-
-  /**
-   * Create a streaming chat completion using Perplexity V1 API (Compatible with current middleware)
-   * @param messages Array of message strings or OpenAI message objects
-   * @param model Optional model name (defaults to sonar-pro)
-   * @param customMetadata Optional custom metadata for tracking
-   * @returns Promise<IPerplexityStreamingResponse>
-   */
-  async createStreaming(
-    messages: string[] | Array<{ role: string; content: string }>,
-    model: string = DEFAULT_CHAT_MODEL,
-    customMetadata?: IUsageMetadata
-  ): Promise<IPerplexityStreamingResponse> {
-    try {
-      // Normalize messages to OpenAI format
-      const normalizedMessages = this.normalizeMessages(messages);
-
-      // Validate model
-      this.validateModel(model);
-
-      // Create streaming request
-      const request: IChatCompletionRequest = {
-        messages: normalizedMessages,
-        usageMetadata: customMetadata,
-      };
-
-      logger.info(
-        `Creating Perplexity V1 streaming completion with model: ${model}`
-      );
-
-      return await this.service.createStreamingCompletion(request, model);
-    } catch (error) {
-      logger.error("Error in Perplexity V1 createStreaming:", error);
-      throw error;
-    }
-  }
-
-  /**
-   * Get available models for Perplexity V1
-   * @returns Array of available model names
-   */
-  getAvailableModels(): string[] {
-    return models; // Use the existing models from utils
-  }
-
-  /**
-   * Check if a model supports a specific capability
-   * @param model Model name
-   * @param capability Capability to check (chat, streaming, online)
-   * @returns boolean
-   */
-  supportsCapability(
-    model: string,
-    capability: "chat" | "streaming" | "online"
-  ): boolean {
-    return MODEL_CAPABILITIES[model]?.[capability] || false;
-  }
-
-  /**
-   * Get model information including capabilities and context window
-   * @param model Model name
-   * @returns Model information object
-   */
-  getModelInfo(model: string) {
-    const capabilities = MODEL_CAPABILITIES[model];
-    if (!capabilities) {
-      throw new Error(`Model "${model}" is not supported`);
-    }
-
-    return {
-      model,
-      capabilities: {
-        chat: capabilities.chat,
-        streaming: capabilities.streaming,
-        online: capabilities.online,
-      },
-      contextWindow: capabilities.contextWindow,
-      version: "v1",
-    };
-  }
-
-  private normalizeMessages(
-    messages: string[] | Array<{ role: string; content: string }>
-  ): IPerplexityMessage[] {
-    if (Array.isArray(messages) && typeof messages[0] === "string") {
-      // Convert string array to message objects
-      return (messages as string[]).map((content, index) => ({
-        role: index === 0 ? "user" : "assistant",
-        content,
-      })) as IPerplexityMessage[];
-    }
-
-    return messages as IPerplexityMessage[];
-  }
-
-  private validateModel(model: string): void {
-    if (!models.includes(model)) {
-      throw new Error(
-        `Model "${model}" is not supported. Available models: ${models.join(
-          ", "
-        )}`
-      );
-    }
-  }
-}
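For context, a minimal sketch of how the removed V1 controller was called, based only on the deleted source above; the metadata fields are illustrative and mirror names used by the deleted Metering class:

```ts
// Sketch only: relies on the deleted src/v1/perplexityV1.controller.ts shown above.
// A bare string array is normalized to alternating user/assistant messages,
// so a single-turn prompt is simplest to pass as a one-element array.
import { PerplexityV1Controller } from "./v1/perplexityV1.controller";

const controller = new PerplexityV1Controller();

if (controller.supportsCapability("sonar-pro", "streaming")) {
  const info = controller.getModelInfo("sonar-pro");
  console.log(info.contextWindow); // 127072
}

// createChat() throws if the model is not in the shared `models` list.
const completion = await controller.createChat(
  ["What changed in Perplexity's Sonar line this year?"],
  "sonar-pro",
  { subscriberEmail: "dev@example.com" } // optional usage metadata forwarded for metering
);
```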