@revenium/perplexity 1.0.22 → 1.0.25

This diff shows the changes between publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (87)
  1. package/README.md +391 -154
  2. package/dist/interfaces/meteringResponse.d.ts +28 -27
  3. package/dist/interfaces/meteringResponse.js +2 -2
  4. package/dist/models/Metering.js +23 -20
  5. package/dist/v1/perplexityV1.service.js +166 -0
  6. package/dist/v2/perplexityV2.service.js +178 -0
  7. package/examples/v1/basic.ts +50 -0
  8. package/examples/v1/chat.ts +40 -0
  9. package/examples/v1/metadata.ts +49 -0
  10. package/examples/v1/streaming.ts +44 -0
  11. package/examples/v2/basic.ts +49 -0
  12. package/examples/v2/chat.ts +60 -0
  13. package/examples/v2/metadata.ts +71 -0
  14. package/examples/v2/streaming.ts +61 -0
  15. package/package.json +26 -11
  16. package/playground/v1/basic.js +50 -0
  17. package/playground/v1/chat.js +46 -0
  18. package/playground/v1/metadata.js +50 -0
  19. package/playground/v1/streaming.js +44 -0
  20. package/playground/v2/basic.js +49 -0
  21. package/playground/v2/chat.js +72 -0
  22. package/playground/v2/metadata.js +76 -0
  23. package/playground/v2/streaming.js +67 -0
  24. package/src/index.ts +14 -1
  25. package/src/interfaces/chatCompletionRequest.ts +7 -2
  26. package/src/interfaces/meteringResponse.ts +1 -0
  27. package/src/interfaces/perplexityResponse.ts +63 -0
  28. package/src/interfaces/perplexityStreaming.ts +56 -0
  29. package/src/models/Metering.ts +9 -2
  30. package/src/utils/constants/perplexityModels.ts +71 -0
  31. package/src/v1/perplexityV1.controller.ts +164 -0
  32. package/src/v1/perplexityV1.service.ts +230 -0
  33. package/src/v2/perplexityV2.controller.ts +219 -0
  34. package/src/v2/perplexityV2.service.ts +260 -0
  35. package/dist/index.js +0 -19
  36. package/dist/interfaces/chatCompletionRequest.d.ts +0 -9
  37. package/dist/interfaces/chatCompletionRequest.js +0 -2
  38. package/dist/interfaces/credential.d.ts +0 -4
  39. package/dist/interfaces/credential.js +0 -2
  40. package/dist/interfaces/meteringRequest.d.ts +0 -13
  41. package/dist/interfaces/meteringRequest.js +0 -2
  42. package/dist/interfaces/operation.d.ts +0 -4
  43. package/dist/interfaces/operation.js +0 -8
  44. package/dist/interfaces/subscriber.d.ts +0 -8
  45. package/dist/interfaces/subscriber.js +0 -2
  46. package/dist/interfaces/tokenCounts.d.ts +0 -7
  47. package/dist/interfaces/tokenCounts.js +0 -2
  48. package/dist/interfaces/usageMetadata.d.ts +0 -27
  49. package/dist/interfaces/usageMetadata.js +0 -2
  50. package/dist/middleware.d.ts +0 -22
  51. package/dist/middleware.js +0 -129
  52. package/dist/models/Logger.js +0 -35
  53. package/dist/models/Metering.d.ts +0 -9
  54. package/dist/utils/calculateDurationMs.d.ts +0 -1
  55. package/dist/utils/calculateDurationMs.js +0 -6
  56. package/dist/utils/constants/constants.d.ts +0 -6
  57. package/dist/utils/constants/constants.js +0 -11
  58. package/dist/utils/constants/logLevels.d.ts +0 -1
  59. package/dist/utils/constants/logLevels.js +0 -4
  60. package/dist/utils/constants/messages.d.ts +0 -5
  61. package/dist/utils/constants/messages.js +0 -8
  62. package/dist/utils/constants/models.d.ts +0 -1
  63. package/dist/utils/constants/models.js +0 -21
  64. package/dist/utils/extractTokenCount.d.ts +0 -2
  65. package/dist/utils/extractTokenCount.js +0 -28
  66. package/dist/utils/formatTimeStamp.d.ts +0 -1
  67. package/dist/utils/formatTimeStamp.js +0 -6
  68. package/dist/utils/generateTransactionId.d.ts +0 -1
  69. package/dist/utils/generateTransactionId.js +0 -7
  70. package/dist/utils/index.d.ts +0 -6
  71. package/dist/utils/index.js +0 -23
  72. package/dist/utils/loadEnv.d.ts +0 -1
  73. package/dist/utils/loadEnv.js +0 -7
  74. package/dist/utils/safeExtract.d.ts +0 -29
  75. package/dist/utils/safeExtract.js +0 -67
  76. package/examples/basic.ts +0 -17
  77. package/examples/chat-completions.ts +0 -22
  78. package/examples/enhanced.ts +0 -20
  79. package/examples/metadata.ts +0 -43
  80. package/examples/streaming.ts +0 -24
  81. package/playground/basic.js +0 -17
  82. package/playground/chat-completions.js +0 -22
  83. package/playground/enhanced.js +0 -20
  84. package/playground/metadata.js +0 -43
  85. package/playground/streaming.js +0 -24
  86. package/src/middleware.ts +0 -157
  87. package/src/utils/{formatTimeStamp.ts → formatTimestamp.ts} +0 -0
package/playground/v2/metadata.js ADDED
@@ -0,0 +1,76 @@
+ import { PerplexityV2Controller } from "@revenium/perplexity";
+
+ const metadataV2Example = async () => {
+   console.log("=".repeat(50));
+   console.log(
+     "📊 Perplexity V2 Enhanced Metadata Example (JavaScript - OpenAI Responses API)"
+   );
+   console.log("=".repeat(50));
+
+   try {
+     const controller = new PerplexityV2Controller();
+
+     // Enhanced custom metadata for V2
+     const customMetadata = {
+       traceId: "trace-v2-js-67890",
+       taskType: "advanced-research",
+       subscriberEmail: "researcher@example.com",
+       subscriberId: "user-v2-js-123",
+       organizationId: "org-v2-js-456",
+       agent: "perplexity-v2-js-enhanced",
+       responseQualityScore: 0.95,
+       operationType: "chat-completion",
+     };
+
+     const result = await controller.createChat(
+       [
+         "Provide a comprehensive analysis of the impact of artificial intelligence on modern healthcare systems",
+       ],
+       "sonar-pro",
+       customMetadata
+     );
+
+     console.log("📝 V2 Enhanced Response:");
+     console.log(result.choices[0].message.content);
+
+     console.log("\n📊 V2 Enhanced Usage Statistics:");
+     console.log(`Prompt tokens: ${result.usage.prompt_tokens}`);
+     console.log(`Completion tokens: ${result.usage.completion_tokens}`);
+     console.log(`Total tokens: ${result.usage.total_tokens}`);
+
+     console.log("\n🔧 V2 Enhanced Response Structure:");
+     console.log(`ID: ${result.id}`);
+     console.log(`Object: ${result.object}`);
+     console.log(`Created: ${new Date(result.created * 1000).toISOString()}`);
+     console.log(`Model: ${result.model}`);
+     console.log(`Choice Index: ${result.choices[0].index}`);
+     console.log(`Message Role: ${result.choices[0].message.role}`);
+     console.log(`Finish Reason: ${result.choices[0].finish_reason}`);
+
+     console.log("\n🏷️ V2 Enhanced Metadata:");
+     console.log(`Transaction ID: ${result.metadata.transactionId}`);
+     console.log(`Processing Time: ${result.metadata.processingTime}ms`);
+     console.log(`Version: ${result.metadata.version}`);
+
+     console.log("\n📋 Custom Metadata Sent:");
+     console.log(`Trace ID: ${customMetadata.traceId}`);
+     console.log(`Task Type: ${customMetadata.taskType}`);
+     console.log(`Subscriber: ${customMetadata.subscriberEmail}`);
+     console.log(`Organization: ${customMetadata.organizationId}`);
+     console.log(`Agent: ${customMetadata.agent}`);
+     console.log(`Quality Score: ${customMetadata.responseQualityScore}`);
+     console.log(`Operation Type: ${customMetadata.operationType}`);
+
+     // V2 Enhanced Analytics
+     console.log("\n📈 V2 Enhanced Analytics:");
+     const performance = controller.getModelPerformance("sonar-pro");
+     console.log(`Context Window: ${performance.contextWindow} tokens`);
+     console.log(`Online Capabilities: ${performance.online}`);
+     console.log(`Estimated Latency: ${performance.estimatedLatency}`);
+     console.log(`Cost Tier: ${performance.costTier}`);
+   } catch (error) {
+     console.error("❌ Error:", error);
+   }
+ };
+
+ metadataV2Example();
package/playground/v2/streaming.js ADDED
@@ -0,0 +1,67 @@
+ import { PerplexityV2Controller } from "@revenium/perplexity";
+
+ const streamingV2Example = async () => {
+   console.log("=".repeat(50));
+   console.log(
+     "🌊 Perplexity V2 Streaming Example (JavaScript - Enhanced Response Format)"
+   );
+   console.log("=".repeat(50));
+
+   try {
+     const controller = new PerplexityV2Controller();
+
+     // Streaming chat completion with V2 enhanced response
+     const result = await controller.createStreaming(
+       [
+         "Write a detailed explanation of how machine learning algorithms work, including examples.",
+       ],
+       "sonar-pro"
+     );
+
+     console.log("📝 V2 Streaming Response:");
+     console.log("-".repeat(30));
+
+     let fullResponse = "";
+     let chunkCount = 0;
+     let lastUsage = null;
+
+     for await (const chunk of result.stream) {
+       chunkCount++;
+       const content = chunk.choices?.[0]?.delta?.content || "";
+
+       if (content) {
+         process.stdout.write(content);
+         fullResponse += content;
+       }
+
+       // Capture usage data from the last chunk
+       if (chunk.usage) {
+         lastUsage = chunk.usage;
+       }
+     }
+
+     console.log("\n" + "-".repeat(30));
+     console.log(`\n📊 V2 Enhanced Streaming Stats:`);
+     console.log(`Total chunks received: ${chunkCount}`);
+     console.log(`Response length: ${fullResponse.length} characters`);
+
+     if (lastUsage) {
+       console.log(`Prompt tokens: ${lastUsage.prompt_tokens}`);
+       console.log(`Completion tokens: ${lastUsage.completion_tokens}`);
+       console.log(`Total tokens: ${lastUsage.total_tokens}`);
+     }
+
+     console.log(`\n🔧 V2 Metadata:`);
+     console.log(`Stream ID: ${result.id}`);
+     console.log(`Object: ${result.object}`);
+     console.log(`Transaction ID: ${result.metadata.transactionId}`);
+     console.log(
+       `Start Time: ${new Date(result.metadata.startTime).toISOString()}`
+     );
+     console.log(`Version: ${result.metadata.version}`);
+   } catch (error) {
+     console.error("❌ Error:", error);
+   }
+ };
+
+ streamingV2Example();
package/src/index.ts CHANGED
@@ -1,4 +1,17 @@
  import { config } from "dotenv";
  config();

- export * from "./middleware";
+ // V1 Controllers (compatible with current API)
+ export * from "./v1/perplexityV1.controller";
+
+ // V2 Controllers (enhanced response format following OpenAI Responses API)
+ export * from "./v2/perplexityV2.controller";
+
+ // Interfaces
+ export * from "./interfaces/perplexityResponse";
+ export * from "./interfaces/perplexityStreaming";
+ export * from "./interfaces/usageMetadata";
+ export * from "./interfaces/chatCompletionRequest";
+
+ // Constants
+ export * from "./utils/constants/perplexityModels";
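With the middleware export replaced by the versioned controllers, consumers now import everything from the package root. A minimal sketch of the new surface, assuming credentials are supplied via environment variables picked up by the dotenv call above:

  import { PerplexityV1Controller, DEFAULT_CHAT_MODEL } from "@revenium/perplexity";

  const main = async () => {
    // V1 keeps the response shape of the previous middleware
    const controller = new PerplexityV1Controller();
    const chat = await controller.createChat(["Summarize this release."], DEFAULT_CHAT_MODEL);
    console.log(chat.responses[0].text);
  };

  main();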
package/src/interfaces/chatCompletionRequest.ts CHANGED
@@ -1,9 +1,14 @@
- import { ChatCompletionMessageParam } from "openai/resources/index";
  import { IUsageMetadata } from "./usageMetadata";

+ // Flexible message interface for Perplexity
+ export interface IPerplexityMessage {
+   role: "system" | "user" | "assistant";
+   content: string;
+ }
+
  export interface IChatCompletionRequest {
    model?: string;
-   messages: ChatCompletionMessageParam[];
+   messages: IPerplexityMessage[];
    stream?: boolean;
    usageMetadata?: IUsageMetadata;
    [key: string]: any;
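The request type now uses the package's own IPerplexityMessage instead of OpenAI's ChatCompletionMessageParam, dropping the type-level dependency on the openai package. A request literal built against the new interface, as a sketch:

  import { IChatCompletionRequest } from "@revenium/perplexity";

  const request: IChatCompletionRequest = {
    model: "sonar-pro",
    messages: [
      { role: "system", content: "Answer concisely." },
      { role: "user", content: "What models does Perplexity offer?" },
    ],
    stream: false,
  };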
package/src/interfaces/meteringResponse.ts CHANGED
@@ -25,4 +25,5 @@ export interface IMeteringResponse {
    completionStartTime: string;
    timeToFirstToken: number;
    middleware_source: string;
+   traceId?: string;
  }
package/src/interfaces/perplexityResponse.ts ADDED
@@ -0,0 +1,63 @@
+ import { IUsageMetadata } from "./usageMetadata";
+
+ // Perplexity Chat Response Interface (V1 - Compatible with current middleware)
+ export interface IPerplexityResponseChat {
+   responses: IPerplexityResponseMessage[];
+   usageMetadata: IUsageMetadata;
+   modelVersion: string;
+   transactionId: string;
+ }
+
+ export interface IPerplexityResponseMessage {
+   text: string;
+   role: "assistant" | "user" | "system";
+   finishReason?: string;
+ }
+
+ // Raw OpenAI-compatible response from Perplexity API
+ export interface IPerplexityRawResponse {
+   id: string;
+   object: string;
+   created: number;
+   model: string;
+   choices: Array<{
+     index: number;
+     message: {
+       role: string;
+       content: string;
+     };
+     finish_reason: string;
+   }>;
+   usage: {
+     prompt_tokens: number;
+     completion_tokens: number;
+     total_tokens: number;
+   };
+ }
+
+ // V2 Enhanced Response Structure (Following OpenAI Responses API)
+ export interface IPerplexityV2Response {
+   id: string;
+   object: "chat.completion";
+   created: number;
+   model: string;
+   choices: Array<{
+     index: number;
+     message: {
+       role: string;
+       content: string;
+     };
+     finish_reason: string;
+   }>;
+   usage: {
+     prompt_tokens: number;
+     completion_tokens: number;
+     total_tokens: number;
+   };
+   // Enhanced metadata for V2
+   metadata: {
+     transactionId: string;
+     processingTime: number;
+     version: "v2";
+   };
+ }
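The V2 shape is the raw OpenAI-compatible body plus a metadata block. A sketch of the wrapping step; the toV2 helper is hypothetical, not part of the package:

  import {
    IPerplexityRawResponse,
    IPerplexityV2Response,
  } from "@revenium/perplexity";

  // Hypothetical helper: decorate a raw Perplexity response with V2 metadata.
  function toV2(
    raw: IPerplexityRawResponse,
    transactionId: string,
    processingTime: number
  ): IPerplexityV2Response {
    return {
      ...raw,
      object: "chat.completion",
      metadata: { transactionId, processingTime, version: "v2" },
    };
  }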
package/src/interfaces/perplexityStreaming.ts ADDED
@@ -0,0 +1,56 @@
+ import { IUsageMetadata } from "./usageMetadata";
+
+ // Perplexity Streaming Response Interface (V1 - Compatible with current middleware)
+ export interface IPerplexityStreamingResponse {
+   stream: AsyncIterable<IPerplexityStreamChunk>;
+   usageMetadata: IUsageMetadata;
+   modelVersion: string;
+   transactionId: string;
+ }
+
+ export interface IPerplexityStreamChunk {
+   id: string;
+   object: string;
+   created: number;
+   model: string;
+   choices: Array<{
+     index: number;
+     delta: {
+       role?: string;
+       content?: string;
+     };
+     finish_reason?: string;
+   }>;
+ }
+
+ // V2 Enhanced Streaming Response (Following OpenAI Responses API)
+ export interface IPerplexityV2StreamingResponse {
+   id: string;
+   object: "chat.completion.chunk";
+   stream: AsyncIterable<IPerplexityV2StreamChunk>;
+   metadata: {
+     transactionId: string;
+     startTime: number;
+     version: "v2";
+   };
+ }
+
+ export interface IPerplexityV2StreamChunk {
+   id: string;
+   object: "chat.completion.chunk";
+   created: number;
+   model: string;
+   choices: Array<{
+     index: number;
+     delta: {
+       role?: string;
+       content?: string;
+     };
+     finish_reason?: string;
+   }>;
+   usage?: {
+     prompt_tokens: number;
+     completion_tokens: number;
+     total_tokens: number;
+   };
+ }
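The V1 and V2 streaming results differ mainly in where metadata lives: V1 exposes usageMetadata and modelVersion at the top level, while V2 nests a metadata object and tags the result as "chat.completion.chunk". A small type guard, as a sketch:

  import {
    IPerplexityStreamingResponse,
    IPerplexityV2StreamingResponse,
  } from "@revenium/perplexity";

  function isV2Stream(
    result: IPerplexityStreamingResponse | IPerplexityV2StreamingResponse
  ): result is IPerplexityV2StreamingResponse {
    // Only the V2 response carries the nested metadata block.
    return "metadata" in result && result.metadata.version === "v2";
  }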
package/src/models/Metering.ts CHANGED
@@ -9,7 +9,7 @@ import {
    REVENIUM_METERING_BASE_URL,
  } from "../utils";
  import { calculateDurationMs } from "../utils/calculateDurationMs";
- import { formatTimestamp } from "../utils/formatTimeStamp";
+ import { formatTimestamp } from "../utils/formatTimestamp";
  import { generateTransactionId } from "../utils/generateTransactionId";
  import { logger } from "./Logger";

@@ -31,7 +31,7 @@ export class Metering {
      stopReason: usageMetadata?.stopReason ?? metering.stopReason,
      costType: COST_TYPE,
      isStreamed,
-     taskType: COST_TYPE,
+     taskType: usageMetadata?.taskType ?? COST_TYPE,
      agent: usageMetadata?.agent ?? agent,
      operationType:
        usageMetadata?.operationType ?? metering.operationType.toString(),
@@ -83,6 +83,7 @@ export class Metering {
        formatTimestamp(metering.endTime),
      timeToFirstToken: usageMetadata?.timeToFirstToken ?? 0,
      middleware_source: MIDDLEWARE_SOURCE,
+     traceId: usageMetadata?.traceId,
    };
  }

@@ -90,6 +91,12 @@
    metering: IMeteringResponse
  ): Promise<void> => {
    try {
+     // Log the metering data being sent for debugging
+     logger.info(
+       "Sending metering data to Revenium:",
+       JSON.stringify(metering, null, 2)
+     );
+
      const response = await fetch(`${this.endpoint}/v2/ai/completions`, {
        method: "POST",
        headers: {
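Two of these changes alter the payload posted to /v2/ai/completions: taskType now honors a caller-supplied usageMetadata.taskType rather than always repeating the cost type, and an optional traceId is forwarded when present. A caller-side sketch using the fields shown in the diffs above:

  const controller = new PerplexityV1Controller();
  await controller.createChat(["Explain retrieval-augmented generation."], "sonar-pro", {
    taskType: "advanced-research", // forwarded instead of the COST_TYPE constant
    traceId: "trace-12345", // now included in the metering payload
  });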
package/src/utils/constants/perplexityModels.ts ADDED
@@ -0,0 +1,71 @@
+ // Perplexity AI Models (based on current models in utils)
+ export const PERPLEXITY_MODELS = {
+   // Sonar Models (Chat Completions with online search)
+   SONAR_SMALL: "sonar-small",
+   SONAR_MEDIUM: "sonar-medium",
+   SONAR_PRO: "sonar-pro",
+
+   // Legacy Models (for V1 compatibility)
+   LLAMA_3_1_SONAR_SMALL: "llama-3.1-sonar-small-128k-online",
+   LLAMA_3_1_SONAR_LARGE: "llama-3.1-sonar-large-128k-online",
+   LLAMA_3_1_SONAR_HUGE: "llama-3.1-sonar-huge-128k-online",
+
+   // Chat Models (without online search)
+   LLAMA_3_1_8B: "llama-3.1-8b-instruct",
+   LLAMA_3_1_70B: "llama-3.1-70b-instruct",
+   LLAMA_3_1_405B: "llama-3.1-405b-instruct",
+ } as const;
+
+ export type PerplexityModel =
+   (typeof PERPLEXITY_MODELS)[keyof typeof PERPLEXITY_MODELS];
+
+ // Default models for different operations
+ export const DEFAULT_CHAT_MODEL = PERPLEXITY_MODELS.SONAR_PRO;
+
+ // Model capabilities
+ export const MODEL_CAPABILITIES: Record<
+   string,
+   {
+     chat: boolean;
+     streaming: boolean;
+     online: boolean;
+     contextWindow: number;
+   }
+ > = {
+   [PERPLEXITY_MODELS.SONAR_SMALL]: {
+     chat: true,
+     streaming: true,
+     online: true,
+     contextWindow: 127072,
+   },
+   [PERPLEXITY_MODELS.SONAR_MEDIUM]: {
+     chat: true,
+     streaming: true,
+     online: true,
+     contextWindow: 127072,
+   },
+   [PERPLEXITY_MODELS.SONAR_PRO]: {
+     chat: true,
+     streaming: true,
+     online: true,
+     contextWindow: 127072,
+   },
+   [PERPLEXITY_MODELS.LLAMA_3_1_8B]: {
+     chat: true,
+     streaming: true,
+     online: false,
+     contextWindow: 131072,
+   },
+   [PERPLEXITY_MODELS.LLAMA_3_1_70B]: {
+     chat: true,
+     streaming: true,
+     online: false,
+     contextWindow: 131072,
+   },
+   [PERPLEXITY_MODELS.LLAMA_3_1_405B]: {
+     chat: true,
+     streaming: true,
+     online: false,
+     contextWindow: 131072,
+   },
+ } as const;
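MODEL_CAPABILITIES backs the controllers' supportsCapability and getModelInfo lookups; it can also be queried directly. A sketch of a capability check:

  import {
    MODEL_CAPABILITIES,
    PERPLEXITY_MODELS,
    PerplexityModel,
  } from "@revenium/perplexity";

  const model: PerplexityModel = PERPLEXITY_MODELS.SONAR_PRO;
  const caps = MODEL_CAPABILITIES[model];
  if (caps?.online) {
    // sonar-pro supports online search with a 127072-token context window
    console.log(`${model}: online search, ${caps.contextWindow}-token context`);
  }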
package/src/v1/perplexityV1.controller.ts ADDED
@@ -0,0 +1,164 @@
+ import { logger } from "../models/Logger";
+ import { IUsageMetadata } from "../interfaces/usageMetadata";
+ import {
+   IChatCompletionRequest,
+   IPerplexityMessage,
+ } from "../interfaces/chatCompletionRequest";
+ import { IPerplexityResponseChat } from "../interfaces/perplexityResponse";
+ import { IPerplexityStreamingResponse } from "../interfaces/perplexityStreaming";
+ import {
+   DEFAULT_CHAT_MODEL,
+   PERPLEXITY_MODELS,
+   MODEL_CAPABILITIES,
+ } from "../utils/constants/perplexityModels";
+ import { PerplexityV1Service } from "./perplexityV1.service";
+ import { models } from "../utils";
+
+ export class PerplexityV1Controller {
+   private service: PerplexityV1Service;
+
+   constructor() {
+     this.service = new PerplexityV1Service();
+     logger.info("Perplexity V1 Controller initialized");
+   }
+
+   /**
+    * Create a chat completion using Perplexity V1 API (Compatible with current middleware)
+    * @param messages Array of message strings or OpenAI message objects
+    * @param model Optional model name (defaults to sonar-pro)
+    * @param customMetadata Optional custom metadata for tracking
+    * @returns Promise<IPerplexityResponseChat>
+    */
+   async createChat(
+     messages: string[] | Array<{ role: string; content: string }>,
+     model: string = DEFAULT_CHAT_MODEL,
+     customMetadata?: IUsageMetadata
+   ): Promise<IPerplexityResponseChat> {
+     try {
+       // Normalize messages to OpenAI format
+       const normalizedMessages = this.normalizeMessages(messages);
+
+       // Validate model
+       this.validateModel(model);
+
+       // Create chat completion request
+       const request: IChatCompletionRequest = {
+         messages: normalizedMessages,
+         usageMetadata: customMetadata,
+       };
+
+       logger.info(
+         `Creating Perplexity V1 chat completion with model: ${model}`
+       );
+
+       return await this.service.createChatCompletion(request, model);
+     } catch (error) {
+       logger.error("Error in Perplexity V1 createChat:", error);
+       throw error;
+     }
+   }
+
+   /**
+    * Create a streaming chat completion using Perplexity V1 API (Compatible with current middleware)
+    * @param messages Array of message strings or OpenAI message objects
+    * @param model Optional model name (defaults to sonar-pro)
+    * @param customMetadata Optional custom metadata for tracking
+    * @returns Promise<IPerplexityStreamingResponse>
+    */
+   async createStreaming(
+     messages: string[] | Array<{ role: string; content: string }>,
+     model: string = DEFAULT_CHAT_MODEL,
+     customMetadata?: IUsageMetadata
+   ): Promise<IPerplexityStreamingResponse> {
+     try {
+       // Normalize messages to OpenAI format
+       const normalizedMessages = this.normalizeMessages(messages);
+
+       // Validate model
+       this.validateModel(model);
+
+       // Create streaming request
+       const request: IChatCompletionRequest = {
+         messages: normalizedMessages,
+         usageMetadata: customMetadata,
+       };
+
+       logger.info(
+         `Creating Perplexity V1 streaming completion with model: ${model}`
+       );
+
+       return await this.service.createStreamingCompletion(request, model);
+     } catch (error) {
+       logger.error("Error in Perplexity V1 createStreaming:", error);
+       throw error;
+     }
+   }
+
+   /**
+    * Get available models for Perplexity V1
+    * @returns Array of available model names
+    */
+   getAvailableModels(): string[] {
+     return models; // Use the existing models from utils
+   }
+
+   /**
+    * Check if a model supports a specific capability
+    * @param model Model name
+    * @param capability Capability to check (chat, streaming, online)
+    * @returns boolean
+    */
+   supportsCapability(
+     model: string,
+     capability: "chat" | "streaming" | "online"
+   ): boolean {
+     return MODEL_CAPABILITIES[model]?.[capability] || false;
+   }
+
+   /**
+    * Get model information including capabilities and context window
+    * @param model Model name
+    * @returns Model information object
+    */
+   getModelInfo(model: string) {
+     const capabilities = MODEL_CAPABILITIES[model];
+     if (!capabilities) {
+       throw new Error(`Model "${model}" is not supported`);
+     }
+
+     return {
+       model,
+       capabilities: {
+         chat: capabilities.chat,
+         streaming: capabilities.streaming,
+         online: capabilities.online,
+       },
+       contextWindow: capabilities.contextWindow,
+       version: "v1",
+     };
+   }
+
+   private normalizeMessages(
+     messages: string[] | Array<{ role: string; content: string }>
+   ): IPerplexityMessage[] {
+     if (Array.isArray(messages) && typeof messages[0] === "string") {
+       // Convert string array to message objects
+       return (messages as string[]).map((content, index) => ({
+         role: index === 0 ? "user" : "assistant",
+         content,
+       })) as IPerplexityMessage[];
+     }
+
+     return messages as IPerplexityMessage[];
+   }
+
+   private validateModel(model: string): void {
+     if (!models.includes(model)) {
+       throw new Error(
+         `Model "${model}" is not supported. Available models: ${models.join(
+           ", "
+         )}`
+       );
+     }
+   }
+ }
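One behavior worth noting in normalizeMessages: a plain string array is mapped so that the first element becomes a "user" message and every later element an "assistant" message, so multi-element string input is treated as prior conversation turns rather than multiple user messages. A usage sketch:

  const controller = new PerplexityV1Controller();

  // Single-string input becomes one "user" message.
  const chat = await controller.createChat(["What is the capital of France?"], "sonar-pro");
  console.log(chat.responses[0].text);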