llm-proxy 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/docs/plan.png ADDED
Binary file
package/jest.config.js ADDED
@@ -0,0 +1,10 @@
+ // jest.config.js
+ module.exports = {
+   preset: "ts-jest",
+   testEnvironment: "node",
+   transform: {
+     "^.+\\.tsx?$": "ts-jest", // Transforms TypeScript files using ts-jest
+   },
+   testMatch: ["**/src/test/**/*.test.ts"], // Updated to match src/test location
+   moduleFileExtensions: ["ts", "js"], // Recognizes ts and js files
+ };
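Given the testMatch glob above, tests are expected under src/test. A minimal sketch of such a test (the file name and assertion are hypothetical):

// src/test/sanity.test.ts — hypothetical file matching the testMatch glob
describe("llm-proxy", () => {
  it("runs under ts-jest", () => {
    expect(1 + 1).toBe(2);
  });
});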
package/package.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "name": "llm-proxy",
+   "version": "1.0.0",
+   "description": "An LLM Proxy that allows the user to interact with different language models from different providers using unified request and response formats.",
+   "main": "index.js",
+   "scripts": {
+     "test": "jest --config jest.config.js",
+     "dev": "ts-node src/index.ts"
+   },
+   "keywords": [],
+   "author": "Ahmad Jawabreh - jawabreh0",
+   "license": "MIT",
+   "devDependencies": {
+     "@types/dotenv": "^8.2.3",
+     "@types/jest": "^29.5.14",
+     "@types/node": "^22.8.6",
+     "jest": "^29.7.0",
+     "ts-jest": "^29.2.5",
+     "ts-node": "^10.9.2",
+     "typescript": "^5.6.3"
+   },
+   "dependencies": {
+     "@anthropic-ai/bedrock-sdk": "^0.11.1",
+     "@aws-sdk/client-bedrock-runtime": "^3.0.0",
+     "aws-sdk": "^2.1691.0",
+     "axios": "^1.7.7",
+     "dotenv": "^16.4.5",
+     "openai": "^4.69.0"
+   }
+ }
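Note: `@aws-sdk/client-bedrock-runtime` is added above because src/services/AwsBedrockAnthropicService.ts imports it but the original manifest did not declare it (the `^3.0.0` range is an assumption). With these scripts, the suite and the dev entry point run through npm as usual:

npm test     # runs jest --config jest.config.js
npm run dev  # runs ts-node src/index.ts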
@@ -0,0 +1,56 @@
+ // import { AwsBedrockAnthropicService } from "../services/AwsBedrockAnthropicService";
+ // import { BedrockAnthropicParsedChunk, Messages, SupportedLLMs } from "../types";
+
+ // export class AwsBedrockAnthropicChatClient {
+ //   private client: AwsBedrockAnthropicService;
+
+ //   constructor() {
+ //     this.client = new AwsBedrockAnthropicService();
+ //   }
+
+ //   async sendMessage(
+ //     messages: Messages,
+ //     model: SupportedLLMs,
+ //     maxTokens?: number,
+ //     temperature?: number,
+ //     systemPrompt?: string,
+ //     tools?: any
+ //   ) {
+ //     const response = await this.client.generateCompletion(
+ //       messages,
+ //       model,
+ //       maxTokens,
+ //       temperature,
+ //       systemPrompt,
+ //       tools
+ //     );
+ //     console.log("Response:", response);
+ //     return response;
+ //   }
+
+ //   async *sendMessageStream(
+ //     messages: Messages,
+ //     model: SupportedLLMs,
+ //     maxTokens?: number,
+ //     temperature?: number,
+ //     systemPrompt?: string,
+ //     tools?: any
+ //   ): AsyncGenerator<string, void, unknown> {
+ //     const stream = this.client.generateStreamCompletion(
+ //       messages,
+ //       model,
+ //       maxTokens,
+ //       temperature,
+ //       systemPrompt,
+ //       tools,
+ //       true
+ //     );
+
+ //     for await (const chunk of stream) {
+ //       const content = chunk.delta?.text;
+ //       if (content) {
+ //         yield content;
+ //       }
+ //     }
+ //   }
+ // }
@@ -0,0 +1,66 @@
+ // import { OpenAIService } from "../services/OpenAIService";
+ // import { OpenAIMessage, OpenAIMessagesRoles, OpenAIResponse, OpenAISupportedLLMs } from "../types";
+
+ // export class OpenAIClient {
+ //   private client: OpenAIService;
+ //   private messages: OpenAIMessage[] = [];
+
+ //   constructor(apiKey: string, systemPrompt: string) {
+ //     this.client = new OpenAIService(apiKey);
+ //     this.messages.push({
+ //       role: OpenAIMessagesRoles.SYSTEM,
+ //       content: systemPrompt,
+ //     });
+ //   }
+
+ //   async sendMessage(
+ //     userInput: string,
+ //     model: OpenAISupportedLLMs,
+ //     maxTokens: number,
+ //     temperature: number
+ //   ): Promise<string> {
+ //     this.messages.push({ role: OpenAIMessagesRoles.USER, content: userInput });
+
+ //     const response: OpenAIResponse = await this.client.generateCompletion(
+ //       this.messages,
+ //       model,
+ //       maxTokens,
+ //       temperature
+ //     );
+
+ //     const responseContent: string = response.choices[0].message.content;
+ //     this.messages.push({
+ //       role: OpenAIMessagesRoles.ASSISTANT,
+ //       content: responseContent,
+ //     });
+
+ //     return responseContent;
+ //   }
+
+ //   async *sendMessageStream(
+ //     userInput: string,
+ //     model: OpenAISupportedLLMs,
+ //     maxTokens: number,
+ //     temperature: number
+ //   ): AsyncGenerator<string, void, unknown> {
+ //     this.messages.push({ role: OpenAIMessagesRoles.USER, content: userInput });
+
+ //     const stream = this.client.generateStreamCompletion(
+ //       this.messages,
+ //       model,
+ //       maxTokens,
+ //       temperature
+ //     );
+
+ //     for await (const chunk of stream) {
+ //       if (
+ //         chunk.choices &&
+ //         chunk.choices[0].delta &&
+ //         chunk.choices[0].delta.content
+ //       ) {
+ //         const content = chunk.choices[0].delta.content;
+ //         yield content;
+ //       }
+ //     }
+ //   }
+ // }
package/src/index.ts ADDED
@@ -0,0 +1,145 @@
+ import { ProviderFinder } from "./middleware/ProviderFinder";
+ import { InputFormatAdapter } from "./middleware/InputFormatAdapter";
+ import { OutputFormatAdapter } from "./middleware/OutputFormatAdapter";
+ import { AwsBedrockAnthropicService } from "./services/AwsBedrockAnthropicService";
+ import { OpenAIService } from "./services/OpenAIService";
+ import {
+   Messages,
+   SupportedLLMs,
+   LLMResponse,
+   Providers,
+   OpenAIMessages,
+   BedrockAnthropicMessages,
+ } from "./types";
+
+ // Credentials interface: apiKey for OpenAI, awsConfig for Bedrock models
+ interface Credentials {
+   apiKey?: string;
+   awsConfig?: { accessKeyId: string; secretAccessKey: string; region: string };
+ }
+
+ // Main entry point handling both streaming and non-streaming requests
+ export async function generateLLMResponse(
+   messages: Messages,
+   model: SupportedLLMs,
+   maxTokens: number,
+   temperature: number,
+   systemPrompt: string,
+   tools: any,
+   stream: boolean = false,
+   credentials: Credentials
+ ): Promise<LLMResponse | AsyncGenerator<LLMResponse>> {
+   // Step 1: Identify the provider based on the model
+   const provider = ProviderFinder.getProvider(model);
+
+   // Step 2: Initialize the correct service for that provider
+   let service: OpenAIService | AwsBedrockAnthropicService;
+   if (provider === Providers.OPENAI) {
+     if (!credentials.apiKey) {
+       throw new Error("OpenAI API key is required for OpenAI models.");
+     }
+     service = new OpenAIService(credentials.apiKey);
+   } else if (provider === Providers.ANTHROPIC_BEDROCK) {
+     const awsConfig = credentials.awsConfig;
+     if (!awsConfig) {
+       throw new Error("AWS credentials are required for Bedrock models.");
+     }
+     service = new AwsBedrockAnthropicService(
+       awsConfig.accessKeyId,
+       awsConfig.secretAccessKey,
+       awsConfig.region
+     );
+   } else {
+     throw new Error("Unsupported provider");
+   }
+
+   // Step 3: If the provider is not OpenAI, adapt the input to the provider's format
+   const adaptedMessages =
+     provider !== Providers.OPENAI
+       ? InputFormatAdapter.adaptMessages(messages, provider)
+       : messages;
+
+   // Step 4: Dispatch to the streaming or non-streaming handler
+   if (stream) {
+     return handleStreamResponse(
+       service,
+       adaptedMessages,
+       model,
+       maxTokens,
+       temperature,
+       systemPrompt,
+       tools,
+       provider
+     );
+   } else {
+     return handleNonStreamResponse(
+       service,
+       adaptedMessages,
+       model,
+       maxTokens,
+       temperature,
+       systemPrompt,
+       tools,
+       provider
+     );
+   }
+ }
+
+ // Helper for non-streaming responses
+ async function handleNonStreamResponse(
+   service: OpenAIService | AwsBedrockAnthropicService,
+   messages: Messages,
+   model: SupportedLLMs,
+   maxTokens: number,
+   temperature: number,
+   systemPrompt: string,
+   tools: any,
+   provider: Providers
+ ): Promise<LLMResponse> {
+   const response = await service.generateCompletion(
+     provider === Providers.OPENAI
+       ? (messages as OpenAIMessages)
+       : (messages as BedrockAnthropicMessages as any),
+     model,
+     maxTokens,
+     temperature,
+     systemPrompt,
+     tools
+   );
+
+   // Step 5: Adapt the response if the provider is not OpenAI
+   return provider === Providers.OPENAI
+     ? response
+     : OutputFormatAdapter.adaptResponse(response, provider);
+ }
+
+ // Helper for streaming responses
+ async function* handleStreamResponse(
+   service: OpenAIService | AwsBedrockAnthropicService,
+   messages: Messages,
+   model: SupportedLLMs,
+   maxTokens: number,
+   temperature: number,
+   systemPrompt: string,
+   tools: any,
+   provider: Providers
+ ): AsyncGenerator<LLMResponse> {
+   const stream = service.generateStreamCompletion(
+     provider === Providers.OPENAI
+       ? (messages as OpenAIMessages)
+       : (messages as BedrockAnthropicMessages as any),
+     model,
+     maxTokens,
+     temperature,
+     systemPrompt,
+     tools,
+     true
+   );
+
+   // Step 6: Yield chunks, adapting each one if the provider is not OpenAI
+   for await (const chunk of stream) {
+     yield provider === Providers.OPENAI
+       ? chunk
+       : OutputFormatAdapter.adaptResponse(chunk, provider);
+   }
+ }
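For orientation, a minimal sketch of calling the exported entry point. The message/model literals and the import path are assumptions inferred from the code above (the concrete Messages and SupportedLLMs types live in src/types, hence the `as any` casts):

// Hypothetical usage sketch; shapes and the model id are assumptions.
import { generateLLMResponse } from "llm-proxy";

async function demo() {
  const response = await generateLLMResponse(
    [{ role: "user", content: "Hello!" }] as any, // Messages (shape assumed)
    { type: "OpenAI", model: "gpt-4o" } as any,   // model id must be an OpenAISupportedLLMs value (placeholder here)
    256,                                          // maxTokens
    0.7,                                          // temperature
    "You are a helpful assistant.",               // systemPrompt
    undefined,                                    // tools
    false,                                        // stream
    { apiKey: process.env.OPENAI_API_KEY! }       // Credentials
  );
  console.log(response);
}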
package/src/middleware/InputFormatAdapter.ts ADDED
@@ -0,0 +1,51 @@
+ import {
+   BedrockAnthropicContentType,
+   BedrockAnthropicMessage,
+   BedrockAnthropicMessageRole,
+   Messages,
+   OpenAIFunctionMessage,
+   OpenAIMessages,
+   OpenAIMessagesRoles,
+   Providers,
+ } from "../types";
+
+ export class InputFormatAdapter {
+   static adaptMessages(
+     messages: Messages,
+     provider: Providers
+   ): OpenAIMessages | BedrockAnthropicMessage[] {
+     switch (provider) {
+       case Providers.OPENAI:
+         return messages.map((msg) => {
+           if (msg.role === OpenAIMessagesRoles.FUNCTION) {
+             return {
+               role: msg.role,
+               content: msg.content,
+               name: (msg as OpenAIFunctionMessage).name,
+             };
+           }
+           return {
+             role: msg.role,
+             content: msg.content as string,
+           };
+         }) as OpenAIMessages;
+
+       case Providers.ANTHROPIC_BEDROCK:
+         return messages.map((msg) => ({
+           role:
+             msg.role === OpenAIMessagesRoles.USER
+               ? BedrockAnthropicMessageRole.USER
+               : BedrockAnthropicMessageRole.ASSISTANT,
+           content: [
+             {
+               type: BedrockAnthropicContentType.TEXT,
+               text: msg.content as string,
+             },
+           ],
+         })) as BedrockAnthropicMessage[];
+
+       default:
+         throw new Error(`Unsupported provider: ${provider}`);
+     }
+   }
+ }
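For illustration, the Bedrock branch above rewrites a flat message into Anthropic's block-content form (values are illustrative):

// Unified input:            { role: "user", content: "Hi there" }
// Bedrock Anthropic output: { role: "user", content: [{ type: "text", text: "Hi there" }] }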
package/src/middleware/OutputFormatAdapter.ts ADDED
@@ -0,0 +1,83 @@
+ import {
+   BedrockAnthropicResponse,
+   OpenAIResponse,
+   LLMResponse,
+   Providers,
+   BedrockAnthropicContent,
+   BedrockAnthropicContentType,
+   BedrockAnthropicTextContent,
+   BedrockAnthropicToolResultContent,
+   BedrockAnthropicToolUseContent,
+ } from "../types";
+
+ export class OutputFormatAdapter {
+   static adaptResponse(response: any, provider: Providers): LLMResponse {
+     switch (provider) {
+       case Providers.OPENAI:
+         return response as OpenAIResponse;
+
+       case Providers.ANTHROPIC_BEDROCK:
+         return this.adaptAnthropicBedrockResponse(
+           response as BedrockAnthropicResponse
+         );
+
+       default:
+         throw new Error(`Unsupported provider: ${provider}`);
+     }
+   }
+
+   private static adaptAnthropicBedrockResponse(
+     response: BedrockAnthropicResponse
+   ): OpenAIResponse {
+     const openAIResponse: OpenAIResponse = {
+       id: response.id,
+       object: "text_completion",
+       created: Math.floor(Date.now() / 1000), // OpenAI `created` is in Unix seconds
+       model: response.model,
+       choices: response.content.map((contentBlock, index) => ({
+         index,
+         message: {
+           role: this.mapRole(contentBlock),
+           content: this.extractContent(contentBlock),
+         },
+         logprobs: null,
+         finish_reason: response.stop_reason,
+       })),
+       usage: {
+         prompt_tokens: response.usage.input_tokens,
+         completion_tokens: response.usage.output_tokens,
+         total_tokens:
+           response.usage.input_tokens + response.usage.output_tokens,
+         prompt_tokens_details: { cached_tokens: 0 },
+         completion_tokens_details: { reasoning_tokens: 0 },
+       },
+       system_fingerprint: "anthropic_translation",
+     };
+     return openAIResponse;
+   }
+
+   private static mapRole(content: BedrockAnthropicContent): string {
+     switch (content.type) {
+       case BedrockAnthropicContentType.TOOL_USE:
+       case BedrockAnthropicContentType.TOOL_RESULT:
+         return "tool";
+       case BedrockAnthropicContentType.TEXT:
+         return "assistant";
+       default:
+         return "unknown";
+     }
+   }
+
+   private static extractContent(content: BedrockAnthropicContent): string {
+     switch (content.type) {
+       case BedrockAnthropicContentType.TEXT:
+         return (content as BedrockAnthropicTextContent).text;
+       case BedrockAnthropicContentType.TOOL_RESULT:
+         return (content as BedrockAnthropicToolResultContent).content || "";
+       case BedrockAnthropicContentType.TOOL_USE:
+         return (content as BedrockAnthropicToolUseContent).id || "";
+       default:
+         return "";
+     }
+   }
+ }
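A sketch of the mapping this adapter performs (field values are illustrative, not taken from a real response):

// Bedrock input:
//   { id: "...", model: "...", stop_reason: "end_turn",
//     content: [{ type: "text", text: "Hello!" }],
//     usage: { input_tokens: 10, output_tokens: 5 } }
// OpenAI-shaped output:
//   { id: "...", object: "text_completion", choices: [{ index: 0,
//     message: { role: "assistant", content: "Hello!" }, finish_reason: "end_turn" }],
//     usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 }, ... }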
package/src/middleware/ProviderFinder.ts ADDED
@@ -0,0 +1,23 @@
+ import {
+   BedrockAnthropicSupportedLLMs,
+   OpenAISupportedLLMs,
+   Providers,
+   SupportedLLMs,
+ } from "../types";
+
+ export class ProviderFinder {
+   static getProvider(model: SupportedLLMs): Providers {
+     if (
+       model.type === "OpenAI" &&
+       Object.values(OpenAISupportedLLMs).includes(model.model)
+     ) {
+       return Providers.OPENAI;
+     } else if (
+       model.type === "BedrockAnthropic" &&
+       Object.values(BedrockAnthropicSupportedLLMs).includes(model.model)
+     ) {
+       return Providers.ANTHROPIC_BEDROCK;
+     }
+     throw new Error(`Unsupported model: ${model.model}`);
+   }
+ }
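For illustration, the descriptor shapes the finder accepts (enum member values are defined in src/types):

// { type: "OpenAI", model: <an OpenAISupportedLLMs value> }                    -> Providers.OPENAI
// { type: "BedrockAnthropic", model: <a BedrockAnthropicSupportedLLMs value> } -> Providers.ANTHROPIC_BEDROCK
// anything else                                                                -> throws "Unsupported model: ..."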
package/src/services/AwsBedrockAnthropicService.ts ADDED
@@ -0,0 +1,111 @@
+ import {
+   BedrockAnthropicParsedChunk,
+   BedrockAnthropicResponse,
+   Messages,
+   SupportedLLMs,
+ } from "../types";
+ import {
+   InvokeModelCommand,
+   BedrockRuntimeClient,
+   InvokeModelWithResponseStreamCommand,
+ } from "@aws-sdk/client-bedrock-runtime";
+ import { ClientService } from "./ClientService";
+
+ export class AwsBedrockAnthropicService implements ClientService {
+   private bedrock: BedrockRuntimeClient;
+
+   constructor(awsAccessKey: string, awsSecretKey: string, region: string) {
+     this.bedrock = new BedrockRuntimeClient({
+       region,
+       credentials: {
+         accessKeyId: awsAccessKey,
+         secretAccessKey: awsSecretKey,
+       },
+     });
+   }
+
+   async generateCompletion(
+     messages: Messages,
+     model?: SupportedLLMs,
+     maxTokens?: number,
+     temperature?: number,
+     systemPrompt?: string,
+     tools?: any
+   ): Promise<BedrockAnthropicResponse> {
+     const modelId =
+       model?.type === "BedrockAnthropic" ? model.model : undefined;
+     if (!modelId) {
+       throw new Error("Invalid model type for AwsBedrockAnthropicService");
+     }
+
+     const body = JSON.stringify({
+       anthropic_version: "bedrock-2023-05-31",
+       max_tokens: maxTokens,
+       temperature,
+       messages,
+       system: systemPrompt,
+       ...(tools && tools.length > 0 ? { tools } : {}),
+     });
+
+     const command = new InvokeModelCommand({
+       modelId,
+       body,
+       contentType: "application/json",
+       accept: "application/json",
+     });
+
+     const response = await this.bedrock.send(command);
+     return JSON.parse(new TextDecoder().decode(response.body));
+   }
+
+   async *generateStreamCompletion(
+     messages: Messages,
+     model?: SupportedLLMs,
+     maxTokens?: number,
+     temperature?: number,
+     systemPrompt?: string,
+     tools?: any,
+     stream?: boolean
+   ): AsyncGenerator<BedrockAnthropicParsedChunk, void, unknown> {
+     const modelId =
+       model?.type === "BedrockAnthropic" ? model.model : undefined;
+     if (!modelId) {
+       throw new Error("Invalid model type for AwsBedrockAnthropicService");
+     }
+
+     const body = JSON.stringify({
+       anthropic_version: "bedrock-2023-05-31",
+       max_tokens: maxTokens,
+       temperature,
+       messages,
+       system: systemPrompt,
+       ...(tools && tools.length > 0 ? { tools } : {}),
+     });
+
+     const command = new InvokeModelWithResponseStreamCommand({
+       modelId,
+       body,
+       contentType: "application/json",
+       accept: "application/json",
+     });
+
+     const response = await this.bedrock.send(command);
+
+     if (response.body) {
+       const decoder = new TextDecoder("utf-8");
+
+       for await (const payload of response.body) {
+         const decodedString = decoder.decode(payload.chunk?.bytes, {
+           stream: true,
+         });
+
+         try {
+           const jsonObject = JSON.parse(decodedString);
+           yield jsonObject;
+         } catch (error) {
+           console.error("Failed to parse chunk as JSON:", error);
+         }
+       }
+     }
+   }
+ }
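A minimal consumption sketch for this service (the credentials, region, model id, and message shape are placeholders, not values from this package):

// Hypothetical usage; the model id must be a BedrockAnthropicSupportedLLMs value.
async function bedrockStreamDemo(accessKeyId: string, secretAccessKey: string) {
  const service = new AwsBedrockAnthropicService(accessKeyId, secretAccessKey, "us-east-1");
  const stream = service.generateStreamCompletion(
    [{ role: "user", content: [{ type: "text", text: "Hello" }] }] as any, // Messages shape assumed
    { type: "BedrockAnthropic", model: "anthropic.claude-3-sonnet-20240229-v1:0" } as any,
    512,
    0.7
  );
  for await (const chunk of stream) {
    console.log(chunk); // each chunk is one parsed JSON event from the response stream
  }
}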
package/src/services/ClientService.ts ADDED
@@ -0,0 +1,29 @@
+ import {
+   BedrockAnthropicParsedChunk,
+   LLMResponse,
+   Messages,
+   SupportedLLMs,
+ } from "../types";
+
+ // For non-streaming responses
+ export interface ClientService {
+   generateCompletion(
+     messages: Messages,
+     model?: SupportedLLMs,
+     maxTokens?: number,
+     temperature?: number,
+     systemPrompt?: string,
+     tools?: any
+   ): Promise<LLMResponse>;
+
+   // For streaming responses
+   generateStreamCompletion(
+     messages: Messages,
+     model?: SupportedLLMs,
+     maxTokens?: number,
+     temperature?: number,
+     systemPrompt?: string,
+     tools?: any,
+     stream?: boolean
+   ): AsyncGenerator<BedrockAnthropicParsedChunk, void, unknown>;
+ }
package/src/services/OpenAIService.ts ADDED
@@ -0,0 +1,68 @@
+ import OpenAI from "openai";
+ import { OpenAIMessages, OpenAIResponse, SupportedLLMs } from "../types";
+ import { ClientService } from "./ClientService";
+
+ export class OpenAIService implements ClientService {
+   private openai: OpenAI;
+
+   constructor(apiKey: string) {
+     this.openai = new OpenAI({ apiKey });
+   }
+
+   async generateCompletion(
+     messages: OpenAIMessages,
+     model: SupportedLLMs,
+     maxTokens: number,
+     temperature: number,
+     systemPrompt?: string, // Optional; not forwarded to the API here
+     tools?: any // Optional; not forwarded to the API here
+   ): Promise<OpenAIResponse> {
+     if (model.type !== "OpenAI") {
+       throw new Error("Unsupported model type for OpenAIService.");
+     }
+
+     try {
+       const response = await this.openai.chat.completions.create({
+         model: model.model,
+         messages,
+         max_tokens: maxTokens,
+         temperature,
+       });
+       return response as OpenAIResponse;
+     } catch (error) {
+       console.error("Error generating text:", error);
+       throw error;
+     }
+   }
+
+   async *generateStreamCompletion(
+     messages: OpenAIMessages,
+     model: SupportedLLMs,
+     maxTokens: number,
+     temperature: number,
+     systemPrompt?: string, // Optional; not forwarded to the API here
+     tools?: any, // Optional; not forwarded to the API here
+     stream?: boolean // Optional; streaming is always enabled below
+   ): AsyncGenerator<any, void, unknown> {
+     if (model.type !== "OpenAI") {
+       throw new Error("Unsupported model type for OpenAIService.");
+     }
+
+     try {
+       const stream = await this.openai.chat.completions.create({
+         model: model.model,
+         messages,
+         max_tokens: maxTokens,
+         temperature,
+         stream: true,
+       });
+
+       for await (const chunk of stream) {
+         yield chunk;
+       }
+     } catch (error) {
+       console.error("Error in stream completion:", error);
+       throw error;
+     }
+   }
+ }
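And a matching sketch for the OpenAI path (the message literal and model id are placeholders):

// Hypothetical usage; the model id must be an OpenAISupportedLLMs value.
async function openAIStreamDemo() {
  const service = new OpenAIService(process.env.OPENAI_API_KEY!);
  const stream = service.generateStreamCompletion(
    [{ role: "user", content: "Tell me a joke." }] as any, // OpenAIMessages shape assumed
    { type: "OpenAI", model: "gpt-4o" } as any,
    128,
    0.7
  );
  for await (const chunk of stream) {
    process.stdout.write(chunk.choices?.[0]?.delta?.content ?? "");
  }
}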