@pga-ai/adapters-llm-openai 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +208 -0
- package/package.json +30 -0
- package/src/index.ts +178 -0
- package/tsconfig.json +9 -0
package/README.md
ADDED
|
@@ -0,0 +1,208 @@
|
|
|
1
|
+
# @pga-ai/adapters-llm-openai
|
|
2
|
+
|
|
3
|
+
OpenAI adapter for PGA (Genomic Self-Evolving Prompts).
|
|
4
|
+
|
|
5
|
+
## Supported Models
|
|
6
|
+
|
|
7
|
+
- **GPT-4 Turbo** (`gpt-4-turbo-preview`)
|
|
8
|
+
- **GPT-4** (`gpt-4`)
|
|
9
|
+
- **GPT-3.5 Turbo** (`gpt-3.5-turbo`)
|
|
10
|
+
|
|
11
|
+
## Features
|
|
12
|
+
|
|
13
|
+
- ✅ Full LLMAdapter interface implementation
|
|
14
|
+
- ✅ Streaming responses support
|
|
15
|
+
- ✅ Token usage tracking
|
|
16
|
+
- ✅ Configurable temperature and top_p
|
|
17
|
+
- ✅ Support for custom base URLs (proxies, Azure)
|
|
18
|
+
- ✅ Organization ID support
|
|
19
|
+
|
|
20
|
+
## Installation
|
|
21
|
+
|
|
22
|
+
```bash
|
|
23
|
+
npm install @pga-ai/adapters-llm-openai
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
## Usage
|
|
27
|
+
|
|
28
|
+
### Basic Usage
|
|
29
|
+
|
|
30
|
+
```typescript
|
|
31
|
+
import { OpenAIAdapter } from '@pga-ai/adapters-llm-openai';
|
|
32
|
+
|
|
33
|
+
const adapter = new OpenAIAdapter({
|
|
34
|
+
apiKey: process.env.OPENAI_API_KEY!,
|
|
35
|
+
model: 'gpt-4-turbo-preview',
|
|
36
|
+
});
|
|
37
|
+
|
|
38
|
+
// Chat
|
|
39
|
+
const response = await adapter.chat([
|
|
40
|
+
{ role: 'user', content: 'Hello, GPT-4!' }
|
|
41
|
+
]);
|
|
42
|
+
|
|
43
|
+
console.log(response.content);
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
### Streaming Responses
|
|
47
|
+
|
|
48
|
+
```typescript
|
|
49
|
+
const adapter = new OpenAIAdapter({
|
|
50
|
+
apiKey: process.env.OPENAI_API_KEY!,
|
|
51
|
+
model: 'gpt-4',
|
|
52
|
+
});
|
|
53
|
+
|
|
54
|
+
for await (const chunk of adapter.chatStream([
|
|
55
|
+
{ role: 'user', content: 'Tell me a story' }
|
|
56
|
+
])) {
|
|
57
|
+
process.stdout.write(chunk.delta);
|
|
58
|
+
}
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
### With PGA Genome
|
|
62
|
+
|
|
63
|
+
```typescript
|
|
64
|
+
import { PGA } from '@pga-ai/core';
|
|
65
|
+
import { OpenAIAdapter } from '@pga-ai/adapters-llm-openai';
|
|
66
|
+
import { PostgresAdapter } from '@pga-ai/adapters-storage-postgres';
|
|
67
|
+
|
|
68
|
+
const pga = new PGA({
|
|
69
|
+
llmAdapter: new OpenAIAdapter({
|
|
70
|
+
apiKey: process.env.OPENAI_API_KEY!,
|
|
71
|
+
model: 'gpt-4-turbo-preview',
|
|
72
|
+
defaultTemperature: 0.7,
|
|
73
|
+
}),
|
|
74
|
+
storageAdapter: new PostgresAdapter({
|
|
75
|
+
connectionString: process.env.DATABASE_URL!,
|
|
76
|
+
}),
|
|
77
|
+
});
|
|
78
|
+
|
|
79
|
+
const genome = await pga.createGenome({
|
|
80
|
+
layer0: {
|
|
81
|
+
systemPrompt: 'You are a helpful AI assistant.',
|
|
82
|
+
constraints: ['Be concise', 'Use examples'],
|
|
83
|
+
capabilities: ['coding', 'debugging'],
|
|
84
|
+
},
|
|
85
|
+
});
|
|
86
|
+
|
|
87
|
+
const response = await genome.chat('Help me debug this code', {
|
|
88
|
+
userId: 'user-123',
|
|
89
|
+
});
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
## Configuration
|
|
93
|
+
|
|
94
|
+
```typescript
|
|
95
|
+
interface OpenAIAdapterConfig {
|
|
96
|
+
/** OpenAI API Key (required) */
|
|
97
|
+
apiKey: string;
|
|
98
|
+
|
|
99
|
+
/** Model to use (default: 'gpt-4-turbo-preview') */
|
|
100
|
+
model?: string;
|
|
101
|
+
|
|
102
|
+
/** Organization ID (optional) */
|
|
103
|
+
organization?: string;
|
|
104
|
+
|
|
105
|
+
/** Base URL for proxies or Azure (optional) */
|
|
106
|
+
baseURL?: string;
|
|
107
|
+
|
|
108
|
+
/** Temperature 0-2 (default: 1.0) */
|
|
109
|
+
defaultTemperature?: number;
|
|
110
|
+
|
|
111
|
+
/** Top P 0-1 (default: 1.0) */
|
|
112
|
+
defaultTopP?: number;
|
|
113
|
+
}
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
## Available Models
|
|
117
|
+
|
|
118
|
+
| Model | ID | Context | Best For |
|
|
119
|
+
|-------|-----|---------|----------|
|
|
120
|
+
| **GPT-4 Turbo** | `gpt-4-turbo-preview` | 128K | Complex tasks, latest features |
|
|
121
|
+
| **GPT-4** | `gpt-4` | 8K | High-quality responses |
|
|
122
|
+
| **GPT-3.5 Turbo** | `gpt-3.5-turbo` | 16K | Fast, cost-effective |
|
|
123
|
+
|
|
124
|
+
## Environment Variables
|
|
125
|
+
|
|
126
|
+
```bash
|
|
127
|
+
# Required
|
|
128
|
+
OPENAI_API_KEY=sk-...
|
|
129
|
+
|
|
130
|
+
# Optional
|
|
131
|
+
OPENAI_ORGANIZATION=org-...
|
|
132
|
+
OPENAI_BASE_URL=https://api.openai.com/v1
|
|
133
|
+
```
|
|
134
|
+
|
|
135
|
+
## Error Handling
|
|
136
|
+
|
|
137
|
+
```typescript
|
|
138
|
+
try {
|
|
139
|
+
const response = await adapter.chat(messages);
|
|
140
|
+
} catch (error) {
|
|
141
|
+
if (error.status === 401) {
|
|
142
|
+
console.error('Invalid API key');
|
|
143
|
+
} else if (error.status === 429) {
|
|
144
|
+
console.error('Rate limit exceeded');
|
|
145
|
+
} else if (error.status === 500) {
|
|
146
|
+
console.error('OpenAI server error');
|
|
147
|
+
}
|
|
148
|
+
}
|
|
149
|
+
```
|
|
150
|
+
|
|
151
|
+
## Token Usage
|
|
152
|
+
|
|
153
|
+
```typescript
|
|
154
|
+
const response = await adapter.chat(messages);
|
|
155
|
+
|
|
156
|
+
console.log('Usage:', {
|
|
157
|
+
prompt: response.usage.inputTokens,
|
|
158
|
+
completion: response.usage.outputTokens,
|
|
159
|
+
total: response.usage.inputTokens + response.usage.outputTokens,
|
|
160
|
+
});
|
|
161
|
+
```
|
|
162
|
+
|
|
163
|
+
## Advanced Configuration
|
|
164
|
+
|
|
165
|
+
### Custom Base URL (Azure OpenAI)
|
|
166
|
+
|
|
167
|
+
```typescript
|
|
168
|
+
const adapter = new OpenAIAdapter({
|
|
169
|
+
apiKey: process.env.AZURE_OPENAI_KEY!,
|
|
170
|
+
baseURL: 'https://your-resource.openai.azure.com',
|
|
171
|
+
model: 'gpt-4',
|
|
172
|
+
});
|
|
173
|
+
```
|
|
174
|
+
|
|
175
|
+
### Per-Request Options
|
|
176
|
+
|
|
177
|
+
```typescript
|
|
178
|
+
const response = await adapter.chat(messages, {
|
|
179
|
+
temperature: 0.5,
|
|
180
|
+
topP: 0.9,
|
|
181
|
+
maxTokens: 2000,
|
|
182
|
+
});
|
|
183
|
+
```
|
|
184
|
+
|
|
185
|
+
## Comparison with Claude
|
|
186
|
+
|
|
187
|
+
| Feature | OpenAI (GPT-4) | Claude (Opus/Sonnet) |
|
|
188
|
+
|---------|----------------|----------------------|
|
|
189
|
+
| Context Window | 128K (Turbo) | 200K |
|
|
190
|
+
| Streaming | ✅ | ✅ |
|
|
191
|
+
| Function Calling | ✅ | ✅ (Tool Use) |
|
|
192
|
+
| Vision | ✅ | ✅ |
|
|
193
|
+
| Speed | Fast | Very Fast (Sonnet) |
|
|
194
|
+
| Cost | Medium | Varies |
|
|
195
|
+
|
|
196
|
+
## License
|
|
197
|
+
|
|
198
|
+
MIT
|
|
199
|
+
|
|
200
|
+
## Author
|
|
201
|
+
|
|
202
|
+
**Luis Alfredo Velasquez Duran** (Germany, 2025)
|
|
203
|
+
|
|
204
|
+
## Links
|
|
205
|
+
|
|
206
|
+
- [PGA Core Documentation](../../core/README.md)
|
|
207
|
+
- [OpenAI API Documentation](https://platform.openai.com/docs)
|
|
208
|
+
- [GitHub Repository](https://github.com/LuisvelMarketer/pga-platform)
|
package/package.json
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@pga-ai/adapters-llm-openai",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "OpenAI adapter for PGA",
|
|
5
|
+
"main": "./dist/index.js",
|
|
6
|
+
"types": "./dist/index.d.ts",
|
|
7
|
+
"type": "module",
|
|
8
|
+
"scripts": {
|
|
9
|
+
"build": "tsc"
|
|
10
|
+
},
|
|
11
|
+
"keywords": [
|
|
12
|
+
"pga",
|
|
13
|
+
"openai",
|
|
14
|
+
"gpt",
|
|
15
|
+
"llm",
|
|
16
|
+
"adapter"
|
|
17
|
+
],
|
|
18
|
+
"author": "Luis Alfredo Velasquez Duran",
|
|
19
|
+
"license": "MIT",
|
|
20
|
+
"dependencies": {
|
|
21
|
+
"@pga-ai/core": "*",
|
|
22
|
+
"openai": "^4.77.3"
|
|
23
|
+
},
|
|
24
|
+
"devDependencies": {
|
|
25
|
+
"typescript": "^5.3.3"
|
|
26
|
+
},
|
|
27
|
+
"publishConfig": {
|
|
28
|
+
"access": "public"
|
|
29
|
+
}
|
|
30
|
+
}
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI Adapter for PGA
|
|
3
|
+
* Created by Luis Alfredo Velasquez Duran (Germany, 2025)
|
|
4
|
+
*
|
|
5
|
+
* Supports:
|
|
6
|
+
* - GPT-4 Turbo
|
|
7
|
+
* - GPT-4
|
|
8
|
+
* - GPT-3.5 Turbo
|
|
9
|
+
* - Streaming responses
|
|
10
|
+
*
|
|
11
|
+
* @example
|
|
12
|
+
* ```typescript
|
|
13
|
+
* import { OpenAIAdapter } from '@pga-ai/adapters-llm-openai';
|
|
14
|
+
*
|
|
15
|
+
* const adapter = new OpenAIAdapter({
|
|
16
|
+
* apiKey: process.env.OPENAI_API_KEY,
|
|
17
|
+
* model: 'gpt-4-turbo-preview',
|
|
18
|
+
* });
|
|
19
|
+
* ```
|
|
20
|
+
*/
|
|
21
|
+
|
|
22
|
+
import OpenAI from 'openai';
|
|
23
|
+
import type {
|
|
24
|
+
LLMAdapter,
|
|
25
|
+
Message,
|
|
26
|
+
ChatOptions,
|
|
27
|
+
ChatResponse,
|
|
28
|
+
ChatChunk,
|
|
29
|
+
} from '@pga-ai/core';
|
|
30
|
+
|
|
31
|
+
/**
 * Configuration for {@link OpenAIAdapter}.
 *
 * Only `apiKey` is required; everything else falls back to the defaults
 * noted below (applied in the adapter, not here).
 */
export interface OpenAIAdapterConfig {
  /**
   * OpenAI API Key. Passed straight to the `openai` SDK client;
   * never logged by this adapter.
   */
  apiKey: string;

  /**
   * Model identifier sent with every request.
   * @default 'gpt-4-turbo-preview'
   */
  model?: string;

  /**
   * Organization ID (optional). Forwarded to the SDK client; useful when
   * the API key belongs to multiple organizations.
   */
  organization?: string;

  /**
   * Base URL (for proxies or Azure). Forwarded to the SDK client;
   * leave unset to use the public OpenAI endpoint.
   */
  baseURL?: string;

  /**
   * Default sampling temperature (0-2), used when a request does not
   * supply its own temperature.
   * @default 1.0
   */
  defaultTemperature?: number;

  /**
   * Default nucleus-sampling top P (0-1), used when a request does not
   * supply its own value.
   * @default 1.0
   */
  defaultTopP?: number;
}
|
|
65
|
+
|
|
66
|
+
export class OpenAIAdapter implements LLMAdapter {
|
|
67
|
+
readonly name = 'openai';
|
|
68
|
+
readonly model: string;
|
|
69
|
+
|
|
70
|
+
private client: OpenAI;
|
|
71
|
+
private config: OpenAIAdapterConfig;
|
|
72
|
+
|
|
73
|
+
constructor(config: OpenAIAdapterConfig) {
|
|
74
|
+
this.config = config;
|
|
75
|
+
this.model = config.model || 'gpt-4-turbo-preview';
|
|
76
|
+
|
|
77
|
+
this.client = new OpenAI({
|
|
78
|
+
apiKey: config.apiKey,
|
|
79
|
+
organization: config.organization,
|
|
80
|
+
baseURL: config.baseURL,
|
|
81
|
+
});
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
/**
|
|
85
|
+
* Chat with OpenAI
|
|
86
|
+
*/
|
|
87
|
+
async chat(messages: Message[], options?: ChatOptions): Promise<ChatResponse> {
|
|
88
|
+
// Convert PGA messages to OpenAI format
|
|
89
|
+
const openaiMessages = this.convertMessages(messages);
|
|
90
|
+
|
|
91
|
+
const response = await this.client.chat.completions.create({
|
|
92
|
+
model: this.model,
|
|
93
|
+
messages: openaiMessages,
|
|
94
|
+
temperature: options?.temperature ?? this.config.defaultTemperature ?? 1.0,
|
|
95
|
+
top_p: this.config.defaultTopP ?? 1.0,
|
|
96
|
+
max_tokens: options?.maxTokens ?? 4096,
|
|
97
|
+
stream: false,
|
|
98
|
+
});
|
|
99
|
+
|
|
100
|
+
const choice = response.choices[0];
|
|
101
|
+
if (!choice) {
|
|
102
|
+
throw new Error('No response from OpenAI');
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
return {
|
|
106
|
+
content: choice.message.content || '',
|
|
107
|
+
usage: {
|
|
108
|
+
inputTokens: response.usage?.prompt_tokens || 0,
|
|
109
|
+
outputTokens: response.usage?.completion_tokens || 0,
|
|
110
|
+
},
|
|
111
|
+
metadata: {
|
|
112
|
+
finishReason: choice.finish_reason || 'stop',
|
|
113
|
+
model: this.model,
|
|
114
|
+
},
|
|
115
|
+
};
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
/**
|
|
119
|
+
* Chat with streaming
|
|
120
|
+
*/
|
|
121
|
+
async *chatStream(
|
|
122
|
+
messages: Message[],
|
|
123
|
+
options?: ChatOptions,
|
|
124
|
+
): AsyncGenerator<ChatChunk, void, unknown> {
|
|
125
|
+
const openaiMessages = this.convertMessages(messages);
|
|
126
|
+
|
|
127
|
+
const stream = await this.client.chat.completions.create({
|
|
128
|
+
model: this.model,
|
|
129
|
+
messages: openaiMessages,
|
|
130
|
+
temperature: options?.temperature ?? this.config.defaultTemperature ?? 1.0,
|
|
131
|
+
top_p: this.config.defaultTopP ?? 1.0,
|
|
132
|
+
max_tokens: options?.maxTokens ?? 4096,
|
|
133
|
+
stream: true,
|
|
134
|
+
});
|
|
135
|
+
|
|
136
|
+
for await (const chunk of stream) {
|
|
137
|
+
const delta = chunk.choices[0]?.delta;
|
|
138
|
+
if (!delta?.content) {
|
|
139
|
+
// Check if stream is done
|
|
140
|
+
if (chunk.choices[0]?.finish_reason) {
|
|
141
|
+
yield { delta: '', done: true };
|
|
142
|
+
}
|
|
143
|
+
continue;
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
yield {
|
|
147
|
+
delta: delta.content,
|
|
148
|
+
done: false,
|
|
149
|
+
};
|
|
150
|
+
}
|
|
151
|
+
}
|
|
152
|
+
|
|
153
|
+
/**
|
|
154
|
+
* Convert PGA messages to OpenAI format
|
|
155
|
+
*/
|
|
156
|
+
private convertMessages(
|
|
157
|
+
messages: Message[],
|
|
158
|
+
): OpenAI.Chat.Completions.ChatCompletionMessageParam[] {
|
|
159
|
+
return messages.map(msg => {
|
|
160
|
+
if (msg.role === 'system') {
|
|
161
|
+
return {
|
|
162
|
+
role: 'system',
|
|
163
|
+
content: msg.content,
|
|
164
|
+
};
|
|
165
|
+
} else if (msg.role === 'user') {
|
|
166
|
+
return {
|
|
167
|
+
role: 'user',
|
|
168
|
+
content: msg.content,
|
|
169
|
+
};
|
|
170
|
+
} else {
|
|
171
|
+
return {
|
|
172
|
+
role: 'assistant',
|
|
173
|
+
content: msg.content,
|
|
174
|
+
};
|
|
175
|
+
}
|
|
176
|
+
});
|
|
177
|
+
}
|
|
178
|
+
}
|