@pga-ai/adapters-llm-openai 0.1.0 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +3 -3
  2. package/package.json +21 -5
  3. package/src/index.ts +57 -47
package/README.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # @pga-ai/adapters-llm-openai
2
2
 
3
- OpenAI adapter for PGA (Genomic Self-Evolving Prompts).
3
+ OpenAI adapter for GSEP (Genomic Self-Evolving Prompts).
4
4
 
5
5
  ## Supported Models
6
6
 
@@ -58,7 +58,7 @@ for await (const chunk of adapter.chatStream([
58
58
  }
59
59
  ```
60
60
 
61
- ### With PGA Genome
61
+ ### With GSEP Genome
62
62
 
63
63
  ```typescript
64
64
  import { PGA } from '@pga-ai/core';
@@ -203,6 +203,6 @@ MIT
203
203
 
204
204
  ## Links
205
205
 
206
- - [PGA Core Documentation](../../core/README.md)
206
+ - [GSEP Core Documentation](../../core/README.md)
207
207
  - [OpenAI API Documentation](https://platform.openai.com/docs)
208
208
  - [GitHub Repository](https://github.com/LuisvelMarketer/pga-platform)
package/package.json CHANGED
@@ -1,22 +1,38 @@
1
1
  {
2
2
  "name": "@pga-ai/adapters-llm-openai",
3
- "version": "0.1.0",
4
- "description": "OpenAI adapter for PGA",
3
+ "version": "0.8.0",
4
+ "description": "OpenAI adapter for GSEP",
5
5
  "main": "./dist/index.js",
6
6
  "types": "./dist/index.d.ts",
7
+ "exports": {
8
+ ".": {
9
+ "import": "./dist/index.js",
10
+ "types": "./dist/index.d.ts"
11
+ }
12
+ },
7
13
  "type": "module",
8
14
  "scripts": {
9
- "build": "tsc"
15
+ "build": "tsc --build",
16
+ "clean": "rm -rf dist"
10
17
  },
11
18
  "keywords": [
12
- "pga",
19
+ "gsep",
13
20
  "openai",
14
21
  "gpt",
15
22
  "llm",
16
23
  "adapter"
17
24
  ],
18
- "author": "Luis Alfredo Velasquez Duran",
25
+ "author": "Luis Alfredo Velasquez Duran <contact@gsepcore.com>",
19
26
  "license": "MIT",
27
+ "homepage": "https://gsepcore.com",
28
+ "repository": {
29
+ "type": "git",
30
+ "url": "https://github.com/LuisvelMarketer/pga-platform",
31
+ "directory": "packages/adapters-llm/openai"
32
+ },
33
+ "bugs": {
34
+ "url": "https://github.com/LuisvelMarketer/pga-platform/issues"
35
+ },
20
36
  "dependencies": {
21
37
  "@pga-ai/core": "*",
22
38
  "openai": "^4.77.3"
package/src/index.ts CHANGED
@@ -1,5 +1,5 @@
1
1
  /**
2
- * OpenAI Adapter for PGA
2
+ * OpenAI Adapter for GSEP
3
3
  * Created by Luis Alfredo Velasquez Duran (Germany, 2025)
4
4
  *
5
5
  * Supports:
@@ -85,34 +85,39 @@ export class OpenAIAdapter implements LLMAdapter {
85
85
  * Chat with OpenAI
86
86
  */
87
87
  async chat(messages: Message[], options?: ChatOptions): Promise<ChatResponse> {
88
- // Convert PGA messages to OpenAI format
89
88
  const openaiMessages = this.convertMessages(messages);
90
89
 
91
- const response = await this.client.chat.completions.create({
92
- model: this.model,
93
- messages: openaiMessages,
94
- temperature: options?.temperature ?? this.config.defaultTemperature ?? 1.0,
95
- top_p: this.config.defaultTopP ?? 1.0,
96
- max_tokens: options?.maxTokens ?? 4096,
97
- stream: false,
98
- });
90
+ try {
91
+ const response = await this.client.chat.completions.create({
92
+ model: this.model,
93
+ messages: openaiMessages,
94
+ temperature: options?.temperature ?? this.config.defaultTemperature ?? 1.0,
95
+ top_p: this.config.defaultTopP ?? 1.0,
96
+ max_tokens: options?.maxTokens ?? 4096,
97
+ stream: false,
98
+ });
99
+
100
+ const choice = response.choices[0];
101
+ if (!choice) {
102
+ throw new Error('No response choices returned from OpenAI');
103
+ }
99
104
 
100
- const choice = response.choices[0];
101
- if (!choice) {
102
- throw new Error('No response from OpenAI');
105
+ return {
106
+ content: choice.message.content || '',
107
+ usage: {
108
+ inputTokens: response.usage?.prompt_tokens || 0,
109
+ outputTokens: response.usage?.completion_tokens || 0,
110
+ },
111
+ metadata: {
112
+ finishReason: choice.finish_reason || 'stop',
113
+ model: this.model,
114
+ },
115
+ };
116
+ } catch (error) {
117
+ throw new Error(
118
+ `OpenAI API error: ${error instanceof Error ? error.message : 'Unknown error'}`,
119
+ );
103
120
  }
104
-
105
- return {
106
- content: choice.message.content || '',
107
- usage: {
108
- inputTokens: response.usage?.prompt_tokens || 0,
109
- outputTokens: response.usage?.completion_tokens || 0,
110
- },
111
- metadata: {
112
- finishReason: choice.finish_reason || 'stop',
113
- model: this.model,
114
- },
115
- };
116
121
  }
117
122
 
118
123
  /**
@@ -124,34 +129,39 @@ export class OpenAIAdapter implements LLMAdapter {
124
129
  ): AsyncGenerator<ChatChunk, void, unknown> {
125
130
  const openaiMessages = this.convertMessages(messages);
126
131
 
127
- const stream = await this.client.chat.completions.create({
128
- model: this.model,
129
- messages: openaiMessages,
130
- temperature: options?.temperature ?? this.config.defaultTemperature ?? 1.0,
131
- top_p: this.config.defaultTopP ?? 1.0,
132
- max_tokens: options?.maxTokens ?? 4096,
133
- stream: true,
134
- });
135
-
136
- for await (const chunk of stream) {
137
- const delta = chunk.choices[0]?.delta;
138
- if (!delta?.content) {
139
- // Check if stream is done
140
- if (chunk.choices[0]?.finish_reason) {
141
- yield { delta: '', done: true };
132
+ try {
133
+ const stream = await this.client.chat.completions.create({
134
+ model: this.model,
135
+ messages: openaiMessages,
136
+ temperature: options?.temperature ?? this.config.defaultTemperature ?? 1.0,
137
+ top_p: this.config.defaultTopP ?? 1.0,
138
+ max_tokens: options?.maxTokens ?? 4096,
139
+ stream: true,
140
+ });
141
+
142
+ for await (const chunk of stream) {
143
+ const delta = chunk.choices[0]?.delta;
144
+ if (!delta?.content) {
145
+ if (chunk.choices[0]?.finish_reason) {
146
+ yield { delta: '', done: true };
147
+ }
148
+ continue;
142
149
  }
143
- continue;
144
- }
145
150
 
146
- yield {
147
- delta: delta.content,
148
- done: false,
149
- };
151
+ yield {
152
+ delta: delta.content,
153
+ done: false,
154
+ };
155
+ }
156
+ } catch (error) {
157
+ throw new Error(
158
+ `OpenAI streaming error: ${error instanceof Error ? error.message : 'Unknown error'}`,
159
+ );
150
160
  }
151
161
  }
152
162
 
153
163
  /**
154
- * Convert PGA messages to OpenAI format
164
+ * Convert GSEP messages to OpenAI format
155
165
  */
156
166
  private convertMessages(
157
167
  messages: Message[],