@aj-archipelago/cortex 1.1.30 → 1.1.32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/config.js +30 -0
- package/helper-apps/cortex-autogen/.funcignore +8 -0
- package/helper-apps/cortex-autogen/Dockerfile +10 -0
- package/helper-apps/cortex-autogen/OAI_CONFIG_LIST +5 -0
- package/helper-apps/cortex-autogen/function_app.py +32 -0
- package/helper-apps/cortex-autogen/host.json +15 -0
- package/helper-apps/cortex-autogen/main.py +38 -0
- package/helper-apps/cortex-autogen/myautogen.py +158 -0
- package/helper-apps/cortex-autogen/prompt.txt +0 -0
- package/helper-apps/cortex-autogen/requirements.txt +6 -0
- package/helper-apps/cortex-autogen/sasfileuploader.py +93 -0
- package/helper-apps/cortex-file-handler/fileChunker.js +9 -2
- package/lib/requestExecutor.js +1 -1
- package/package.json +1 -1
- package/server/graphql.js +1 -0
- package/server/modelExecutor.js +4 -0
- package/server/plugins/openAiReasoningPlugin.js +61 -0
- package/server/typeDef.js +2 -2
package/config.js
CHANGED
@@ -154,6 +154,36 @@ var config = convict({
       "maxReturnTokens": 4096,
       "supportsStreaming": true
     },
+    "oai-o1-mini": {
+      "type": "OPENAI-REASONING",
+      "url": "https://api.openai.com/v1/chat/completions",
+      "headers": {
+        "Authorization": "Bearer {{OPENAI_API_KEY}}",
+        "Content-Type": "application/json"
+      },
+      "params": {
+        "model": "o1-mini"
+      },
+      "requestsPerSecond": 10,
+      "maxTokenLength": 128000,
+      "maxReturnTokens": 65536,
+      "supportsStreaming": false
+    },
+    "oai-o1-preview": {
+      "type": "OPENAI-REASONING",
+      "url": "https://api.openai.com/v1/chat/completions",
+      "headers": {
+        "Authorization": "Bearer {{OPENAI_API_KEY}}",
+        "Content-Type": "application/json"
+      },
+      "params": {
+        "model": "o1-preview"
+      },
+      "requestsPerSecond": 10,
+      "maxTokenLength": 128000,
+      "maxReturnTokens": 32768,
+      "supportsStreaming": false
+    },
     "azure-bing": {
       "type": "AZURE-BING",
       "url": "https://api.bing.microsoft.com/v7.0/search",
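The two entries added above point Cortex at OpenAI's standard chat-completions endpoint. As a rough illustration of the request shape those settings imply (endpoint, bearer key, model name, non-streaming), a direct call might look like the sketch below. This is not Cortex code: the function name is made up, and the payload fields are standard OpenAI chat-completions parameters.

// Illustrative sketch only: a direct request matching the "oai-o1-mini" entry above.
// Assumes OPENAI_API_KEY is set in the environment (Node 18+ for global fetch).
async function callReasoningModel(userText) {
  const response = await fetch("https://api.openai.com/v1/chat/completions", {
    method: "POST",
    headers: {
      "Authorization": `Bearer ${process.env.OPENAI_API_KEY}`,
      "Content-Type": "application/json"
    },
    body: JSON.stringify({
      model: "o1-mini",
      messages: [{ role: "user", content: userText }],
      max_completion_tokens: 1024, // reasoning models take max_completion_tokens rather than max_tokens
      stream: false                // matches "supportsStreaming": false above
    })
  });
  const result = await response.json();
  return result.choices?.[0]?.message?.content;
}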
package/helper-apps/cortex-autogen/function_app.py
ADDED
@@ -0,0 +1,32 @@
+import azure.functions as func
+import logging
+import json
+import autogen
+from autogen import AssistantAgent, UserProxyAgent, config_list_from_json
+from azure.storage.queue import QueueClient
+import os
+import tempfile
+import redis
+from myautogen import process_message
+
+app = func.FunctionApp()
+
+connection_string = os.environ["AZURE_STORAGE_CONNECTION_STRING"]
+queue_name = os.environ.get("QUEUE_NAME", "autogen-message-queue")
+queue_client = QueueClient.from_connection_string(connection_string, queue_name)
+
+redis_client = redis.from_url(os.environ['REDIS_CONNECTION_STRING'])
+channel = 'requestProgress'
+
+
+@app.queue_trigger(arg_name="msg", queue_name=queue_name, connection="AZURE_STORAGE_CONNECTION_STRING")
+def queue_trigger(msg: func.QueueMessage):
+    logging.info(f"Queue trigger Message ID: {msg.id}")
+    try:
+        message_data = json.loads(msg.get_body().decode('utf-8'))
+        if "requestId" not in message_data:
+            message_data['requestId'] = msg.id
+        process_message(message_data)
+
+    except Exception as e:
+        logging.error(f"Error processing message: {str(e)}")
package/helper-apps/cortex-autogen/host.json
ADDED
@@ -0,0 +1,15 @@
+{
+  "version": "2.0",
+  "logging": {
+    "applicationInsights": {
+      "samplingSettings": {
+        "isEnabled": true,
+        "excludedTypes": "Request"
+      }
+    }
+  },
+  "extensionBundle": {
+    "id": "Microsoft.Azure.Functions.ExtensionBundle",
+    "version": "[4.*, 5.0.0)"
+  }
+}
package/helper-apps/cortex-autogen/main.py
ADDED
@@ -0,0 +1,38 @@
+import os
+from azure.storage.queue import QueueClient
+import base64
+import json
+from myautogen import process_message
+import time
+
+def main():
+    print("Starting message processing loop")
+    connection_string = os.environ["AZURE_STORAGE_CONNECTION_STRING"]
+    queue_name = os.environ.get("QUEUE_NAME", "autogen-message-queue")
+
+    queue_client = QueueClient.from_connection_string(connection_string, queue_name)
+
+    attempts = 0
+    max_attempts = 100
+
+
+    while attempts < max_attempts:
+        messages = queue_client.receive_messages(messages_per_page=1)
+
+        if messages:
+            for message in messages:
+                decoded_content = base64.b64decode(message.content).decode('utf-8')
+                message_data = json.loads(decoded_content)
+                if "requestId" not in message_data:
+                    message_data['requestId'] = message.id
+                process_message(message_data)
+                queue_client.delete_message(message)
+                attempts = 0  # Reset attempts if a message was processed
+        else:
+            attempts += 1
+            time.sleep(1)  # Wait for 1 second before checking again
+
+    print("No messages received after 100 attempts. Exiting.")
+
+if __name__ == "__main__":
+    main()
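For reference, both workers above (the queue-triggered Azure Function and this polling loop) consume base64-encoded JSON from the autogen-message-queue storage queue and expect a "message" field plus an optional "requestId". A minimal producer sketch using the @azure/storage-queue JavaScript SDK might look like the following; the function name is hypothetical and the producer itself is not part of this package.

// Illustrative producer sketch: enqueue a task for the cortex-autogen worker above.
import { QueueServiceClient } from "@azure/storage-queue";

async function enqueueAutogenTask(text, requestId) {
  const service = QueueServiceClient.fromConnectionString(process.env.AZURE_STORAGE_CONNECTION_STRING);
  const queue = service.getQueueClient(process.env.QUEUE_NAME || "autogen-message-queue");
  await queue.createIfNotExists();
  const payload = JSON.stringify({ message: text, requestId });
  // main.py base64-decodes the message body, so encode the JSON before sending
  await queue.sendMessage(Buffer.from(payload).toString("base64"));
}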
package/helper-apps/cortex-autogen/myautogen.py
ADDED
@@ -0,0 +1,158 @@
+import azure.functions as func
+import logging
+import json
+import autogen
+from autogen import AssistantAgent, UserProxyAgent, config_list_from_json
+from azure.storage.queue import QueueClient
+import os
+import tempfile
+import redis
+from dotenv import load_dotenv
+import requests
+import pathlib
+
+load_dotenv()
+
+app = func.FunctionApp()
+
+connection_string = os.environ["AZURE_STORAGE_CONNECTION_STRING"]
+queue_name = os.environ.get("QUEUE_NAME", "autogen-message-queue")
+queue_client = QueueClient.from_connection_string(connection_string, queue_name)
+
+redis_client = redis.from_url(os.environ['REDIS_CONNECTION_STRING'])
+channel = 'requestProgress'
+
+def connect_redis():
+    if not redis_client.ping():
+        try:
+            redis_client.ping()
+        except redis.ConnectionError as e:
+            logging.error(f"Error reconnecting to Redis: {e}")
+            return False
+    return True
+
+def publish_request_progress(data):
+    if connect_redis():
+        try:
+            message = json.dumps(data)
+            logging.info(f"Publishing message {message} to channel {channel}")
+            redis_client.publish(channel, message)
+        except Exception as e:
+            logging.error(f"Error publishing message: {e}")
+
+
+def get_given_system_message():
+    env_context = os.environ.get("ENV_SYSTEM_MESSAGE_CONTEXT")
+
+    if not env_context:
+        return read_local_file("prompt.txt")
+
+    if env_context.startswith(("http://", "https://")):
+        return fetch_from_url(env_context)
+
+    if pathlib.Path(env_context).suffix:
+        return read_local_file(env_context)
+
+    return env_context
+
+def read_local_file(filename):
+    try:
+        with open(filename, "r") as file:
+            return file.read()
+    except FileNotFoundError:
+        logging.error(f"{filename} not found")
+        return ""
+
+def fetch_from_url(url):
+    try:
+        response = requests.get(url)
+        response.raise_for_status()
+        return response.text
+    except requests.RequestException as e:
+        logging.error(f"Error fetching from URL: {e}")
+        return ""
+
+def process_message(message_data):
+    logging.info(f"Processing Message: {message_data}")
+    try:
+        message = message_data['message']
+        request_id = message_data.get('requestId') or msg.id
+
+        config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST")
+        base_url = os.environ.get("CORTEX_API_BASE_URL")
+        api_key = os.environ.get("CORTEX_API_KEY")
+        llm_config = {"config_list": config_list, "base_url": base_url, "api_key": api_key, "cache_seed": None}
+
+        with tempfile.TemporaryDirectory() as temp_dir:
+            code_executor = autogen.coding.LocalCommandLineCodeExecutor(work_dir=temp_dir)
+
+            message_count = 0
+            total_messages = 20 * 2
+            all_messages = []
+
+            def is_termination_msg(m):
+                content = m.get("content", "")
+                if message_count == 0:
+                    return False
+                return (m.get("role") == "assistant" and not content.strip()) or \
+                    content.rstrip().endswith("TERMINATE") or \
+                    "first message must use the" in content.lower() or \
+                    len(content.strip()) == 0
+
+            system_message_given = get_given_system_message()
+            system_message_assistant = AssistantAgent.DEFAULT_SYSTEM_MESSAGE
+
+            if system_message_given:
+                system_message_assistant = f"{system_message_assistant}\n\n{system_message_given}"
+            else:
+                print("No extra system message given for assistant")
+
+            assistant = AssistantAgent("assistant", llm_config=llm_config, system_message=system_message_assistant)
+
+            user_proxy = UserProxyAgent(
+                "user_proxy",
+                system_message=system_message_given,
+                code_execution_config={"executor": code_executor},
+                human_input_mode="NEVER",
+                max_consecutive_auto_reply=20,
+                is_termination_msg=is_termination_msg,
+            )
+
+            original_assistant_send = assistant.send
+            original_user_proxy_send = user_proxy.send
+
+            def logged_send(sender, original_send, message, recipient, request_reply=None, silent=True):
+                nonlocal message_count, all_messages
+                logging.info(f"Message from {sender.name} to {recipient.name}: {message}")
+                message_count += 1
+                progress = min(message_count / total_messages, 1)
+                all_messages.append({"sender": sender.name, "message": message})
+                publish_request_progress({
+                    "requestId": request_id,
+                    "progress": progress,
+                    "info": message
+                })
+                return original_send(message, recipient, request_reply, silent)
+
+            assistant.send = lambda message, recipient, request_reply=None, silent=True: logged_send(assistant, original_assistant_send, message, recipient, request_reply, silent)
+            user_proxy.send = lambda message, recipient, request_reply=None, silent=True: logged_send(user_proxy, original_user_proxy_send, message, recipient, request_reply, silent)
+
+            chat_result = user_proxy.initiate_chat(assistant, message=message)
+
+            msg = all_messages[-3]["message"] if len(all_messages) >= 3 else ""
+            logging.info(f"####Final message: {msg}")
+
+            publish_request_progress({
+                "requestId": request_id,
+                "progress": 1,
+                "data": msg
+            })
+
+    except Exception as e:
+        logging.error(f"Error processing message: {str(e)}")
+        if request_id:
+            publish_request_progress({
+                "requestId": request_id,
+                "progress": 1,
+                "error": str(e)
+            })
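The worker above reports progress by publishing JSON objects of the shape { requestId, progress, info | data | error } on the requestProgress Redis channel. A minimal subscriber sketch, assuming the ioredis client (an assumption for illustration; the consuming side is not part of this diff):

// Illustrative subscriber sketch: listen for cortex-autogen progress updates.
import Redis from "ioredis";

const redis = new Redis(process.env.REDIS_CONNECTION_STRING);
await redis.subscribe("requestProgress");
redis.on("message", (channel, raw) => {
  const { requestId, progress, info, data, error } = JSON.parse(raw);
  console.log(`[${requestId}] ${Math.round(progress * 100)}%`, error || data || info || "");
});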
package/helper-apps/cortex-autogen/prompt.txt
ADDED
File without changes
package/helper-apps/cortex-autogen/sasfileuploader.py
ADDED
@@ -0,0 +1,93 @@
+import os
+import sys
+from datetime import datetime, timedelta
+
+def install_azure_storage_blob():
+    print("Installing azure-storage-blob...")
+    import subprocess
+    subprocess.check_call([sys.executable, "-m", "pip", "install", "azure-storage-blob"])
+    print("azure-storage-blob installed successfully.")
+
+try:
+    from azure.storage.blob import BlobServiceClient, BlobClient, generate_blob_sas, BlobSasPermissions
+except ImportError:
+    install_azure_storage_blob()
+    from azure.storage.blob import BlobServiceClient, BlobClient, generate_blob_sas, BlobSasPermissions
+
+def generate_sas_url(blob_service_client, container_name, blob_name):
+    """
+    Generates a SAS URL for a blob.
+    """
+    sas_token = generate_blob_sas(
+        account_name=blob_service_client.account_name,
+        container_name=container_name,
+        blob_name=blob_name,
+        account_key=blob_service_client.credential.account_key,
+        permission=BlobSasPermissions(read=True, write=True),
+        expiry=datetime.utcnow() + timedelta(hours=1)
+    )
+    return f"https://{blob_service_client.account_name}.blob.core.windows.net/{container_name}/{blob_name}?{sas_token}"
+
+def upload_file_to_blob(file_path, sas_url):
+    """
+    Uploads a single file to Azure Blob Storage using a SAS URL.
+    """
+    try:
+        blob_client = BlobClient.from_blob_url(sas_url)
+        with open(file_path, "rb") as data:
+            blob_client.upload_blob(data, overwrite=True)
+        print(f"Successfully uploaded {os.path.basename(file_path)} to Azure Blob Storage.")
+        return True
+    except Exception as e:
+        print(f"Error uploading file: {e}")
+        return False
+
+def main():
+    # Get Azure Storage connection string from environment variable
+    connect_str = os.environ.get('AZURE_STORAGE_CONNECTION_STRING')
+    if not connect_str:
+        print("Error: AZURE_STORAGE_CONNECTION_STRING is not set in environment variables.")
+        sys.exit(1)
+
+    # Create the BlobServiceClient object
+    blob_service_client = BlobServiceClient.from_connection_string(connect_str)
+
+    # Get the container name from environment variable or use a default
+    container_name = os.environ.get('AZURE_BLOB_CONTAINER', 'testcontainer')
+
+    # Test file details
+    file_path = "/tmp/test_file.txt"
+    blob_name = "test_file.txt"
+
+    # Create a test file
+    with open(file_path, "w") as f:
+        f.write("This is a test file for Azure Blob Storage upload.")
+
+    print(f"Test file created at: {file_path}")
+
+    # Generate SAS URL
+    sas_url = generate_sas_url(blob_service_client, container_name, blob_name)
+    print(f"Generated SAS URL: {sas_url}")
+
+    # Upload file
+    if upload_file_to_blob(file_path, sas_url):
+        print("File upload completed successfully.")
+    else:
+        print("File upload failed.")
+
+    # Clean up the test file
+    os.remove(file_path)
+    print(f"Test file removed: {file_path}")
+
+    # Upload this script to Azure Blob Storage
+    script_path = os.path.abspath(__file__)
+    script_name = os.path.basename(script_path)
+    script_sas_url = generate_sas_url(blob_service_client, container_name, script_name)
+
+    if upload_file_to_blob(script_path, script_sas_url):
+        print(f"Script uploaded successfully. You can access it at: {script_sas_url}")
+    else:
+        print("Failed to upload the script.")
+
+if __name__ == "__main__":
+    main()
package/helper-apps/cortex-file-handler/fileChunker.js
CHANGED
@@ -86,9 +86,16 @@ async function splitMediaFile(inputPath, chunkDurationInSeconds = 500) {
        // Extract the original file name from the URL
        const urlObj = new URL(inputPath);
        const originalFileName = path.basename(urlObj.pathname);
+       const maxLength = 200; // Set the maximum length for the filename
+       let truncatedFileName = originalFileName;
+       if (originalFileName.length > maxLength) {
+           const extension = path.extname(originalFileName); // Preserve the file extension
+           const basename = path.basename(originalFileName, extension); // Get the filename without the extension
+           truncatedFileName = basename.substring(0, maxLength) + extension; // Truncate the filename and append the extension
+       }

-       // Use the original file name when saving the downloaded file
-       const downloadPath = path.join(uniqueOutputPath, originalFileName);
+       // Use the original-truncated file name when saving the downloaded file
+       const downloadPath = path.join(uniqueOutputPath, truncatedFileName);
        await downloadFile(inputPath, downloadPath);
        inputPath = downloadPath;
    }
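The lines added above cap the basename at 200 characters while keeping the extension, presumably so that very long URLs no longer yield filenames that hit filesystem length limits. A standalone sketch of the same behavior, for illustration only (the helper name is hypothetical):

import path from "path";

// Mirrors the truncation logic added to splitMediaFile above.
function truncateFileName(originalFileName, maxLength = 200) {
  if (originalFileName.length <= maxLength) return originalFileName;
  const extension = path.extname(originalFileName);          // keep ".mp3", ".wav", etc.
  const basename = path.basename(originalFileName, extension);
  return basename.substring(0, maxLength) + extension;       // e.g. a 300-char basename becomes 200 chars + extension
}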
package/lib/requestExecutor.js
CHANGED
@@ -236,7 +236,7 @@ const makeRequest = async (cortexRequest) => {
        promises.push(selectedEndpoint.limiter.schedule({expiration: pathway.timeout * 1000 + 1000, id: `${requestId}_${uuidv4()}`},() => requestWithMonitor(selectedEndpoint, url, data, axiosConfigObj)));
    } else {
        if (streamRequested) {
-           logger.info(`>>> [${requestId}] ${model} does not support streaming - sending non-streaming request`);
+           logger.info(`>>> [${requestId}] ${model.name || 'This model'} does not support streaming - sending non-streaming request`);
            axiosConfigObj.params.stream = false;
            data.stream = false;
        }
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@aj-archipelago/cortex",
-  "version": "1.1.30",
+  "version": "1.1.32",
   "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
   "private": false,
   "repository": {
package/server/graphql.js
CHANGED
package/server/modelExecutor.js
CHANGED
@@ -17,6 +17,7 @@ import OpenAiEmbeddingsPlugin from './plugins/openAiEmbeddingsPlugin.js';
 import OpenAIImagePlugin from './plugins/openAiImagePlugin.js';
 import OpenAIDallE3Plugin from './plugins/openAiDallE3Plugin.js';
 import OpenAIVisionPlugin from './plugins/openAiVisionPlugin.js';
+import OpenAIReasoningPlugin from './plugins/openAiReasoningPlugin.js';
 import GeminiChatPlugin from './plugins/geminiChatPlugin.js';
 import GeminiVisionPlugin from './plugins/geminiVisionPlugin.js';
 import Gemini15ChatPlugin from './plugins/gemini15ChatPlugin.js';
@@ -82,6 +83,9 @@ class ModelExecutor {
            case 'OPENAI-VISION':
                plugin = new OpenAIVisionPlugin(pathway, model);
                break;
+           case 'OPENAI-REASONING':
+               plugin = new OpenAIReasoningPlugin(pathway, model);
+               break;
            case 'GEMINI-CHAT':
                plugin = new GeminiChatPlugin(pathway, model);
                break;
package/server/plugins/openAiReasoningPlugin.js
ADDED
@@ -0,0 +1,61 @@
+import OpenAIChatPlugin from './openAiChatPlugin.js';
+
+class OpenAIReasoningPlugin extends OpenAIChatPlugin {
+
+    tryParseMessages(messages) {
+        let newMessages = [];
+
+        for (const message of messages) {
+            if (message.role === 'user' || message.role === 'assistant') {
+                newMessages.push({
+                    role: message.role,
+                    content: this.parseContent(message.content)
+                });
+            }
+            // System messages are simply ignored
+        }
+
+        messages.length = 0;
+        messages.push(...newMessages);
+    }
+
+    parseContent(content) {
+        if (typeof content === 'string') {
+            return [{ type: 'text', text: content }];
+        }
+        if (Array.isArray(content)) {
+            return content.map(item => {
+                if (typeof item === 'string') {
+                    return { type: 'text', text: item };
+                }
+                const { type, text } = item;
+                return { type, text: text || '' };
+            });
+        }
+        return [];
+    }
+
+    getRequestParameters(text, parameters, prompt) {
+        const requestParameters = super.getRequestParameters(text, parameters, prompt);
+
+        this.tryParseMessages(requestParameters.messages);
+
+        const modelMaxReturnTokens = this.getModelMaxReturnTokens();
+        const maxTokensPrompt = this.promptParameters.max_tokens;
+        const maxTokensModel = this.getModelMaxTokenLength() * (1 - this.getPromptTokenRatio());
+
+        const maxTokens = maxTokensPrompt || maxTokensModel;
+
+        requestParameters.max_completion_tokens = maxTokens ? Math.min(maxTokens, modelMaxReturnTokens) : modelMaxReturnTokens;
+        requestParameters.temperature = 1;
+
+        if (this.promptParameters.json) {
+            //requestParameters.response_format = { type: "json_object", }
+        }
+
+        return requestParameters;
+    }
+
+}
+
+export default OpenAIReasoningPlugin;
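To illustrate what the new plugin does to a conversation before the request is sent: system messages are dropped, string and mixed content is normalized into [{ type: 'text', text }] parts, max_completion_tokens is used in place of max_tokens, and temperature is pinned to 1. The values below are made up; only the shapes follow the plugin code above.

// Illustration of the tryParseMessages/parseContent transformation (not package code):
const messages = [
  { role: "system", content: "You are helpful." },                     // dropped: the plugin ignores system messages
  { role: "user", content: "Hello" },
  { role: "assistant", content: ["Hi", { type: "text", text: "there" }] }
];
// After tryParseMessages(messages), the array is rewritten in place to:
// [
//   { role: "user", content: [{ type: "text", text: "Hello" }] },
//   { role: "assistant", content: [{ type: "text", text: "Hi" }, { type: "text", text: "there" }] }
// ]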
package/server/typeDef.js
CHANGED
@@ -36,8 +36,8 @@ const typeDef = (pathway) => {

    const typeName = fields ? `${objName}Result` : `String`;

-   const messageType = `input Message { role: String, content: String }`;
-   const multiMessageType = `input MultiMessage { role: String, content: [String] }`;
+   const messageType = `input Message { role: String, content: String, name: String }`;
+   const multiMessageType = `input MultiMessage { role: String, content: [String], name: String }`;

    const type = fields ? `type ${typeName} {
        ${fieldsStr}