ApiLogicServer 12.0.4__py3-none-any.whl → 12.1.26__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- {ApiLogicServer-12.0.4.dist-info → ApiLogicServer-12.1.26.dist-info}/METADATA +2 -1
- {ApiLogicServer-12.0.4.dist-info → ApiLogicServer-12.1.26.dist-info}/RECORD +63 -40
- {ApiLogicServer-12.0.4.dist-info → ApiLogicServer-12.1.26.dist-info}/WHEEL +1 -1
- api_logic_server_cli/api_logic_server.py +23 -204
- api_logic_server_cli/api_logic_server_info.yaml +2 -2
- api_logic_server_cli/cli.py +25 -84
- api_logic_server_cli/cli_args_base.py +2 -0
- api_logic_server_cli/cli_args_project.py +9 -3
- api_logic_server_cli/create_from_model/__pycache__/dbml.cpython-312.pyc +0 -0
- api_logic_server_cli/create_from_model/__pycache__/meta_model.cpython-312.pyc +0 -0
- api_logic_server_cli/create_from_model/__pycache__/model_creation_services.cpython-312.pyc +0 -0
- api_logic_server_cli/create_from_model/__pycache__/ont_create.cpython-312.pyc +0 -0
- api_logic_server_cli/create_from_model/__pycache__/ui_admin_creator.cpython-312.pyc +0 -0
- api_logic_server_cli/create_from_model/meta_model.py +1 -1
- api_logic_server_cli/create_from_model/model_creation_services.py +3 -1
- api_logic_server_cli/create_from_model/ont_create.py +3 -1
- api_logic_server_cli/create_from_model/ui_admin_creator.py +6 -4
- api_logic_server_cli/genai.py +420 -198
- api_logic_server_cli/logging.yml +5 -0
- api_logic_server_cli/prototypes/.DS_Store +0 -0
- api_logic_server_cli/prototypes/base/api_logic_server_run.py +0 -2
- api_logic_server_cli/prototypes/base/config/server_setup.py +15 -1
- api_logic_server_cli/prototypes/base/integration/kafka/kafka_consumer.py +1 -1
- api_logic_server_cli/prototypes/base/integration/kafka/kafka_producer.py +1 -1
- api_logic_server_cli/prototypes/base/readme.md +21 -8
- api_logic_server_cli/prototypes/genai_demo/logic/declare_logic.py +11 -12
- api_logic_server_cli/prototypes/manager/.DS_Store +0 -0
- api_logic_server_cli/prototypes/manager/.vscode/.DS_Store +0 -0
- api_logic_server_cli/prototypes/manager/.vscode/launch.json +98 -1
- api_logic_server_cli/prototypes/manager/README.md +40 -3
- api_logic_server_cli/prototypes/manager/system/.DS_Store +0 -0
- api_logic_server_cli/prototypes/manager/system/genai/.DS_Store +0 -0
- api_logic_server_cli/prototypes/manager/system/genai/create_db_models_inserts/create_db_models_create_db.py +11 -0
- api_logic_server_cli/prototypes/manager/system/genai/create_db_models_inserts/create_db_models_imports.py +22 -0
- api_logic_server_cli/prototypes/manager/system/genai/create_db_models_inserts/create_db_models_test_data.py +7 -0
- api_logic_server_cli/prototypes/manager/system/genai/examples/emp_depts/emp_dept.prompt +4 -0
- api_logic_server_cli/prototypes/manager/system/genai/examples/genai_demo/genai_demo.prompt +1 -1
- api_logic_server_cli/prototypes/manager/system/genai/examples/genai_demo/genai_demo.response_example +133 -130
- api_logic_server_cli/prototypes/manager/system/genai/examples/genai_demo/genai_demo_informal.prompt +10 -0
- api_logic_server_cli/prototypes/manager/system/genai/examples/genai_demo/genai_demo_iterative_logic/genai_demo_iterative_logic_000.response +1 -0
- api_logic_server_cli/prototypes/manager/system/genai/examples/genai_demo/genai_demo_iterative_logic/genai_demo_iterative_logic_001.prompt +171 -0
- api_logic_server_cli/prototypes/manager/system/genai/examples/genai_demo/genai_demo_iterative_logic/genai_demo_iterative_logic_002.prompt +21 -0
- api_logic_server_cli/prototypes/manager/system/genai/examples/genai_demo/genai_demo_iterative_logic/genai_demo_iterative_logic_003.response +94 -0
- api_logic_server_cli/prototypes/manager/system/genai/examples/genai_demo/genai_demo_iterative_logic/genai_demo_iterative_logic_004.prompt +6 -0
- api_logic_server_cli/prototypes/manager/system/genai/examples/genai_demo/genai_demo_iterative_logic/genai_demo_iterative_logic_005.response_example +122 -0
- api_logic_server_cli/prototypes/manager/system/genai/learning_requests/logic_bank_api.prompt +162 -12
- api_logic_server_cli/prototypes/manager/system/genai/prompt_inserts/.DS_Store +0 -0
- api_logic_server_cli/prototypes/manager/system/genai/prompt_inserts/logic_inserts.prompt +1 -0
- api_logic_server_cli/prototypes/manager/system/genai/prompt_inserts/response_format.prompt +30 -0
- api_logic_server_cli/prototypes/manager/system/genai/prompt_inserts/sqlite_inserts.prompt +17 -2
- api_logic_server_cli/prototypes/manager/system/genai/prompt_inserts/zsqlite_inserts_iterations.prompt +28 -0
- api_logic_server_cli/prototypes/manager/system/genai/retry/conv/create_db_models.py +96 -0
- api_logic_server_cli/prototypes/manager/system/genai/retry/conv/inf-1_iter_1_1_000.response +1 -0
- api_logic_server_cli/prototypes/manager/system/genai/retry/conv/inf-1_iter_1_1_001.prompt +208 -0
- api_logic_server_cli/prototypes/manager/system/genai/retry/conv/inf-1_iter_1_1_002.prompt +89 -0
- api_logic_server_cli/prototypes/manager/system/genai/retry/conv/inf-1_iter_1_1_003.prompt +40 -0
- api_logic_server_cli/prototypes/manager/system/genai/retry/conv/inf-1_iter_1_1_004.response +57 -0
- api_logic_server_cli/prototypes/manager/system/genai/retry/conv/inf-1_iter_1_1_005.response +57 -0
- api_logic_server_cli/prototypes/manager/system/genai/retry/readme.md +1 -0
- api_logic_server_cli/prototypes/manager/system/genai/retry/retry.response +57 -0
- api_logic_server_cli/prototypes/manager/system/genai/examples/emp_depts/emp_dept_explicit.prompt +0 -7
- api_logic_server_cli/prototypes/manager/system/genai/examples/emp_depts/emp_dept_implicit_fails.prompt +0 -5
- api_logic_server_cli/prototypes/manager/system/secrets.txt +0 -6
- {ApiLogicServer-12.0.4.dist-info → ApiLogicServer-12.1.26.dist-info}/LICENSE +0 -0
- {ApiLogicServer-12.0.4.dist-info → ApiLogicServer-12.1.26.dist-info}/entry_points.txt +0 -0
- {ApiLogicServer-12.0.4.dist-info → ApiLogicServer-12.1.26.dist-info}/top_level.txt +0 -0
api_logic_server_cli/genai.py
CHANGED
@@ -1,17 +1,62 @@
+import json
+import sys
+import time
+import traceback
 from typing import Dict, List
 from api_logic_server_cli.cli_args_project import Project
 import logging
 from pathlib import Path
 import importlib
 import requests
-import os
+import os,re
 import create_from_model.api_logic_server_utils as utils
 import shutil
+import openai
+from openai import OpenAI
+from typing import List, Dict
+from pydantic import BaseModel
+from dotmap import DotMap
+import importlib.util
 
 log = logging.getLogger(__name__)
 
-
-
+K_LogicBankOff = "LBX"
+''' Disable Logic (for demos) '''
+
+class Rule(BaseModel):
+    name: str
+    description: str
+    use_case: str
+    code: str # logicbank rule code
+
+class Model(BaseModel):
+    classname: str
+    code: str # sqlalchemy model code
+    sqlite_create: str # sqlite create table statement
+    description: str
+    name: str
+
+class TestDataRow(BaseModel):
+    test_data_row_variable: str # the Python test data row variable
+    code: str # Python code to create a test data row instance
+
+class WGResult(BaseModel): # must match system/genai/prompt_inserts/response_format.prompt
+    # response: str # result
+    models : List[Model] # list of sqlalchemy classes in the response
+    rules : List[Rule] # list rule declarations
+    test_data: str
+    test_data_rows: List[TestDataRow] # list of test data rows
+    test_data_sqlite: str # test data as sqlite INSERT statements
+    name: str # suggest a short name for the project
+
+
+def import_module_from_path(module_name, file_path):
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    module = importlib.util.module_from_spec(spec)
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)
+    return module
+
 
 class GenAI(object):
     """ Create project from genai prompt(s).
@@ -29,12 +74,11 @@ class GenAI(object):
        The key argument is `--using`
        * It can be a file, dir (conversation) or text argument.
        * It's "stem" denotes the project name to be created at cwd
-       * `self.project.genai_using`
+       * `self.project.genai_using` (not used by WebGenAI)
 
-       The (rarely used) `--repaired_response`
+       The (rarely used) `--repaired_response` --> `self.project.genai_repaired_response`
        * is for retry from corrected response
        * `--using` is required to get the project name, to be created at cwd
-       * `self.project.genai_repaired_response`
 
        __init__() is the main driver (work directory is <manager>/system/genai/temp/)
 
@@ -54,28 +98,10 @@ class GenAI(object):
 
        see key_module_map() for key methods
 
-
-       ##### Explore interim copilot access:
-
-       VSCode/Copilot-chat can turn prompts into logic, so can we automate with API?
-
-       https://stackoverflow.com/questions/76741410/how-to-invoke-github-copilot-programmatically
-       https://docs.google.com/document/d/1o0TeNQtuT6moWU1bOq2K20IbSw4YhV1x_aFnKwo_XeU/edit#heading=h.3xmoi7pevsnp
-       https://code.visualstudio.com/api/extension-guides/chat
-       https://code.visualstudio.com/api/extension-guides/language-model
-       https://github.com/B00TK1D/copilot-api
-
-       ### Or use ChatGPT:
-
-       Not sure vscode/copilot is best approach, since we'd like to activate this during project creation
-       (eg on web/GenAI - not using vscode).
-
-       * Thomas suggests there are ways to "teach" ChatGPT about Logic Bank. This is a *great* idea.
-
-       https://platform.openai.com/docs/guides/fine-tuning/create-a-fine-tuned-model
+       https://platform.openai.com/finetune/ftjob-2i1wkh4t4l855NKCovJeHExs?filter=all
        """
 
-        self.project = project
+        self.project = project # als project info (cli args etc)
         log.info(f'\nGenAI [{self.project.project_name}] creating microservice from: {self.project.genai_using}')
         if self.project.genai_repaired_response != '':
             log.info(f'.. retry from [repaired] response file: {self.project.genai_repaired_response}')
@@ -83,32 +109,57 @@ class GenAI(object):
         self.project.from_model = f'system/genai/temp/create_db_models.py' # we always write the model to this file
         self.ensure_system_dir_exists() # ~ manager, so we can write to system/genai/temp
         self.delete_temp_files()
+        self.post_error = ""
+        """ eg, if response contains table defs, save_prompt_messages_to_system_genai_temp_project raises an exception to trigger retry """
         self.prompt = ""
         """ `--using` - can come from file or text argument """
-
+        self.logic_enabled = True
+        """ K_LogicBankOff is used for demos, where we don't want to create logic """
         self.messages = self.get_prompt_messages() # compute self.messages, from file, dir or text argument
 
         if self.project.genai_repaired_response == '': # normal path - get response from ChatGPT
-
-
-
-
-
-
-
+            try:
+                api_version = f'{self.project.genai_version}' # eg, "gpt-4o"
+                start_time = time.time()
+                db_key = os.getenv("APILOGICSERVER_CHATGPT_APIKEY")
+                client = OpenAI(api_key=os.getenv("APILOGICSERVER_CHATGPT_APIKEY"))
+                model = api_version
+                if model == "": # default from CLI is '', meaning fall back to env variable or system default...
+                    model = os.getenv("APILOGICSERVER_CHATGPT_MODEL")
+                    if model is None or model == "*": # system default chatgpt model
+                        model = "gpt-4o-2024-08-06"
+                self.resolved_model = model
+                completion = client.beta.chat.completions.parse(
+                    messages=self.messages, response_format=WGResult,
+                    # temperature=self.project.genai_temperature, values .1 and .7 made students / charges fail
+                    model=model # for own model, use "ft:gpt-4o-2024-08-06:personal:logicbank:ARY904vS"
+                )
+                log.debug(f'ChatGPT ({str(int(time.time() - start_time))} secs) - response at: system/genai/temp/chatgpt_original.response')
+
+                data = completion.choices[0].message.content
+                response_dict = json.loads(data)
+                self.get_and_save_raw_response_data(completion=completion, response_dict=response_dict)
+                # print(json.dumps(json.loads(data), indent=4))
+                pass
+            except Exception as inst:
+                log.error(f"\n\nError: ChatGPT call failed\n{inst}\n\n")
+                sys.exit('ChatGPT call failed - please see https://apilogicserver.github.io/Docs/WebGenAI-CLI/#configuration')
         else: # for retry from corrected response... eg system/genai/temp/chatgpt_retry.response
+            self.resolved_model = "(n/a: model not used for repaired response)"
             log.debug(f'\nUsing [corrected] response from: {self.project.genai_repaired_response}')
-            with open(self.project.genai_repaired_response, 'r') as
-
-
+            with open(self.project.genai_repaired_response, 'r') as response_file:
+                response_dict = json.load(response_file)
+
+        self.response_dict = DotMap(response_dict)
         """ the raw response data from ChatGPT which will be fixed & saved create_db_models.py """
 
-        self.
+        self.get_valid_project_name()
 
-        self.fix_and_write_model_file(
+        self.fix_and_write_model_file() # write create_db_models.py for db creation, & logic
         self.save_prompt_messages_to_system_genai_temp_project() # save prompts, response and models.py
         if project.project_name_last_node == 'genai_demo_conversation':
             debug_string = "good breakpoint - check create_db_models.py"
+            pass # if we've set self.post_error, we'll raise an exception to trigger retry
         pass # return to api_logic_server.ProjectRun to create db/project from create_db_models.py
 
     def delete_temp_files(self):
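Note on the hunk above: the new `client.beta.chat.completions.parse(...)` call uses the OpenAI structured-outputs API, which validates the model's reply against the `WGResult` Pydantic schema added at the top of the file. A minimal, self-contained sketch of that pattern follows; the schema and prompt here are deliberately simplified stand-ins rather than the package's actual ones, and it assumes the `openai` 1.x SDK plus an `APILOGICSERVER_CHATGPT_APIKEY` environment variable, as in the code above.

```python
# Illustrative sketch of the structured-output call pattern (not the package's code).
import json
import os
from typing import List

from openai import OpenAI
from pydantic import BaseModel

class Rule(BaseModel):          # simplified stand-in for the WGResult schema in genai.py
    name: str
    code: str

class WGResult(BaseModel):
    name: str
    rules: List[Rule]

client = OpenAI(api_key=os.getenv("APILOGICSERVER_CHATGPT_APIKEY"))
completion = client.beta.chat.completions.parse(
    model="gpt-4o-2024-08-06",   # same default model as in the hunk above
    messages=[{"role": "user", "content": "Create a one-table to-do system"}],
    response_format=WGResult,    # the SDK converts the Pydantic model to a JSON schema
)
# genai.py reads the raw JSON string and re-parses it with json.loads();
# completion.choices[0].message.parsed would return the WGResult instance directly.
response_dict = json.loads(completion.choices[0].message.content)
print(response_dict["name"], len(response_dict["rules"]))
```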
@@ -123,7 +174,10 @@ class GenAI(object):
         """ Create presets - you are a data modelling expert, and logicbank api etc """
         pass
 
-
+        you_are = "You are a data modelling expert and python software architect who expands on user input ideas. You create data models with at least 4 tables"
+        if self.project.genai_tables > 0:
+            you_are = you_are.replace('4', str(self.project.genai_tables))
+        starting_message = {"role": "system", "content": you_are}
         prompt_messages.append( starting_message)
 
         learning_requests = self.get_learning_requests()
@@ -133,6 +187,27 @@ class GenAI(object):
         log.debug(f'.. conv[001] presets: {learning_requests[0]["content"][:30]}...')
         return len(learning_requests)
 
+    def chatgpt_excp(self):
+        # https://apilogicserver.github.io/Docs/WebGenAI-CLI/
+        pass
+
+    def get_valid_project_name(self):
+        """ Get a valid project name from the project name
+        Takes a string and returns a valid filename constructed from the string.
+        """
+
+        # Replace invalid characters with underscores
+        valid_name = re.sub(r'[ \\/*?:"<>|\t\n\r\x0b\x0c]', '_', self.response_dict.name)
+        valid_name = valid_name.strip() # Remove leading and trailing spaces
+        valid_name = valid_name[:255] # Limit the filename length
+        if self.project.project_name == '_genai_default':
+            log.debug(f'.. project name: {valid_name} (from response: {self.response_dict.name})')
+            self.response_dict.name = valid_name
+            self.project.project_name = self.response_dict.name
+            self.project.project_name_last_node = self.response_dict.name
+        else:
+            self.project.directory_setup() # avoid names like "system/genai/temp/TBD"
+        return
 
     def get_prompt_messages(self) -> List[Dict[str, str]]:
         """ Get prompt from file, dir (conversation) or text argument
@@ -176,9 +251,9 @@ class GenAI(object):
                     response_count += 1
                 else:
                     request_count += 1 # rebuild response with *all* tables
-                    if request_count >
+                    if request_count > 2: # Run Config: genai AUTO DEALERSHIP CONVERSATION
                         if 'updating the prior response' not in prompt:
-                            prompt
+                            prompt = self.get_prompt__with_inserts(raw_prompt=prompt, for_iteration=True)
                 prompt_messages.append( {"role": role, "content": prompt})
             else:
                 log.debug(f'.. .. conv ignores: {os.path.basename(each_file)}')
@@ -218,7 +293,7 @@ class GenAI(object):
             learning_requests.append( {"role": "user", "content": learning_request_lines})
         return learning_requests # TODO - what if no learning requests?
 
-    def get_prompt__with_inserts(self, raw_prompt: str) -> str:
+    def get_prompt__with_inserts(self, raw_prompt: str, for_iteration: bool = False) -> str:
         """ prompt-engineering: insert db-specific logic into prompt
         raw_prompt: the prompt from file or text argument
         """
@@ -235,13 +310,52 @@ class GenAI(object):
         elif 'mysql' in self.project.db_url:
             prompt_inserts = f'mysql_inserts.prompt'
 
-        if prompt_inserts
-
-
-
-
+        if prompt_inserts == "*":
+            pass # '*' means caller has computed their own prompt -- no inserts
+        else: # do prompt engineering (inserts)
+            prompt_eng_file_name = f'system/genai/prompt_inserts/{prompt_inserts}'
+            assert Path(prompt_eng_file_name).exists(), \
+                f"Missing prompt_inserts file: {prompt_eng_file_name}" # eg api_logic_server_cli/prototypes/manager/system/genai/prompt_inserts/sqlite_inserts.prompt
+            log.debug(f'get_prompt__with_inserts: {str(os.getcwd())} \n .. merged with: {prompt_eng_file_name}')
+            with open(prompt_eng_file_name, 'r') as file:
                 pre_post = file.read() # eg, Use SQLAlchemy to create a sqlite database named system/genai/temp/create_db_models.sqlite, with
             prompt_result = pre_post.replace('{{prompt}}', raw_prompt)
+        if for_iteration:
+            # Update the prior response - be sure not to lose classes and test data already created.
+            prompt_result = 'Update the prior response - be sure not to lose classes and test data already created.' \
+                + '\n\n' + prompt_result
+            log.debug(f'.. iteration inserted: Update the prior response')
+            log.debug(f'.... iteration prompt result: {prompt_result}')
+
+        prompt_lines = prompt_result.split('\n')
+        prompt_line_number = 0
+        do_logic = True
+        for each_line in prompt_lines:
+            if 'Create multiple rows of test data' in each_line:
+                if self.project.genai_test_data_rows > 0:
+                    each_line = each_line.replace(
+                        f'Create multiple rows',
+                        f'Create {self.project.genai_test_data_rows} rows')
+                    prompt_lines[prompt_line_number] = each_line
+                    log.debug(f'.. inserted explicit test data: {each_line}')
+            if K_LogicBankOff in each_line:
+                self.logic_enabled = False # for demos
+            if "LogicBank" in each_line and do_logic == True:
+                log.debug(f'.. inserted: {each_line}')
+                prompt_eng_logic_file_name = f'system/genai/prompt_inserts/logic_inserts.prompt'
+                with open(prompt_eng_logic_file_name, 'r') as file:
+                    prompt_logic = file.read() # eg, Use SQLAlchemy to...
+                prompt_lines[prompt_line_number] = prompt_logic
+                do_logic = False
+            prompt_line_number += 1
+
+        response_format_file_name = f'system/genai/prompt_inserts/response_format.prompt'
+        with open(response_format_file_name, 'r') as file:
+            response_format = file.readlines()
+        prompt_lines.extend(response_format)
+
+        prompt_result = "\n".join(prompt_lines) # back to a string
+        pass
         return prompt_result
 
     def ensure_system_dir_exists(self):
@@ -261,7 +375,7 @@ class GenAI(object):
         copied_path = shutil.copytree(src=from_dir, dst=to_dir, dirs_exist_ok=True)
 
     def get_logic_from_prompt(self) -> list[str]:
-        """ Get logic from ChatGPT prompt
+        """ Get logic from ChatGPT prompt (code after "Enforce")
 
         Args:
 
@@ -288,39 +402,6 @@ class GenAI(object):
                 logic_text += ' ' + each_line + '\n'
         return logic_text
 
-    @staticmethod
-    def remove_logic_halluncinations(each_line: str) -> str:
-        """remove hallucinations from logic
-
-        eg: Rule.setup()
-
-        Args:
-            each_line (str): _description_
-
-        Returns:
-            str: _description_
-        """ """ """
-        return_line = each_line
-        if each_line.startswith(' Rule.') or each_line.startswith(' DeclareRule.'):
-            if 'Rule.sum' in each_line:
-                pass
-            elif 'Rule.count' in each_line:
-                pass
-            elif 'Rule.formula' in each_line:
-                pass
-            elif 'Rule.copy' in each_line:
-                pass
-            elif 'Rule.constraint' in each_line:
-                pass
-            elif 'Rule.allocate' in each_line:
-                pass
-            elif 'Rule.calculate' in each_line:
-                return_line = each_line.replace('Rule.calculate', 'Rule.copy')
-            else:
-                return_line = each_line.replace(' ', ' # ')
-                log.debug(f'.. removed hallucination: {each_line}')
-        return return_line
-
     def insert_logic_into_created_project(self): # TODO - redesign if conversation
         """Called *after project created* to insert prompt logic into
         1. declare_logic.py (as comment)
@@ -329,20 +410,62 @@ class GenAI(object):
         Also creates the doc directory for record of prompt, response.
         """
 
+        def remove_logic_halluncinations(each_line: str) -> str:
+            """remove hallucinations from logic
+
+            eg: Rule.setup()
+
+            Args:
+                each_line (str): _description_
+
+            Returns:
+                str: _description_
+            """ """ """
+            return_line = each_line
+            if each_line.startswith('Rule.'):
+                # Sometimes indents left out (EmpDepts) - "code": "Rule.sum(derive=Department.salary_total, as_sum_of=Employee.salary)\nRule.constraint(validate=Department,\n as_condition=lambda row: row.salary_total <= row.budget,\n error_msg=\"Department salary total ({row.salary_total}) exceeds budget ({row.budget})\")"
+                each_line = " " + each_line # add missing indent
+                log.debug(f'.. fixed hallucination/indent: {each_line}')
+            if each_line.startswith(' Rule.') or each_line.startswith(' DeclareRule.'):
+                if 'Rule.sum' in each_line:
+                    pass
+                elif 'Rule.count' in each_line:
+                    pass
+                elif 'Rule.formula' in each_line:
+                    pass
+                elif 'Rule.copy' in each_line:
+                    pass
+                elif 'Rule.constraint' in each_line:
+                    pass
+                elif 'Rule.allocate' in each_line:
+                    pass
+                elif 'Rule.calculate' in each_line:
+                    return_line = each_line.replace('Rule.calculate', 'Rule.copy')
+                else:
+                    return_line = each_line.replace(' ', ' # ')
+                    log.debug(f'.. removed hallucination: {each_line}')
+            return return_line
+
+        logic_enabled = True
         logic_file = self.project.project_directory_path.joinpath('logic/declare_logic.py')
         in_logic = False
-        translated_logic = "\n # Logic from GenAI
-        for
-
-
-
-
-
-
-
-
-
+        translated_logic = "\n # Logic from GenAI: (or, use your IDE w/ code completion)\n"
+        for each_rule in self.response_dict.rules:
+            comment_line = each_rule.description
+            translated_logic += f'\n # {comment_line}\n'
+            code_lines = each_rule.code.split('\n')
+            if '\n' in each_rule.code:
+                debug_string = "good breakpoint - multi-line rule"
+            for each_line in code_lines:
+                if 'declare_logic.py' not in each_line:
+                    each_repaired_line = remove_logic_halluncinations(each_line=each_line)
+                    if not each_repaired_line.startswith(' '): # sometimes in indents, sometimes not
+                        each_repaired_line = ' ' + each_repaired_line
+                    if 'def declare_logic' not in each_repaired_line:
+                        translated_logic += each_repaired_line + '\n'
+        if self.logic_enabled == False:
+            translated_logic = "\n # Logic from GenAI: (or, use your IDE w/ code completion)\n"
+            translated_logic += "\n # LogicBank Disabled \n"
         translated_logic += "\n # End Logic from GenAI\n\n"
         utils.insert_lines_at(lines=translated_logic,
                               file_name=logic_file,
@@ -380,7 +503,7 @@ class GenAI(object):
             log.error(f"\n\nERROR creating genai project docs: {docs_dir}\n\n{traceback.format_exc()}")
             pass
 
-    def fix_and_write_model_file(self
+    def fix_and_write_model_file(self):
         """
         1. break response data into lines
         2. throw away instructions
@@ -392,30 +515,24 @@ class GenAI(object):
             response_data (str): the chatgpt response
 
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                position = each_line.find("```")
-                if position > 0:
-                    indents_to_remove = each_line[:position].count(' ')
-                elif "```" in each_line:
-                    writing = False
-                elif writing: # ChatGPT work-arounds
+
+        def get_model_class_lines(model: DotMap) -> list[str]:
+            """Get the model class from the model
+
+            Args:
+                model (Model): the model
+
+            Returns:
+                stlist[str]: the model class lines, fixed up
+            """
+
+            create_db_model_lines = list()
+            create_db_model_lines.append('\n\n')
+            class_lines = model.code.split('\n')
+            line_num = 0
+            indents_to_remove = 0
+            for each_line in class_lines:
+                line_num += 1
                 ''' decimal issues
 
                 1. bad import: see Run: tests/test_databases/ai-created/genai_demo/genai_demo_decimal
@@ -439,6 +556,9 @@ class GenAI(object):
                 got: or DECIMAL('
                 needed: or decimal.Decimal('0.00')
                 '''
+                if "= Table(" in each_line: # tests/test_databases/ai-created/time_cards/time_card_kw_arg/genai.response
+                    log.debug(f'.. fix_and_write_model_file detects table - raise excp to trigger retry')
+                    self.post_error = "ChatGPT Response contains table (not class) definitions: " + each_line
                 if 'sqlite:///' in each_line: # must be sqlite:///system/genai/temp/create_db_models.sqlite
                     current_url_rest = each_line.split('sqlite:///')[1]
                     quote_type = "'"
@@ -476,7 +596,7 @@ class GenAI(object):
                     each_line = '# ' + each_line
                 if 'sqlite:///system/genai/temp/model.sqlite': # fix prior version
                     each_line = each_line.replace('sqlite:///system/genai/temp/model.sqlite',
-
+                        'sqlite:///system/genai/temp/create_db_models.sqlite')
 
                 # logicbank fixes
                 if 'from logic_bank' in each_line: # we do our own imports
@@ -484,12 +604,135 @@ class GenAI(object):
                 if 'LogicBank.activate' in each_line:
                     each_line = each_line.replace('LogicBank.activate', '# LogicBank.activate')
 
-
-
-
+                create_db_model_lines.append(each_line + '\n')
+            return create_db_model_lines
+
+        def fix_model_lines(models, create_db_model_lines):
+            did_base = False
+            for each_model in models:
+                model_lines = get_model_class_lines(model=each_model)
+                for each_line in model_lines:
+                    each_fixed_line = each_line.replace('sa.', '') # sometimes it puts sa. in front of Column
+                    if 'Base = declarative_base()' in each_fixed_line: # sometimes created for each class
+                        if did_base:
+                            each_fixed_line = '# ' + each_fixed_line
+                        did_base = True
+                    if 'datetime.datetime.utcnow' in each_fixed_line:
+                        each_fixed_line = each_fixed_line.replace('datetime.datetime.utcnow', 'datetime.now()')
+                    if 'Column(date' in each_fixed_line:
+                        each_fixed_line = each_fixed_line.replace('Column(dat', 'column(Date')
+                    create_db_model_lines.append(each_fixed_line)
+            return create_db_model_lines
+
+        def insert_test_data_lines(test_data_lines : list[str]) -> list[str]:
+            """Insert test data lines into the model file
+
+            Args:
+                test_data_lines (list(str)):
+                    * initially header (engine =, sesssion =)
+                    * this function appends CPT test data
+
+            Returns:
+                list[str]: variable names for the test data rows (for create_all)
+            """
+
+            def fix_test_data_line(each_fixed_line: str) -> str:
+                """Fix the test data line
+
+                Args:
+                    each_fixed_line (str): the test data line
+
+                Returns:
+                    str: the fixed test data line
+                """
+
+                if '=datetime' in each_fixed_line:
+                    each_fixed_line = each_fixed_line.replace('=datetime.date', '=date')
+                if 'datetime.datetime.utcnow' in each_fixed_line:
+                    each_fixed_line = each_fixed_line.replace('datetime.datetime.utcnow', 'datetime.now()')
+                if 'engine = create_engine' in each_fixed_line: # CBT sometimes has engine = create_engine, so do we!
+                    each_fixed_line = each_fixed_line.replace('engine = create_engine', '# engine = create_engine')
+                check_for_row_name = False
+                if each_fixed_line.startswith('Base') or each_fixed_line.startswith('engine'):
+                    check_for_row_name = False
+                if 'Base.metadata.create_all(engine)' in each_fixed_line:
+                    each_fixed_line = each_fixed_line.replace('Base.metadata.create_all(engine)', '# Base.metadata.create_all(engine)')
+                return each_fixed_line
+
+            row_names = list()
+            use_test_data_rows = True # CPT test data, new format - test_data_rows (*way* less variable)
+            if use_test_data_rows & hasattr(self.response_dict, 'test_data_rows'):
+                test_data_rows = self.response_dict.test_data_rows
+                log.debug(f'.... test_data_rows: {len(test_data_rows)}')
+                for each_row in test_data_rows:
+                    each_fixed_line = fix_test_data_line(each_row.code)
+                    test_data_lines.append(each_fixed_line)
+                    row_names.append(each_row.test_data_row_variable)
+                pass
+            else: # CPT test data, old format - rows, plus session, engine etc (quite variable)
+                test_data_lines_ori = self.response_dict.test_data.split('\n') # gpt response
+                log.debug(f'.... test_data_lines...')
+                for each_line in test_data_lines_ori:
+                    each_fixed_line = fix_test_data_line(each_line)
+                    check_for_row_name = True
+                    test_data_lines.append(each_fixed_line) # append the fixed test data line
+                    if check_for_row_name and ' = ' in each_line and '(' in each_line: # CPT test data might have: tests = []
+                        assign = each_line.split(' = ')[0]
+                        # no tokens for: Session = sessionmaker(bind=engine) or session = Session()
+                        if '.' not in assign and 'Session' not in each_line and 'session.' not in each_line:
+                            row_names.append(assign)
+            return row_names
+
+        create_db_model_lines = list()
+        create_db_model_lines.append(f'# using resolved_model {self.resolved_model}')
+        create_db_model_lines.extend( # imports for classes (comes from api_logic_server_cli/prototypes/manager/system/genai/create_db_models_inserts/create_db_models_imports.py)
+            self.get_lines_from_file(f'system/genai/create_db_models_inserts/create_db_models_imports.py'))
+        create_db_model_lines.append("\nfrom sqlalchemy.dialects.sqlite import *\n") # specific for genai
+
+        models = self.response_dict.models
+
+        # Usage inside the class
+        create_db_model_lines = fix_model_lines(models, create_db_model_lines)
+
+        with open(f'{self.project.from_model}', "w") as create_db_model_file:
+            create_db_model_file.write("".join(create_db_model_lines))
+            create_db_model_file.write("\n\n# end of model classes\n\n")
+
+        # classes done, create db and add test_data code
+        test_data_lines = self.get_lines_from_file(f'system/genai/create_db_models_inserts/create_db_models_create_db.py')
+        test_data_lines.append('session.commit()')
+
+        row_names = insert_test_data_lines(test_data_lines)
+
+        test_data_lines.append('\n\n')
+        row_name_list = ', '.join(row_names)
+        add_rows = f'session.add_all([{row_name_list}])'
+        test_data_lines.append(add_rows )
+        test_data_lines.append('session.commit()')
+        test_data_lines.append('# end of test data\n\n')
+
+        with open(f'{self.project.from_model}', "a") as create_db_model_file:
+            create_db_model_file.write("try:\n ")
+            create_db_model_file.write("\n ".join(test_data_lines))
+            create_db_model_file.write("except Exception as exc:\n")
+            create_db_model_file.write(" print(f'Test Data Error: {exc}')\n")
 
-        log.debug(f'..
+        log.debug(f'.. code for db creation and test data: {self.project.from_model}')
 
+    def get_lines_from_file(self, file_name: str) -> list[str]:
+        """Get lines from a file
+
+        Args:
+            file_name (str): the file name
+
+        Returns:
+            list[str]: the lines from the file
+        """
+
+        with open(file_name, "r") as file:
+            lines = file.readlines()
+        return lines
+
     def save_prompt_messages_to_system_genai_temp_project(self):
         """
         Save prompts / responses to system/genai/temp/{project}/genai.response
@@ -509,90 +752,61 @@ class GenAI(object):
             if to_dir_save_dir.exists():
                 shutil.rmtree(to_dir_save_dir)
             os.makedirs(to_dir_save_dir, exist_ok=True)
-            log.debug(f'save_prompt_messages_to_system_genai_temp_project()')
+            log.debug(f'save_prompt_messages_to_system_genai_temp_project() - {str(to_dir_save_dir)}')
 
             if self.project.genai_repaired_response == '': # normal path, from --using
                 if write_prompt := True:
                     pass
                 file_num = 0
+                flat_project_name = Path(self.project.project_name).stem # in case project is dir/project-name
                 for each_message in self.messages:
                     suffix = 'prompt'
                     if each_message['role'] == 'system':
                         suffix = 'response'
-                    file_name = f'{
+                    file_name = f'{flat_project_name}_{str(file_num).zfill(3)}.{suffix}'
                     file_path = to_dir_save_dir.joinpath(file_name)
                     log.debug(f'.. saving[{file_name}] - {each_message["content"][:30]}...')
                     with open(file_path, "w") as message_file:
                         message_file.write(each_message['content'])
                     file_num += 1
                 suffix = 'response' # now add the this response
-                file_name = f'{
+                file_name = f'{flat_project_name}_{str(file_num).zfill(3)}.{suffix}' # FIXME
                 file_path = to_dir_save_dir.joinpath(file_name)
                 log.debug(f'.. saving[{file_name}] - {each_message["content"][:30]}...')
                 with open(file_path, "w") as message_file:
-
+                    json.dump(self.response_dict.toDict(), message_file, indent=4)
                 shutil.copyfile(self.project.from_model, to_dir_save_dir.joinpath('create_db_models.py'))
         except Exception as inst:
            # FileNotFoundError(2, 'No such file or directory')
-            log.error(f"\n\nError {inst} creating
+            log.error(f"\n\nError: {inst} \n..creating diagnostic files into dir: {str(gen_temp_dir)}\n\n")
            pass # intentional try/catch/bury - it's just diagnostics, so don't fail
        debug_string = "good breakpoint - return to main driver, and execute create_db_models.py"
-
-    def get_headers_with_openai_api_key(self) -> dict:
-        """
-        Returns:
-            dict: api header with OpenAI key (exits if not provided)
-        """
-
-        pass # https://community.openai.com/t/how-do-i-call-chatgpt-api-with-python-code/554554
-        if os.getenv('APILOGICSERVER_CHATGPT_APIKEY'):
-            openai_api_key = os.getenv('APILOGICSERVER_CHATGPT_APIKEY')
-        else:
-            from dotenv import dotenv_values
-            secrets = dotenv_values("system/secrets.txt")
-            openai_api_key = secrets['APILOGICSERVER_CHATGPT_APIKEY']
-            if openai_api_key == 'your-api-key-here':
-                if os.getenv('APILOGICSERVER_CHATGPT_APIKEY'):
-                    openai_api_key = os.getenv('APILOGICSERVER_CHATGPT_APIKEY')
-                else:
-                    log.error("\n\nMissing env value: APILOGICSERVER_CHATGPT_APIKEY")
-                    log.error("... Check your system/secrets file...\n")
-                    exit(1)
-        headers = {
-            "Content-Type": "application/json",
-            "Authorization": f"Bearer {openai_api_key}"
-        }
-        return headers
 
-    def get_and_save_raw_response_data(self,
+    def get_and_save_raw_response_data(self, completion: object, response_dict: dict):
         """
-
-        str: response_data
+        Write prompt --> system/genai/temp/chatgpt_original/retry.response
         """
 
-
+        ''' TODO - is exception used instead of return_code...
        # Check if the request was successful
-        if
-            raise Exception("Bad ChatGPT Request: " +
+        if completion.status_code == 400:
+            raise Exception("Bad ChatGPT Request: " + completion.text)
 
-        if
-            print("Error:",
-
-
-
-        with open(f'system/genai/temp/
-
-
-        with open(f'system/genai/temp/chatgpt_retry.response', "w") as model_file: # repair this & retry
-            model_file.write(response_data)
-            log.debug(f'.. stored raw response: {model_file.name}')
-        return response_data
+        if completion.status_code != 200:
+            print("Error:", completion.status_code, completion.text) # eg, You exceeded your current quota
+        '''
+        with open(f'system/genai/temp/chatgpt_original.response', "w") as response_file: # save for debug
+            json.dump(response_dict, response_file, indent=4)
+        with open(f'system/genai/temp/chatgpt_retry.response', "w") as response_file: # repair this & retry
+            json.dump(response_dict, response_file, indent=4)
+        return
 
 
-def
+def genai_cli_retry(using: str, db_url: str, repaired_response: str, genai_version: str,
                     retries: int, opt_locking: str, prompt_inserts: str, quote: bool,
-                    use_relns: bool, project_name: str
-
+                    use_relns: bool, project_name: str, tables: int, test_data_rows: int,
+                    temperature: float) -> None:
+    """ CLI Caller: provides using, or repaired_response & using
 
    Called from cli commands: genai, genai-create, genai-iterate
 
@@ -603,45 +817,48 @@ def genai(using, db_url, repaired_response: bool, genai_version: str,
    import api_logic_server_cli.api_logic_server as PR
 
    resolved_project_name = project_name
-    if
-        resolved_project_name
+    if repaired_response != "":
+        if resolved_project_name == '' or resolved_project_name is None:
+            resolved_project_name = Path(using).stem # project dir is the <cwd>/last node of using
    resolved_project_name = resolved_project_name.replace(' ', '_')
+    start_time = time.time()
 
    try_number = 1
    genai_use_relns = use_relns
    """ if 'unable to determine join condition', we retry this with False """
    if repaired_response != "":
        try_number = retries # if not calling GenAI, no need to retry:
-
+    failed = False
+    pr = PR.ProjectRun(command="create",
+                genai_version=genai_version,
+                genai_temperature = temperature,
+                genai_using=using, # the prompt file, or dir of prompt/response
+                repaired_response=repaired_response, # retry from [repaired] response file
+                opt_locking=opt_locking,
+                genai_prompt_inserts=prompt_inserts,
+                genai_use_relns=genai_use_relns,
+                quote=quote,
+                genai_tables=tables,
+                genai_test_data_rows=test_data_rows,
+                project_name=resolved_project_name, db_url=db_url,
+                execute=False)
    if retries < 0: # for debug: catch exceptions at point of failure
-
-                genai_using=using, # the prompt file, or conversation dir
-                repaired_response=repaired_response, # retry from [repaired] response file
-                opt_locking=opt_locking,
-                genai_prompt_inserts=prompt_inserts,
-                genai_use_relns=genai_use_relns,
-                quote=quote,
-                project_name=resolved_project_name, db_url=db_url)
+        pr.create_project() # calls GenAI() - the main driver
        log.info(f"GENAI successful")
    else:
        while try_number <= retries:
            try:
                failed = False
-
-                genai_using=using, # the prompt file, or dir of prompt/response
-                repaired_response=repaired_response, # retry from [repaired] response file
-                opt_locking=opt_locking,
-                genai_prompt_inserts=prompt_inserts,
-                genai_use_relns=genai_use_relns,
-                quote=quote,
-                project_name=resolved_project_name, db_url=db_url)
+                pr.create_project() # calls GenAI() - the main driver
                if do_force_failure := False:
                    if try_number < 3:
                        raise Exception("Forced Failure for Internal Testing")
                break # success - exit the loop
            except Exception as e: # almost certaily in api_logic_server_cli/create_from_model/create_db_from_model.py
+                log.error(traceback.format_exc())
                log.error(f"\n\nGenai failed With Error: {e}")
-
+                if resolved_project_name == '_genai_default':
+                    resolved_project_name = pr.project_name # defaulted in genai from response
                if Path(using).is_dir():
                    log.debug('conversation dir, check in-place iteration')
                    '''
@@ -687,6 +904,9 @@ def genai(using, db_url, repaired_response: bool, genai_version: str,
                    # to_dir_save_dir.rename(to_dir_save_dir_retry)
                    assert to_dir_save_dir.is_dir(), f"\nInternal Error - missing save directory: {to_dir_save_dir}"
                    # assert to_dir_save_dir_retry.is_dir(), f"\nInternal Error - missing retry directory: {to_dir_save_dir_retry}"
+                    log.debug(f'.. copying work files...')
+                    log.debug(f'.... from: {to_dir_save_dir}')
+                    log.debug(f'.... to: {to_dir_save_dir_retry}')
                    shutil.copytree(to_dir_save_dir, to_dir_save_dir_retry, dirs_exist_ok=True)
 
                failed = True
@@ -696,11 +916,13 @@ def genai(using, db_url, repaired_response: bool, genai_version: str,
                    failed = False
                else:
                    try_number += 1
+                    log.debug(f"\n\nRetry Genai #{try_number}\n")
                pass # retry (retries times)
-    if failed:
+    if failed == True: # retries exhausted (if failed: threw "an integer is required" ??
+        pass # https://github.com/microsoft/debugpy/issues/1708
        log.error(f"\n\nGenai Failed (Retries: {retries})")
        exit(1)
-    log.info(f"
+    log.info(f"\nGENAI ({str(int(time.time() - start_time))} secs) successful on try {try_number}\n")
 
 
 def key_module_map():