ApiLogicServer-12.1.0-py3-none-any.whl → ApiLogicServer-12.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. {ApiLogicServer-12.1.0.dist-info → ApiLogicServer-12.2.0.dist-info}/METADATA +1 -1
  2. {ApiLogicServer-12.1.0.dist-info → ApiLogicServer-12.2.0.dist-info}/RECORD +47 -39
  3. {ApiLogicServer-12.1.0.dist-info → ApiLogicServer-12.2.0.dist-info}/WHEEL +1 -1
  4. api_logic_server_cli/api_logic_server.py +15 -4
  5. api_logic_server_cli/api_logic_server_info.yaml +3 -3
  6. api_logic_server_cli/cli.py +20 -9
  7. api_logic_server_cli/cli_args_base.py +2 -0
  8. api_logic_server_cli/cli_args_project.py +9 -3
  9. api_logic_server_cli/create_from_model/__pycache__/ont_create.cpython-312.pyc +0 -0
  10. api_logic_server_cli/create_from_model/__pycache__/ui_admin_creator.cpython-312.pyc +0 -0
  11. api_logic_server_cli/create_from_model/ont_create.py +3 -1
  12. api_logic_server_cli/create_from_model/ui_admin_creator.py +6 -4
  13. api_logic_server_cli/genai.py +387 -287
  14. api_logic_server_cli/logging.yml +5 -0
  15. api_logic_server_cli/prototypes/.DS_Store +0 -0
  16. api_logic_server_cli/prototypes/base/api_logic_server_run.py +0 -2
  17. api_logic_server_cli/prototypes/base/config/server_setup.py +15 -1
  18. api_logic_server_cli/prototypes/base/integration/kafka/kafka_consumer.py +1 -1
  19. api_logic_server_cli/prototypes/base/integration/kafka/kafka_producer.py +1 -1
  20. api_logic_server_cli/prototypes/base/readme.md +21 -8
  21. api_logic_server_cli/prototypes/manager/.DS_Store +0 -0
  22. api_logic_server_cli/prototypes/manager/.vscode/.DS_Store +0 -0
  23. api_logic_server_cli/prototypes/manager/.vscode/launch.json +20 -0
  24. api_logic_server_cli/prototypes/manager/README.md +25 -1
  25. api_logic_server_cli/prototypes/manager/system/.DS_Store +0 -0
  26. api_logic_server_cli/prototypes/manager/system/genai/.DS_Store +0 -0
  27. api_logic_server_cli/prototypes/manager/system/genai/create_db_models_inserts/create_db_models_create_db.py +1 -0
  28. api_logic_server_cli/prototypes/manager/system/genai/create_db_models_inserts/create_db_models_imports.py +10 -7
  29. api_logic_server_cli/prototypes/manager/system/genai/create_db_models_inserts/create_db_models_test_data.py +1 -1
  30. api_logic_server_cli/prototypes/manager/system/genai/examples/genai_demo/genai_demo.response_example +99 -22
  31. api_logic_server_cli/prototypes/manager/system/genai/learning_requests/logic_bank_api.prompt +120 -7
  32. api_logic_server_cli/prototypes/manager/system/genai/prompt_inserts/.DS_Store +0 -0
  33. api_logic_server_cli/prototypes/manager/system/genai/prompt_inserts/response_format.prompt +26 -2
  34. api_logic_server_cli/prototypes/manager/system/genai/prompt_inserts/sqlite_inserts.prompt +10 -4
  35. api_logic_server_cli/prototypes/manager/system/genai/prompt_inserts/{sqlite_inserts_iterations.prompt → zsqlite_inserts_iterations.prompt} +5 -2
  36. api_logic_server_cli/prototypes/manager/system/genai/retry/conv/create_db_models.py +96 -0
  37. api_logic_server_cli/prototypes/manager/system/genai/retry/conv/inf-1_iter_1_1_000.response +1 -0
  38. api_logic_server_cli/prototypes/manager/system/genai/retry/conv/inf-1_iter_1_1_001.prompt +208 -0
  39. api_logic_server_cli/prototypes/manager/system/genai/retry/conv/inf-1_iter_1_1_002.prompt +89 -0
  40. api_logic_server_cli/prototypes/manager/system/genai/retry/conv/inf-1_iter_1_1_003.prompt +40 -0
  41. api_logic_server_cli/prototypes/manager/system/genai/retry/conv/inf-1_iter_1_1_004.response +57 -0
  42. api_logic_server_cli/prototypes/manager/system/genai/retry/conv/inf-1_iter_1_1_005.response +57 -0
  43. api_logic_server_cli/prototypes/manager/system/genai/retry/readme.md +1 -0
  44. api_logic_server_cli/prototypes/manager/system/genai/retry/retry.response +57 -0
  45. api_logic_server_cli/genaiZ.py +0 -752
  46. api_logic_server_cli/prototypes/manager/system/genai/examples/genai_demo/genai_demo.response_example_z +0 -130
  47. api_logic_server_cli/prototypes/manager/system/genai/prompt_inserts/web_genai copy.prompt +0 -15
  48. api_logic_server_cli/prototypes/manager/system/secrets.txt +0 -6
  49. {ApiLogicServer-12.1.0.dist-info → ApiLogicServer-12.2.0.dist-info}/LICENSE +0 -0
  50. {ApiLogicServer-12.1.0.dist-info → ApiLogicServer-12.2.0.dist-info}/entry_points.txt +0 -0
  51. {ApiLogicServer-12.1.0.dist-info → ApiLogicServer-12.2.0.dist-info}/top_level.txt +0 -0
@@ -1,752 +0,0 @@
1
- from typing import Dict, List
2
- from api_logic_server_cli.cli_args_project import Project
3
- import logging
4
- from pathlib import Path
5
- import importlib
6
- import requests
7
- import os
8
- import create_from_model.api_logic_server_utils as utils
9
- import shutil
10
-
11
- log = logging.getLogger(__name__)
12
-
13
-
14
- class GenAI(object):
15
- """ Create project from genai prompt(s).
16
-
17
- Called by api_logic_server to run ChatGPT (or a repaired response file) to create the SQLAlchemy model
18
-
19
- api_logic_server then uses the model to create the db, and proceeds with normal project creation.
20
-
21
- * there is also a callback to genai to insert logic into created project
22
- """
23
-
24
- def __init__(self, project: Project):
25
- """
26
-
27
- The key argument is `--using`
28
- * It can be a file, dir (conversation) or text argument.
29
- * Its "stem" denotes the project name to be created at the cwd
30
- * `self.project.genai_using`
31
-
32
- The (rarely used) `--repaired_response`
33
- * is for retry from corrected response
34
- * `--using` is required to get the project name, to be created at cwd
35
- * `self.project.genai_repaired_response`
36
-
37
- __init__() is the main driver (work directory is <manager>/system/genai/temp/)
38
-
39
- 1. run ChatGPT to create system/genai/temp/chatgpt_original.response, using...
40
- 2. get_prompt_messages() - get self.messages[] from file, dir (conversation) or text argument
41
- 3. Compute create_db_models
42
- a. Usually call chatGPT to get response, save to system/genai/temp/chatgpt_original.response
43
- b. If --gen-using-file, read response from file
44
- 4. self.get_logic_from_prompt() - saves prompt logic as comments for insertion into the model (4.3)
45
- 5. fix_and_write_model_file()
46
- 6. returns to main driver (api_logic_server#create_project()), which
47
- 1. runs create_db_from_model.create_db(self)
48
- 2. proceeds to create project
49
- 3. calls this.insert_logic_into_created_project() - merge logic into declare_logic.py
50
-
51
- the developer can then use Copilot to create logic (Rule.) from the prompt (or just code completion)
52
-
53
- see key_module_map() for key methods
54
-
55
-
56
- ##### Explore interim copilot access:
57
-
58
- VSCode/Copilot-chat can turn prompts into logic, so can we automate with API?
59
-
60
- https://stackoverflow.com/questions/76741410/how-to-invoke-github-copilot-programmatically
61
- https://docs.google.com/document/d/1o0TeNQtuT6moWU1bOq2K20IbSw4YhV1x_aFnKwo_XeU/edit#heading=h.3xmoi7pevsnp
62
- https://code.visualstudio.com/api/extension-guides/chat
63
- https://code.visualstudio.com/api/extension-guides/language-model
64
- https://github.com/B00TK1D/copilot-api
65
-
66
- ### Or use ChatGPT:
67
-
68
- Not sure vscode/copilot is best approach, since we'd like to activate this during project creation
69
- (eg on web/GenAI - not using vscode).
70
-
71
- * Thomas suggests there are ways to "teach" ChatGPT about Logic Bank. This is a *great* idea.
72
-
73
- https://platform.openai.com/docs/guides/fine-tuning/create-a-fine-tuned-model
74
- """
75
-
76
- self.project = project
77
- log.info(f'\nGenAI [{self.project.project_name}] creating microservice from: {self.project.genai_using}')
78
- if self.project.genai_repaired_response != '':
79
- log.info(f'.. retry from [repaired] response file: {self.project.genai_repaired_response}')
80
-
81
- self.project.from_model = f'system/genai/temp/create_db_models.py' # we always write the model to this file
82
- self.ensure_system_dir_exists() # ~ manager, so we can write to system/genai/temp
83
- self.delete_temp_files()
84
- self.post_error = ""
85
- """ eg, if response contains table defs, save_prompt_messages_to_system_genai_temp_project raises an exception to trigger retry """
86
- self.prompt = ""
87
- """ `--using` - can come from file or text argument """
88
-
89
- self.messages = self.get_prompt_messages() # compute self.messages, from file, dir or text argument
90
-
91
- if self.project.genai_repaired_response == '': # normal path - get response from ChatGPT
92
- log.debug(f'.. ChatGPT - saving response to: system/genai/temp/chatgpt_original.response')
93
- self.headers = self.get_headers_with_openai_api_key()
94
- url = "https://api.openai.com/v1/chat/completions"
95
- api_version = f'{self.project.genai_version}' # eg, "gpt-4o"
96
- data = {"model": api_version, "messages": self.messages}
97
- response = requests.post(url, headers=self.headers, json=data)
98
- # todo - review requesting structured output using the openai pkg
99
- # but, does not create the .py code to create database via SQLAlchemy
100
- create_db_models = self.get_and_save_raw_response_data(response)
101
- else: # for retry from corrected response... eg system/genai/temp/chatgpt_retry.response
102
- log.debug(f'\nUsing [corrected] response from: {self.project.genai_repaired_response}')
103
- with open(self.project.genai_repaired_response, 'r') as file:
104
- create_db_models = file.read()
105
- self.create_db_models = create_db_models
106
- """ the raw response data from ChatGPT which will be fixed & saved create_db_models.py """
107
-
108
- self.project.genai_logic = self.get_logic_from_prompt()
109
-
110
- self.fix_and_write_model_file(create_db_models) # write create_db_models.py for db creation
111
- self.save_prompt_messages_to_system_genai_temp_project() # save prompts, response and models.py
112
- if project.project_name_last_node == 'genai_demo_conversation':
113
- debug_string = "good breakpoint - check create_db_models.py"
114
- pass # if we've set self.post_error, we'll raise an exception to trigger retry
115
- pass # return to api_logic_server.ProjectRun to create db/project from create_db_models.py
116
-
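For orientation, the constructor above boils down to a single chat-completions POST. A minimal standalone sketch, assuming the `requests` package and an `APILOGICSERVER_CHATGPT_APIKEY` environment variable (`call_chatgpt` is an illustrative name, not part of this module):

```python
# Minimal sketch of the chat-completions call issued in __init__ above.
# Assumes APILOGICSERVER_CHATGPT_APIKEY is set; call_chatgpt is illustrative.
import os
from typing import Dict, List

import requests

def call_chatgpt(messages: List[Dict[str, str]], model: str = "gpt-4o") -> str:
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {os.environ['APILOGICSERVER_CHATGPT_APIKEY']}",
    }
    data = {"model": model, "messages": messages}
    response = requests.post("https://api.openai.com/v1/chat/completions",
                             headers=headers, json=data)
    response.raise_for_status()  # surfaces quota / bad-request errors early
    return response.json()["choices"][0]["message"]["content"]

if __name__ == "__main__":
    print(call_chatgpt([{"role": "user",
                         "content": "Create a SQLAlchemy model for a simple order system"}]))
```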
117
- def delete_temp_files(self):
118
- """Delete temp files created by genai ((system/genai/temp -- models, responses)"""
119
- Path('system/genai/temp/create_db_models.sqlite').unlink(missing_ok=True) # delete temp (work) files
120
- Path(self.project.from_model).unlink(missing_ok=True)
121
- if self.project.genai_repaired_response == '': # clean up unless retrying from chatgpt_original.response
122
- Path('system/genai/temp/chatgpt_original.response').unlink(missing_ok=True)
123
- Path('system/genai/temp/chatgpt_retry.response').unlink(missing_ok=True)
124
-
125
- def create_presets(self, prompt_messages: List[Dict[str, str]]):
126
- """ Create presets - you are a data modelling expert, and logicbank api etc """
127
- pass
128
-
129
- starting_message = {"role": "system", "content": "You are a data modelling expert and python software architect who expands on user input ideas. You create data models with at least 4 tables"}
130
- prompt_messages.append( starting_message)
131
-
132
- learning_requests = self.get_learning_requests()
133
- prompt_messages.extend(learning_requests) # if any, prepend learning requests (logicbank api etc)
134
- log.debug(f'get_prompt_messages()')
135
- log.debug(f'.. conv[000] presets: {starting_message}')
136
- log.debug(f'.. conv[001] presets: {learning_requests[0]["content"][:30]}...')
137
- return len(learning_requests)
138
-
139
- def get_prompt_messages(self) -> List[Dict[str, str]]:
140
- """ Get prompt from file, dir (conversation) or text argument
141
- Prepend with learning_requests (if any)
142
-
143
- Returned prompts include inserts from prompt_inserts (prompt engineering)
144
-
145
- Returns:
146
- dict[]: [ {role: (system | user) }: { content: user-prompt-or-system-response } ]
147
-
148
- """
149
-
150
- prompt_messages : List[ Dict[str, str] ] = [] # prompt/response conversation to be sent to ChatGPT
151
-
152
- if self.project.genai_repaired_response != '': # if exists, get prompt (just for inserting into declare_logic.py)
153
- prompt = "" # we are not calling ChatGPT, just getting the prompt to scan for logic
154
- if Path(self.project.genai_using).is_file(): # eg, launch.json for airport_4 is just a name
155
- with open(f'{self.project.genai_using}', 'r') as file:
156
- prompt = file.read()
157
- prompt_messages.append( {"role": "user", "content": prompt})
158
- elif Path(self.project.genai_using).is_dir(): # conversation from directory
159
- response_count = 0
160
- request_count = 0
161
- learning_requests_len = 0
162
- prompt = ""
163
- for each_file in sorted(Path(self.project.genai_using).iterdir()):
164
- if each_file.is_file() and (each_file.suffix == '.prompt' or each_file.suffix == '.response'):
165
- with open(each_file, 'r') as file:
166
- prompt = file.read()
167
- role = "user"
168
- if response_count == 0 and request_count == 0:
169
- if not prompt.startswith('You are a '): # add *missing* presets
170
- learning_requests_len = self.create_presets(prompt_messages)
171
- request_count = 1
172
- response_count = learning_requests_len
173
- file_num = request_count + response_count
174
- file_str = str(file_num).zfill(3)
175
- log.debug(f'.. conv[{file_str}] processes: {os.path.basename(each_file)} - {prompt[:30]}...')
176
- if each_file.suffix == ".response":
177
- role = 'system'
178
- response_count += 1
179
- else:
180
- request_count += 1 # rebuild response with *all* tables
181
- if request_count > 1: # Run Config: genai AUTO DEALERSHIP CONVERSATION
182
- if 'updating the prior response' not in prompt:
183
- prompt = self.get_prompt__with_inserts(raw_prompt=prompt, for_iteration=True)
184
- prompt_messages.append( {"role": role, "content": prompt})
185
- else:
186
- log.debug(f'.. .. conv ignores: {os.path.basename(each_file)}')
187
- if response_count == 0:
188
- log.debug(f".. no response files - applying insert to prompt")
189
- prompt = self.get_prompt__with_inserts(raw_prompt=prompt) # insert db-specific logic
190
- prompt_messages[1 + learning_requests_len]["content"] = prompt
191
- else: # prompt from text (add system/genai/pre_post.prompt)
192
- # open and read the project description in natural language
193
- learning_requests_len = self.create_presets(prompt_messages)
194
- with open(f'{self.project.genai_using}', 'r') as file:
195
- log.debug(f'.. from file: {self.project.genai_using}')
196
- raw_prompt = file.read()
197
- prompt = self.get_prompt__with_inserts(raw_prompt=raw_prompt) # insert db-specific logic
198
- prompt_messages.append( {"role": "user", "content": prompt})
199
-
200
-
201
- # log.debug(f'.. prompt_messages: {prompt_messages}') # heaps of output
202
- return prompt_messages
203
-
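The directory case above reduces to a simple mapping: `*.prompt` files become `user` messages and `*.response` files become `system` messages, processed in sorted order. A minimal sketch (directory path and helper name are illustrative):

```python
# Sketch of the conversation-directory mapping used by get_prompt_messages:
# *.prompt -> "user" messages, *.response -> "system" messages, in sorted order.
from pathlib import Path
from typing import Dict, List

def messages_from_conversation(conv_dir: str) -> List[Dict[str, str]]:
    messages: List[Dict[str, str]] = []
    for each_file in sorted(Path(conv_dir).iterdir()):
        if not each_file.is_file() or each_file.suffix not in ('.prompt', '.response'):
            continue  # ignore unrelated files
        role = 'system' if each_file.suffix == '.response' else 'user'
        messages.append({"role": role, "content": each_file.read_text()})
    return messages

# e.g. messages_from_conversation('system/genai/temp/genai_demo_conversation')
```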
204
- def get_learning_requests(self) -> List [ Dict[str, str]]:
205
- """ Get learning requests from cwd/system/genai/learning_requests
206
-
207
- Returns:
208
- list: learning_requests dicts {"role": "user", "content": learning_request_lines}
209
- """
210
-
211
- learning_requests : List[ Dict[str, str] ] = [] # learning -> prompt/response conversation to be sent to ChatGPT
212
- request_files_dir_path = Path(f'system/genai/learning_requests')
213
- if request_files_dir_path.exists():
214
- # loop through files in request_files_dir, and append to prompt_messages
215
- for root, dirs, files in os.walk(request_files_dir_path):
216
- for file in files:
217
- if file.endswith(".prompt"):
218
- with open(request_files_dir_path.joinpath(file), "r") as learnings:
219
- learning_request_lines = learnings.read()
220
- learning_requests.append( {"role": "user", "content": learning_request_lines})
221
- return learning_requests # TODO - what if no learning requests?
222
-
223
- def get_prompt__with_inserts(self, raw_prompt: str, for_iteration: bool = False) -> str:
224
- """ prompt-engineering: insert db-specific logic into prompt
225
- raw_prompt: the prompt from file or text argument
226
- """
227
- prompt_result = raw_prompt
228
- prompt_inserts = ''
229
- if '*' == self.project.genai_prompt_inserts: # * means no inserts
230
- prompt_inserts = "*"
231
- elif '' != self.project.genai_prompt_inserts: # if text, use this file
232
- prompt_inserts = self.project.genai_prompt_inserts
233
- elif 'sqlite' in self.project.db_url: # if blank, use default for db
234
- prompt_inserts = f'sqlite_inserts.prompt'
235
- elif 'postgresql' in self.project.db_url:
236
- prompt_inserts = f'postgresql_inserts.prompt'
237
- elif 'mysql' in self.project.db_url:
238
- prompt_inserts = f'mysql_inserts.prompt'
239
-
240
- if prompt_inserts != "*":
241
- prompt_eng_file_name = f'system/genai/prompt_inserts/{prompt_inserts}'
242
- if for_iteration:
243
- prompt_eng_file_name = prompt_eng_file_name.replace('.', '_iterations.')
244
- assert Path(prompt_eng_file_name).exists(), \
245
- f"Missing prompt_inserts file: {prompt_eng_file_name}" # eg api_logic_server_cli/prototypes/manager/system/genai/prompt_inserts/sqlite_inserts.prompt
246
- log.debug(f'get_prompt__with_inserts: {str(os.getcwd())} / {prompt_eng_file_name}')
247
- with open(prompt_eng_file_name, 'r') as file:
248
- pre_post = file.read() # eg, Use SQLAlchemy to create a sqlite database named system/genai/temp/create_db_models.sqlite, with
249
- prompt_result = pre_post.replace('{{prompt}}', raw_prompt)
250
-
251
- prompt_lines = prompt_result.split('\n')
252
- prompt_line_number = 0
253
- for each_line in prompt_lines:
254
- if "LogicBank" in each_line:
255
- log.debug(f'.. inserted: {each_line}')
256
- prompt_eng_logic_file_name = f'system/genai/prompt_inserts/logic_inserts.prompt'
257
- with open(prompt_eng_logic_file_name, 'r') as file:
258
- prompt_logic = file.read() # eg, Use SQLAlchemy to...
259
- prompt_lines[prompt_line_number] = prompt_logic
260
- break
261
- prompt_line_number += 1
262
- prompt_result = "\n".join(prompt_lines) # back to a string
263
- pass
264
- return prompt_result
265
-
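The prompt-engineering step above is essentially template substitution: the db-specific insert file contains a `{{prompt}}` placeholder that is replaced with the raw prompt. A reduced sketch (the path shown is the default sqlite insert; the helper name is illustrative):

```python
# Sketch of the {{prompt}} substitution performed by get_prompt__with_inserts.
from pathlib import Path

def apply_prompt_insert(raw_prompt: str,
                        insert_file: str = 'system/genai/prompt_inserts/sqlite_inserts.prompt') -> str:
    template = Path(insert_file).read_text()   # e.g. "Use SQLAlchemy to create ... {{prompt}} ..."
    return template.replace('{{prompt}}', raw_prompt)
```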
266
- def ensure_system_dir_exists(self):
267
- """
268
- If missing, copy prototypes/manager/system -> os.getcwd()/system
269
-
270
- cwd is typically a manager...
271
- * eg als dev: ~/dev/ApiLogicServer/ApiLogicServer-dev/build_and_test/ApiLogicServer
272
- * eg, user: ~/dev/ApiLogicServer/
273
- * we need to create genai/temp files there
274
- """
275
-
276
- from_dir = self.project.api_logic_server_dir_path.joinpath('prototypes/manager')
277
- to_dir = Path(os.getcwd())
278
- to_dir_check = Path(to_dir).joinpath('system')
279
- if not to_dir_check.exists():
280
- copied_path = shutil.copytree(src=from_dir, dst=to_dir, dirs_exist_ok=True)
281
-
282
- def get_logic_from_prompt(self) -> str:
283
- """ Get logic from ChatGPT prompt (code after "Enforce")
284
-
285
- Args:
286
-
287
- Returns: str - the prompt logic, formatted as comment text
288
- """
289
-
290
- prompt = self.prompt # TODO - redesign if conversation
291
- prompt_array = prompt.split('\n')
292
- logic_text = """
293
- GenAI: Used Logic Bank to enforce these requirements:
294
-
295
- """
296
- line_num = 0
297
- logic_lines = 0
298
- writing = False
299
- for each_line in prompt_array:
300
- line_num += 1
301
- if "Enforce" in each_line:
302
- writing = True
303
- if writing:
304
- if 'Hints: use autonum keys' in each_line:
305
- break
306
- logic_lines += 1
307
- logic_text += ' ' + each_line + '\n'
308
- return logic_text
309
-
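In short, the method copies prompt lines from the first line containing "Enforce" up to the 'Hints: use autonum keys' marker. A standalone sketch of that extraction (helper name is illustrative):

```python
# Sketch of the "Enforce ... Hints" extraction done by get_logic_from_prompt.
def extract_logic(prompt: str) -> str:
    logic_text = "\n    GenAI: Used Logic Bank to enforce these requirements:\n\n"
    writing = False
    for each_line in prompt.split('\n'):
        if "Enforce" in each_line:
            writing = True            # start copying at the first Enforce line
        if writing:
            if 'Hints: use autonum keys' in each_line:
                break                 # stop at the hints marker
            logic_text += '    ' + each_line + '\n'
    return logic_text
```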
310
- @staticmethod
311
- def remove_logic_halluncinations(each_line: str) -> str:
312
- """remove hallucinations from logic
313
-
314
- eg: Rule.setup()
315
-
316
- Args:
317
- each_line (str): a line of generated logic code
318
-
319
- Returns:
320
- str: the line, with unrecognized Rule calls corrected or commented out
321
- """ """ """
322
- return_line = each_line
323
- if each_line.startswith(' Rule.') or each_line.startswith(' DeclareRule.'):
324
- if 'Rule.sum' in each_line:
325
- pass
326
- elif 'Rule.count' in each_line:
327
- pass
328
- elif 'Rule.formula' in each_line:
329
- pass
330
- elif 'Rule.copy' in each_line:
331
- pass
332
- elif 'Rule.constraint' in each_line:
333
- pass
334
- elif 'Rule.allocate' in each_line:
335
- pass
336
- elif 'Rule.calculate' in each_line:
337
- return_line = each_line.replace('Rule.calculate', 'Rule.copy')
338
- else:
339
- return_line = each_line.replace(' ', ' # ')
340
- log.debug(f'.. removed hallucination: {each_line}')
341
- return return_line
342
-
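A compact restatement of the filter above: known Rule verbs pass through, Rule.calculate is rewritten to Rule.copy, and anything else is commented out (the verb list and helper name are illustrative, taken from the elif chain above):

```python
# Compact sketch of the hallucination whitelist used above.
KNOWN_VERBS = ('Rule.sum', 'Rule.count', 'Rule.formula', 'Rule.copy',
               'Rule.constraint', 'Rule.allocate')

def filter_rule_line(each_line: str) -> str:
    stripped = each_line.strip()
    if not (stripped.startswith('Rule.') or stripped.startswith('DeclareRule.')):
        return each_line                           # not a rule line - leave it alone
    if 'Rule.calculate' in each_line:
        return each_line.replace('Rule.calculate', 'Rule.copy')
    if any(verb in each_line for verb in KNOWN_VERBS):
        return each_line                           # recognized verb - keep as-is
    return '# ' + each_line                        # unknown verb - comment it out

print(filter_rule_line("    Rule.magic(derive=Order.total)"))  # -> commented out
```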
343
- def insert_logic_into_created_project(self): # TODO - redesign if conversation
344
- """Called *after project created* to insert prompt logic into
345
- 1. declare_logic.py (as comment)
346
- 2. readme.md
347
-
348
- Also creates the doc directory for record of prompt, response.
349
- """
350
-
351
- logic_file = self.project.project_directory_path.joinpath('logic/declare_logic.py')
352
- in_logic = False
353
- translated_logic = "\n # Logic from GenAI: (or, use your IDE w/ code completion)\n\n"
354
- for each_line in self.create_db_models.split('\n'):
355
- if in_logic:
356
- if each_line.startswith(' '): # indent => still in logic
357
- each_repaired_line = self.remove_logic_halluncinations(each_line=each_line)
358
- translated_logic += each_repaired_line + '\n'
359
- elif each_line.strip() == '': # blank => still in logic
360
- pass
361
- else: # no-indent => end of logic
362
- in_logic = False
363
- if "declare_logic()" in each_line:
364
- in_logic = True
365
- translated_logic += "\n # End Logic from GenAI\n\n"
366
- utils.insert_lines_at(lines=translated_logic,
367
- file_name=logic_file,
368
- at='discover_logic()',
369
- after=True)
370
-
371
- readme_lines = \
372
- f'\n**GenAI Microservice Automation:** after verifying, apply logic:\n' +\
373
- f'1. Open [logic/declare_logic.py](logic/declare_logic.py) and use Copilot\n' +\
374
- f'\n' +\
375
- f'&nbsp;\n'
376
- if update_readme := False:
377
- readme_file = self.project.project_directory_path.joinpath('readme.md')
378
- utils.insert_lines_at(lines=readme_lines,
379
- file_name=readme_file,
380
- at='**other IDEs,**',
381
- after=True)
382
- try:
383
- docs_dir = self.project.project_directory_path.joinpath("docs")
384
- # os.makedirs(docs_dir, exist_ok=True)
385
- # prompt_file_path = docs_dir.joinpath("created_genai_using.prompt")
386
- # copy self.project.gen_ai_save_dir to docs_dir
387
- shutil.copytree(self.project.gen_ai_save_dir, docs_dir, dirs_exist_ok=True)
388
-
389
- docs_readme_file_path = docs_dir.joinpath("readme.md")
390
- docs_readme_lines = "## GenAI Notes\n\n"
391
- docs_readme_lines += "Review the [database diagram](https://apilogicserver.github.io/Docs/Database-Diagram/).\n\n"
392
- docs_readme_lines += "GenAI work files (exact prompts, with inserts) saved for iterations, diagnostics\n"
393
- docs_readme_lines += "See [WebGenAI-CLI](https://apilogicserver.github.io/Docs/WebGenAI-CLI/). "
394
- with open(docs_readme_file_path, "w") as docs_readme_file:
395
- docs_readme_file.write(docs_readme_lines)
396
-
397
- except: # intentional try/catch/bury - it's just docs, so don't fail
398
- import traceback
399
- log.error(f"\n\nERROR creating genai project docs: {docs_dir}\n\n{traceback.format_exc()}")
400
- pass
401
-
402
- def fix_and_write_model_file(self, response_data: str):
403
- """
404
- 1. break response data into lines
405
- 2. throw away instructions
406
- 3. ChatGPT work-arounds (decimal, indent, bogus relns)
407
- 4. Ensure the sqlite url is correct: sqlite:///system/genai/temp/create_db_models.sqlite
408
- 5. write model file to self.project.from_model
409
-
410
- Args:
411
- response_data (str): the chatgpt response
412
-
413
- """
414
- model_class = "# created from response - used to create database and project\n"
415
- model_class += "# should run without error\n"
416
- model_class += "# if not, check for decimal, indent, or import issues\n\n"
417
- with open(f'system/genai/create_db_models_inserts/create_db_models_prefix.py', "r") as inserts:
418
- model_lines = inserts.readlines()
419
- for each_line in model_lines:
420
- model_class += each_line + '\n'
421
-
422
- response_array = response_data.split('\n')
423
- line_num = 0
424
- writing = False
425
- indents_to_remove = 0
426
- for each_line in response_array:
427
- line_num += 1
428
- if "```python" in each_line:
429
- writing = True
430
- # count spaces before "```"
431
- # next_line = response_array[line_num+1]
432
- position = each_line.find("```")
433
- if position > 0:
434
- indents_to_remove = each_line[:position].count(' ')
435
- elif "```" in each_line:
436
- writing = False
437
- elif writing: # ChatGPT work-arounds
438
- ''' decimal issues
439
-
440
- 1. bad import: see Run: tests/test_databases/ai-created/genai_demo/genai_demo_decimal
441
- from decimal import Decimal # Decimal fix: needs to be from decimal import DECIMAL
442
-
443
- 2. Missing import: from SQLAlchemy import .... DECIMAL
444
-
445
- 3. Column(Decimal) -> Column(DECIMAL)
446
- see in: tests/test_databases/ai-created/budget_allocation/budget_allocations/budget_allocations_3_decimal
447
-
448
- 4. Bad syntax on test data: see Run: blt/time_cards_decimal from RESPONSE
449
- got: balance=DECIMAL('100.50')
450
- needed: balance=1000.0
451
- fixed with import in create_db_models_prefix.py
452
-
453
- 5. Bad syntax on test data calls: see api_logic_server_cli/prototypes/manager/system/genai/examples/genai_demo/genai_demo_conversation_bad_decimal/genai_demo_03.response
454
- got: or Decimal('0.00')
455
- needed: or decimal.Decimal('0.00')
456
-
457
- 6. Bad syntax on test data calls: see api_logic_server_cli/prototypes/manager/system/genai/examples/genai_demo/genai_demo_conversation_bad_decimal_2/genai_demo_conversation_002.response
458
- got: or DECIMAL('
459
- needed: or decimal.Decimal('0.00')
460
- '''
461
- if "= Table(" in each_line: # tests/test_databases/ai-created/time_cards/time_card_kw_arg/genai.response
462
- log.debug(f'.. fix_and_write_model_file detects table - raise excp to trigger retry')
463
- self.post_error = "ChatGPT Response contains table (not class) definitions: " + each_line
464
- if 'sqlite:///' in each_line: # must be sqlite:///system/genai/temp/create_db_models.sqlite
465
- current_url_rest = each_line.split('sqlite:///')[1]
466
- quote_type = "'"
467
- if '"' in current_url_rest:
468
- quote_type = '"' # eg, tests/test_databases/ai-created/time_cards/time_card_decimal/genai.response
469
- current_url = current_url_rest.split(quote_type)[0]
470
- proper_url = 'system/genai/temp/create_db_models.sqlite'
471
- each_line = each_line.replace(current_url, proper_url)
472
- if current_url != proper_url:
473
- log.debug(f'.. fixed sqlite url: {current_url} -> system/genai/temp/create_db_models.sqlite')
474
- if 'Decimal,' in each_line: # SQLAlchemy import
475
- each_line = each_line.replace('Decimal,', 'DECIMAL,')
476
- # other Decimal bugs: see api_logic_server_cli/prototypes/manager/system/genai/reference/errors/chatgpt_decimal.txt
477
- if ', Decimal' in each_line: # Cap'n K, at your service
478
- each_line = each_line.replace(', Decimal', ', DECIMAL')
479
- if 'rom decimal import Decimal' in each_line:
480
- each_line = each_line.replace('from decimal import Decimal', 'import decimal')
481
- if '=Decimal(' in each_line:
482
- each_line = each_line.replace('=Decimal(', '=decimal.Decimal(')
483
- if ' Decimal(' in each_line:
484
- each_line = each_line.replace(' Decimal(', ' decimal.Decimal(')
485
- if 'Column(Decimal)' in each_line:
486
- each_line = each_line.replace('Column(Decimal)', 'Column(DECIMAL)')
487
- if "DECIMAL('" in each_line:
488
- each_line = each_line.replace("DECIMAL('", "decimal.Decimal('")
489
- if 'end_time(datetime' in each_line: # tests/test_databases/ai-created/time_cards/time_card_kw_arg/genai.response
490
- each_line = each_line.replace('end_time(datetime', 'end_time=datetime')
491
- if indents_to_remove > 0:
492
- each_line = each_line[indents_to_remove:]
493
- if 'relationship(' in each_line and self.project.genai_use_relns == False:
494
- # airport4 fails with could not determine join condition between parent/child tables on relationship Airport.flights
495
- if each_line.startswith(' '):
496
- each_line = each_line.replace(' ', ' # ')
497
- else: # sometimes it puts relns outside the class (so, outdented)
498
- each_line = '# ' + each_line
499
- if 'sqlite:///system/genai/temp/model.sqlite' in each_line: # fix prior version
500
- each_line = each_line.replace('sqlite:///system/genai/temp/model.sqlite',
501
- 'sqlite:///system/genai/temp/create_db_models.sqlite')
502
-
503
- # logicbank fixes
504
- if 'from logic_bank' in each_line: # we do our own imports
505
- each_line = each_line.replace('from', '# from')
506
- if 'LogicBank.activate' in each_line:
507
- each_line = each_line.replace('LogicBank.activate', '# LogicBank.activate')
508
-
509
- model_class += each_line + '\n'
510
- with open(f'{self.project.from_model}', "w") as model_file:
511
- model_file.write(model_class)
512
-
513
- log.debug(f'.. model file created: {self.project.from_model}')
514
-
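The Decimal work-arounds above are plain string substitutions applied per response line. A reduced sketch covering only a subset of the fixes (helper name is illustrative):

```python
# Subset of the per-line Decimal repairs applied by fix_and_write_model_file.
def fix_decimal_issues(each_line: str) -> str:
    each_line = each_line.replace('from decimal import Decimal', 'import decimal')
    each_line = each_line.replace('Column(Decimal)', 'Column(DECIMAL)')
    each_line = each_line.replace("DECIMAL('", "decimal.Decimal('")
    return each_line

assert fix_decimal_issues("balance = Column(Decimal)") == "balance = Column(DECIMAL)"
assert fix_decimal_issues("balance=DECIMAL('100.50')") == "balance=decimal.Decimal('100.50')"
```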
515
- def save_prompt_messages_to_system_genai_temp_project(self):
516
- """
517
- Save prompts / responses to system/genai/temp/{project}/genai.response
518
-
519
- Copy system/genai/temp/create_db_models.py to system/genai/temp/{project}/create_db_models.py
520
- """
521
- try:
522
- to_dir = Path(os.getcwd())
523
- gen_temp_dir = Path(to_dir).joinpath(f'system/genai/temp')
524
- to_dir_save_dir = Path(to_dir).joinpath(f'system/genai/temp/{self.project.project_name_last_node}')
525
- """ project work files saved to system/genai/temp/<project> """
526
- log.info(f'.. saving work files to: system/genai/temp/{self.project.project_name_last_node}')
527
- """ system/genai/temp/project - save prompt, response, and create_db_models.py to this directory """
528
- self.project.gen_ai_save_dir = to_dir_save_dir
529
- """ project work files saved to system/genai/temp/<project> """
530
- # delete and recreate the directory
531
- if to_dir_save_dir.exists():
532
- shutil.rmtree(to_dir_save_dir)
533
- os.makedirs(to_dir_save_dir, exist_ok=True)
534
- log.debug(f'save_prompt_messages_to_system_genai_temp_project() - {str(to_dir_save_dir)}')
535
-
536
- if self.project.genai_repaired_response == '': # normal path, from --using
537
- if write_prompt := True:
538
- pass
539
- file_num = 0
540
- flat_project_name = Path(self.project.project_name).stem # in case project is dir/project-name
541
- for each_message in self.messages:
542
- suffix = 'prompt'
543
- if each_message['role'] == 'system':
544
- suffix = 'response'
545
- file_name = f'{flat_project_name}_{str(file_num).zfill(3)}.{suffix}'
546
- file_path = to_dir_save_dir.joinpath(file_name)
547
- log.debug(f'.. saving[{file_name}] - {each_message["content"][:30]}...')
548
- with open(file_path, "w") as message_file:
549
- message_file.write(each_message['content'])
550
- file_num += 1
551
- suffix = 'response' # now add this response
552
- file_name = f'{flat_project_name}_{str(file_num).zfill(3)}.{suffix}' # FIXME
553
- file_path = to_dir_save_dir.joinpath(file_name)
554
- log.debug(f'.. saving[{file_name}] - {each_message["content"][:30]}...')
555
- with open(file_path, "w") as message_file:
556
- message_file.write(self.create_db_models)
557
- shutil.copyfile(self.project.from_model, to_dir_save_dir.joinpath('create_db_models.py'))
558
- except Exception as inst:
559
- # FileNotFoundError(2, 'No such file or directory')
560
- log.error(f"\n\nError: {inst} \n..creating diagnostic files into dir: {str(gen_temp_dir)}\n\n")
561
- pass # intentional try/catch/bury - it's just diagnostics, so don't fail
562
- debug_string = "good breakpoint - return to main driver, and execute create_db_models.py"
563
-
564
- def get_headers_with_openai_api_key(self) -> dict:
565
- """
566
- Returns:
567
- dict: api header with OpenAI key (exits if not provided)
568
- """
569
-
570
- pass # https://community.openai.com/t/how-do-i-call-chatgpt-api-with-python-code/554554
571
- if os.getenv('APILOGICSERVER_CHATGPT_APIKEY'):
572
- openai_api_key = os.getenv('APILOGICSERVER_CHATGPT_APIKEY')
573
- else:
574
- from dotenv import dotenv_values
575
- secrets = dotenv_values("system/secrets.txt")
576
- openai_api_key = secrets['APILOGICSERVER_CHATGPT_APIKEY']
577
- if openai_api_key == 'your-api-key-here':
578
- if os.getenv('APILOGICSERVER_CHATGPT_APIKEY'):
579
- openai_api_key = os.getenv('APILOGICSERVER_CHATGPT_APIKEY')
580
- else:
581
- log.error("\n\nMissing env value: APILOGICSERVER_CHATGPT_APIKEY")
582
- log.error("... Check your system/secrets file...\n")
583
- exit(1)
584
- headers = {
585
- "Content-Type": "application/json",
586
- "Authorization": f"Bearer {openai_api_key}"
587
- }
588
- return headers
589
-
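The key lookup above prefers the environment variable and falls back to system/secrets.txt in dotenv format, rejecting the placeholder value. A compact sketch, assuming python-dotenv is installed (helper name is illustrative):

```python
# Sketch of the API-key resolution performed by get_headers_with_openai_api_key.
import os
from dotenv import dotenv_values

def resolve_openai_api_key() -> str:
    key = os.getenv('APILOGICSERVER_CHATGPT_APIKEY')
    if not key:
        key = dotenv_values("system/secrets.txt").get('APILOGICSERVER_CHATGPT_APIKEY')
    if not key or key == 'your-api-key-here':
        raise SystemExit("Missing env value: APILOGICSERVER_CHATGPT_APIKEY "
                         "- check your system/secrets.txt file")
    return key
```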
590
- def get_and_save_raw_response_data(self, response) -> str:
591
- """
592
- Returns:
593
- str: response_data
594
- """
595
-
596
-
597
- # Check if the request was successful
598
- if response.status_code == 400:
599
- raise Exception("Bad ChatGPT Request: " + response.text)
600
-
601
- if response.status_code != 200:
602
- print("Error:", response.status_code, response.text) # eg, You exceeded your current quota
603
-
604
- response_data = response.json()['choices'][0]['message']['content']
605
- file_name = f'system/genai/temp/chatgpt_original.response'
606
- with open(f'system/genai/temp/chatgpt_original.response', "w") as model_file: # save for debug
607
- model_file.write(response_data)
608
- file_name = model_file.name
609
- with open(f'system/genai/temp/chatgpt_retry.response', "w") as model_file: # repair this & retry
610
- model_file.write(response_data)
611
- log.debug(f'.. stored raw response: {model_file.name}')
612
- return response_data
613
-
614
-
615
- def genai(using, db_url, repaired_response: bool, genai_version: str,
616
- retries: int, opt_locking: str, prompt_inserts: str, quote: bool,
617
- use_relns: bool, project_name: str):
618
- """ cli caller provides using, or repaired_response & using
619
-
620
- Called from cli commands: genai, genai-create, genai-iterate
621
-
622
- Invokes api_logic_server.ProjectRun (with 3 retries)
623
-
624
- Which calls Genai()
625
- """
626
- import api_logic_server_cli.api_logic_server as PR
627
-
628
- resolved_project_name = project_name
629
- if resolved_project_name == '' or resolved_project_name is None:
630
- resolved_project_name = Path(using).stem # default project name is the <cwd>/last node of using
631
- resolved_project_name = resolved_project_name.replace(' ', '_')
632
-
633
- try_number = 1
634
- genai_use_relns = use_relns
635
- """ if 'unable to determine join condition', we retry this with False """
636
- if repaired_response != "":
637
- try_number = retries # if not calling GenAI, no need to retry:
638
- # TODO or 0, right?
639
- if retries < 0: # for debug: catch exceptions at point of failure
640
- PR.ProjectRun(command="create", genai_version=genai_version,
641
- genai_using=using, # the prompt file, or conversation dir
642
- repaired_response=repaired_response, # retry from [repaired] response file
643
- opt_locking=opt_locking,
644
- genai_prompt_inserts=prompt_inserts,
645
- genai_use_relns=genai_use_relns,
646
- quote=quote,
647
- project_name=resolved_project_name, db_url=db_url)
648
- log.info(f"GENAI successful")
649
- else:
650
- failed = False
651
- while try_number <= retries:
652
- try:
653
- failed = False
654
- PR.ProjectRun(command="create", genai_version=genai_version,
655
- genai_using=using, # the prompt file, or dir of prompt/response
656
- repaired_response=repaired_response, # retry from [repaired] response file
657
- opt_locking=opt_locking,
658
- genai_prompt_inserts=prompt_inserts,
659
- genai_use_relns=genai_use_relns,
660
- quote=quote,
661
- project_name=resolved_project_name, db_url=db_url)
662
- if do_force_failure := False:
663
- if try_number < 3:
664
- raise Exception("Forced Failure for Internal Testing")
665
- break # success - exit the loop
666
- except Exception as e: # almost certainly in api_logic_server_cli/create_from_model/create_db_from_model.py
667
- log.error(f"\n\nGenai failed With Error: {e}")
668
-
669
- if Path(using).is_dir():
670
- log.debug('conversation dir, check in-place iteration')
671
- '''
672
- cases:
673
- - conv in temp - in_place_conversation
674
- - conv elsewhere
675
- test (sorry, no automated blt test for this):
676
- 1. genai CONVERSATION - clean/ApiLogicServer/genai_demo_conversation
677
- 2. genai CONVERSATION ITERATE IN-PLACE (NB: DELETE LAST RESPONSE FIRST)
678
- a. Stop: find 'good breakpoint - check create_db_models.py'
679
- b. Introduce error in system/genai/temp/create_db_models.py
680
- '''
681
-
682
- to_dir_save_dir = Path(Path(os.getcwd())).joinpath(f'system/genai/temp/{resolved_project_name}')
683
- in_place_conversation = str(to_dir_save_dir) == str(Path(using).resolve())
684
- """ means we are using to_dir as the save directory """
685
- if in_place_conversation:
686
- last_response_file_name = ''
687
- last_type = ''
688
- for each_file in sorted(Path(to_dir_save_dir).iterdir()):
689
- if each_file.is_file() and each_file.suffix == '.prompt':
690
- last_type = '.prompt'
691
- if each_file.suffix == '.response':
692
- last_type = '.response'
693
- last_response_file_name = each_file.name
694
- if last_type == ".response": # being careful to delete only recent response
695
- last_response_path = to_dir_save_dir.joinpath(last_response_file_name)
696
- log.debug(f'in-place conversation dir, deleting most recent response: {last_response_path}')
697
- Path(last_response_path).unlink(missing_ok=True)
698
-
699
- # save the temp files for diagnosis (eg, <resolved_project_name>_1)
700
- manager_dir = Path(os.getcwd()) # rename save dir (append retry) for diagnosis
701
- resolved__project_name_parts = resolved_project_name
702
- parts = resolved__project_name_parts.split('/')
703
- # Return the last element
704
- resolved_temp_project_name = parts[-1]
705
- to_dir_save_dir = Path(manager_dir).joinpath(f'system/genai/temp/{resolved_temp_project_name}')
706
- to_dir_save_dir_retry = Path(manager_dir).joinpath(f'system/genai/temp/{resolved_temp_project_name}_{try_number}')
707
- if repaired_response != "":
708
- to_dir_save_dir_retry = Path(manager_dir).joinpath(f'system/genai/temp/{resolved_temp_project_name}_retry')
709
- if to_dir_save_dir_retry.exists():
710
- shutil.rmtree(to_dir_save_dir_retry)
711
- # to_dir_save_dir.rename(to_dir_save_dir_retry)
712
- assert to_dir_save_dir.is_dir(), f"\nInternal Error - missing save directory: {to_dir_save_dir}"
713
- # assert to_dir_save_dir_retry.is_dir(), f"\nInternal Error - missing retry directory: {to_dir_save_dir_retry}"
714
- log.debug(f'.. copying work files...')
715
- log.debug(f'.... from: {to_dir_save_dir}')
716
- log.debug(f'.... to: {to_dir_save_dir_retry}')
717
- shutil.copytree(to_dir_save_dir, to_dir_save_dir_retry, dirs_exist_ok=True)
718
-
719
- failed = True
720
- if genai_use_relns and "Could not determine join condition" in str(e):
721
- genai_use_relns = False # just for db_models (fk's still there!!)
722
- log.error(f"\n Failed with join condition - retrying without relns\n")
723
- failed = False
724
- else:
725
- try_number += 1
726
- log.debug(f"\n\nRetry Genai #{try_number}\n")
727
- pass # retry (retries times)
728
- if failed == True: # retries exhausted ("if failed:" threw "an integer is required"??)
729
- log.error(f"\n\nGenai Failed (Retries: {retries})")
730
- exit(1)
731
- log.info(f"GENAI successful on try {try_number}")
732
-
733
-
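The genai() driver above is a bounded retry loop with one special case: a "Could not determine join condition" error disables relationship generation and retries without consuming an attempt. A minimal sketch of that control flow (the `create_project` callable stands in for api_logic_server.ProjectRun):

```python
# Sketch of the retry strategy used by genai(); create_project is a stand-in.
from typing import Callable

def run_with_retries(create_project: Callable[..., None], retries: int,
                     use_relns: bool = True) -> None:
    try_number = 1
    while try_number <= retries:
        try:
            create_project(use_relns=use_relns)
            return                              # success - stop retrying
        except Exception as e:
            if use_relns and "Could not determine join condition" in str(e):
                use_relns = False               # retry without relationships,
                continue                        # without counting an attempt
            try_number += 1
    raise SystemExit(f"GenAI failed (retries: {retries})")
```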
734
- def key_module_map():
735
- """ does not execute - strictly fo find key modules """
736
- import api_logic_server_cli.api_logic_server as als
737
- import api_logic_server_cli.create_from_model.create_db_from_model as create_db_from_model
738
-
739
- genai() # called from cli.genai/create/iterate
740
- # try/catch/retry loop!
741
- als.ProjectRun() # calls api_logic_server.ProjectRun
742
-
743
- genai = GenAI(Project()) # called from api_logic_server.ProjectRun
744
- genai.__init__() # main driver, calls...
745
- genai.get_prompt_messages() # get self.messages from file/dir/text/arg
746
- genai.fix_and_write_model_file('response_data') # write create_db_models.py for db creation
747
- genai.save_prompt_messages_to_system_genai_temp_project() # save prompt, response and create_db_models.py
748
- # returns to api_logic_server, which...
749
- create_db_from_model.create_db() # creates create_db_models.sqlite from create_db_models.py
750
- # creates project from that db; and calls...
751
- genai.insert_logic_into_created_project() # merge logic (comments) into declare_logic.py
752
-