dmddl 0.2.15__tar.gz → 0.2.17__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: dmddl
3
- Version: 0.2.15
3
+ Version: 0.2.17
4
4
  Summary: cli tool for creating insert script from ddl script
5
5
  License: MIT
6
6
  Author: HoJLter
@@ -9,6 +9,8 @@ Requires-Python: >=3.13
9
9
  Classifier: License :: OSI Approved :: MIT License
10
10
  Classifier: Programming Language :: Python :: 3
11
11
  Classifier: Programming Language :: Python :: 3.13
12
+ Requires-Dist: aiohttp (>=3.11.18,<4.0.0)
13
+ Requires-Dist: bs4 (>=0.0.2,<0.0.3)
12
14
  Requires-Dist: pydantic (>=2.11.4,<3.0.0)
13
15
  Requires-Dist: pydantic-settings (>=2.9.1,<3.0.0)
14
16
  Description-Content-Type: text/markdown
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "dmddl"
3
- version = "0.2.15"
3
+ version = "0.2.17"
4
4
  description = "cli tool for creating insert script from ddl script"
5
5
  authors = [
6
6
  {name = "HoJLter",email = "hojlter.work@gmail.com"}
@@ -10,7 +10,9 @@ readme = "README.md"
10
10
  requires-python = ">=3.13"
11
11
  dependencies = [
12
12
  "pydantic (>=2.11.4,<3.0.0)",
13
- "pydantic-settings (>=2.9.1,<3.0.0)"
13
+ "pydantic-settings (>=2.9.1,<3.0.0)",
14
+ "aiohttp (>=3.11.18,<4.0.0)",
15
+ "bs4 (>=0.0.2,<0.0.3)"
14
16
  ]
15
17
 
16
18
 
@@ -0,0 +1,2 @@
1
+ # Automatically created by ruff.
2
+ *
@@ -0,0 +1 @@
1
+ Signature: 8a477f597d28d172789f06886806bc55
@@ -0,0 +1,145 @@
1
+ import asyncio
2
+
3
+ import questionary
4
+ from dmddl.config.settings import LLMSettings
5
+ from rich import print
6
+ from rich.syntax import Syntax
7
+ from rich.console import Console
8
+ from dmddl.models.llm import openai_request
9
+ from dmddl.models.prompt import prompt as base_prompt
10
+ import argparse
11
+ from dmddl.config.proxy import get_valid_proxies
12
+
13
+ AVAILABLE_PROVIDERS = ["OpenAI"]
14
+
15
+
16
+ def choose_provider(providers):
17
+ provider = questionary.select("Choose your LLM provider:",
18
+ choices=providers).ask()
19
+ if provider:
20
+ return provider
21
+ else:
22
+ raise Exception("LLM Provider isn't found")
23
+
24
+
25
+ def ask_api_key():
26
+ api_key = questionary.password("Enter your api key:").ask()
27
+ if api_key:
28
+ return api_key
29
+ else:
30
+ raise Exception("API key isn't provided")
31
+
32
+
33
+ def make_query(provider, api_key, prompt, proxies = None):
34
+ console = Console()
35
+ with console.status("[bold blue]Making query. Wait for result..."):
36
+ if provider:
37
+ if provider == "OpenAI":
38
+ response = openai_request(prompt=base_prompt+prompt, api_key=api_key, proxies=proxies)
39
+ return response
40
+
41
+ raise Exception("LLM Provider not found")
42
+ else:
43
+ raise Exception("Use -c (--config) to configure the app and set LLM provider.")
44
+
45
+
46
+ def write_output_file(data):
47
+ with open("output.txt", 'w') as file:
48
+ file.write(data)
49
+
50
+
51
+ def set_parameters():
52
+ settings = LLMSettings()
53
+
54
+ llm_provider = choose_provider(AVAILABLE_PROVIDERS)
55
+ api_key = ask_api_key()
56
+
57
+ settings['DMDDL_CUR_PROVIDER'] = llm_provider
58
+ settings['DMDDL_LLM_KEY'] = api_key
59
+
60
+
61
+ def get_args():
62
+ parser = argparse.ArgumentParser()
63
+ parser.add_argument("-c", "--config", action="store_true")
64
+ parser.add_argument("-s", "--source")
65
+ parser.add_argument("-p", "--proxy", action="store_true")
66
+
67
+ return parser.parse_args()
68
+
69
+
70
+ def input_prompt_dialogue(args):
71
+ console = Console()
72
+
73
+ with open(args.source, "r", encoding='utf-8') as file:
74
+ user_prompt = file.read()
75
+
76
+ syntax = Syntax(user_prompt, 'sql', line_numbers=True)
77
+ print(f"\n[yellow bold]{args.source.upper()}\n", )
78
+ console.print(syntax)
79
+ confirmation = questionary.confirm("Do you want to use this DDL script to generate the insert?").ask()
80
+
81
+ return confirmation, user_prompt
82
+
83
+
84
+ def query_dialog(llm_provider, api_key, user_prompt, proxies=None):
85
+ console = Console()
86
+
87
+ success, response = make_query(provider=llm_provider,
88
+ api_key=api_key,
89
+ prompt=user_prompt,
90
+ proxies=proxies)
91
+
92
+ write_output_file(response)
93
+
94
+ print("\n[yellow bold]OUTPUT.TXT\n", )
95
+ if success:
96
+ syntax = Syntax(response, 'sql', line_numbers=True)
97
+ console.print(syntax)
98
+ print("[green bold] Your DML script is ready! Check output.txt")
99
+
100
+ else:
101
+ syntax = Syntax(response, 'bash', line_numbers=True)
102
+ console.print(syntax)
103
+ print("[red bold] Error has occurred... Check output.txt")
104
+
105
+
106
+ def main():
107
+ settings = LLMSettings()
108
+ args = get_args()
109
+ console = Console()
110
+
111
+ llm_provider = settings['DMDDL_CUR_PROVIDER']
112
+ api_key = settings['DMDDL_LLM_KEY']
113
+
114
+
115
+ if not args.source and not args.config and not args.proxy:
116
+ print("[red bold]You must provide at least one argument:\n"
117
+ "-c (--config): opens settings menu\n"
118
+ "-s (--source): specify the input file\n"
119
+ "-p (--proxy): make the request through a proxy")
120
+
121
+ if args.config:
122
+ set_parameters()
123
+
124
+
125
+ if args.source:
126
+ proxies = None
127
+
128
+ if args.proxy:
129
+ with console.status("[blue bold] Finding valid proxies..."):
130
+ proxies = asyncio.run(get_valid_proxies())
131
+ if proxies:
132
+ print("[bold blue] Proxy found!")
133
+ for proxy in proxies:
134
+ print(f"[yellow bold]- {proxy}")
135
+ else:
136
+ print("[yellow bold] No valid proxies found :( \n Try again later. (it really helps)")
137
+
138
+ confirmation, user_prompt = input_prompt_dialogue(args)
139
+
140
+ if confirmation:
141
+ query_dialog(llm_provider, api_key, user_prompt, proxies)
142
+
143
+
144
+ if __name__ == '__main__':
145
+ main()
@@ -0,0 +1,2 @@
1
+ DMDDL_CUR_PROVIDER='OpenAI'
2
+ DMDDL_LLM_KEY='<REDACTED — a real OpenAI secret key was committed and published here; it must be treated as compromised and revoked immediately>'
@@ -0,0 +1,35 @@
1
+ import asyncio
2
+ from bs4 import BeautifulSoup
3
+ import aiohttp
4
+ import requests
5
+
6
+
7
+ def get_proxy_list():
8
+ url = "https://free-proxy-list.net/"
9
+ response = requests.get(url)
10
+ soup = BeautifulSoup(response.text, 'html.parser')
11
+ proxies = []
12
+ table = soup.find('table')
13
+ for row in table.tbody.find_all('tr'):
14
+ cols = row.find_all('td')
15
+ if cols[4].text == 'elite proxy' and cols[6].text == 'yes':
16
+ proxies.append(f"http://{cols[0].text}:{cols[1].text}")
17
+
18
+ return proxies
19
+
20
+
21
+ async def test_request(url, proxy, session):
22
+ try:
23
+ async with session.get(url, proxy=proxy, timeout=5) as response:
24
+ return proxy
25
+ except:
26
+ pass
27
+
28
+
29
+ async def get_valid_proxies():
30
+ proxies = get_proxy_list()
31
+ url = "https://platform.openai.com"
32
+ async with aiohttp.ClientSession() as session:
33
+ tasks = [test_request(url, proxy, session) for proxy in proxies]
34
+ good_proxies = [proxy for proxy in await asyncio.gather(*tasks) if proxy]
35
+ return good_proxies
@@ -3,10 +3,8 @@ import os
3
3
  from pathlib import Path
4
4
 
5
5
  def get_dotenv_path():
6
- # Проверяем стандартные места поиска .env
7
6
  env_path = dotenv.find_dotenv()
8
7
 
9
-
10
8
  if not env_path:
11
9
  env_dir = "config"
12
10
  env_path = os.path.join(env_dir, ".env")
@@ -0,0 +1 @@
1
+ test
@@ -1,6 +1,7 @@
1
1
  import requests
2
+ from rich import print
2
3
 
3
- def openai_request(prompt, api_key):
4
+ def openai_request(prompt, api_key, proxies = None):
4
5
  headers = {
5
6
  "Authorization": f"Bearer {api_key}",
6
7
  "Content-Type": "application/json"
@@ -15,12 +16,28 @@ def openai_request(prompt, api_key):
15
16
  }
16
17
  ]
17
18
  }
18
- response = requests.post(url=url, headers=headers, json=data)
19
+
20
+ if proxies:
21
+ for proxy in proxies:
22
+ print(f"[yellow bold]\nUsing proxy: {proxy}")
23
+ req_proxies = {
24
+ "https": proxy
25
+ }
26
+ response = requests.post(url=url, headers=headers, json=data, proxies = req_proxies)
27
+ if response.status_code == 200:
28
+ break
29
+
30
+ else:
31
+ response = requests.post(url=url, headers=headers, json=data)
32
+
33
+
19
34
  if response.status_code == 200:
20
35
  return True, response.json()['choices'][0]['message']['content']
36
+
21
37
  elif response.status_code == 401:
22
38
  return False, ("Your api key is incorrect. \n"
23
39
  "Use -c (--config) to configurate app and set new API key.")
40
+
24
41
  else:
25
42
  return False, response.json()['error']['message']
26
43
 
@@ -0,0 +1,62 @@
1
+ -- TABLE: users (15 records)
2
+ INSERT INTO users (user_id, username, email) VALUES
3
+ (1, 'alice_jones', 'alice.jones@example.com'),
4
+ (2, 'bob_brown', 'bob.brown@example.com'),
5
+ (3, 'charlie_lee', 'charlie.lee@test.com'),
6
+ (4, 'david_clark', 'david.clark@example.com'),
7
+ (5, 'emily_williams', 'emily.williams@test.com'),
8
+ (6, 'frank_miller', 'frank.miller@example.com'),
9
+ (7, 'grace_king', 'grace.king@test.com'),
10
+ (8, 'hannah_white', 'hannah.white@example.com'),
11
+ (9, 'ian_green', 'ian.green@example.com'),
12
+ (10, 'jake_harris', 'jake.harris@test.com'),
13
+ (11, 'kate_scott', 'kate.scott@example.com'),
14
+ (12, 'luke_adams', 'luke.adams@test.com'),
15
+ (13, 'mike_james', 'mike.james@example.com'),
16
+ (14, 'nora_moore', 'nora.moore@test.com'),
17
+ (15, 'olivia_taylor', 'olivia.taylor@example.com');
18
+
19
+ -- TABLE: orders (40 records)
20
+ INSERT INTO orders (order_id, user_id, order_date) VALUES
21
+ (1, 1, '2023-01-01'),
22
+ (2, 1, '2023-01-05'),
23
+ (3, 2, '2023-01-02'),
24
+ (4, 3, '2023-01-01'),
25
+ (5, 3, '2023-01-03'),
26
+ (6, 4, '2023-01-04'),
27
+ (7, 4, '2023-01-07'),
28
+ (8, 5, '2023-01-02'),
29
+ (9, 5, '2023-01-06'),
30
+ (10, 6, '2023-01-05'),
31
+ (11, 6, '2023-01-08'),
32
+ (12, 7, '2023-01-01'),
33
+ (13, 8, '2023-01-03'),
34
+ (14, 8, '2023-01-09'),
35
+ (15, 9, '2023-01-01'),
36
+ (16, 10, '2023-01-02'),
37
+ (17, 10, '2023-01-06'),
38
+ (18, 11, '2023-01-04'),
39
+ (19, 11, '2023-01-10'),
40
+ (20, 12, '2023-01-01'),
41
+ (21, 12, '2023-01-03'),
42
+ (22, 13, '2023-01-02'),
43
+ (23, 13, '2023-01-05'),
44
+ (24, 14, '2023-01-04'),
45
+ (25, 14, '2023-01-08'),
46
+ (26, 15, '2023-01-02'),
47
+ (27, 15, '2023-01-09'),
48
+ (28, 1, '2023-01-06'),
49
+ (29, 2, '2023-01-03'),
50
+ (30, 3, '2023-01-07'),
51
+ (31, 4, '2023-01-08'),
52
+ (32, 5, '2023-01-01'),
53
+ (33, 6, '2023-01-05'),
54
+ (34, 7, '2023-01-02'),
55
+ (35, 8, '2023-01-10'),
56
+ (36, 9, '2023-01-06'),
57
+ (37, 10, '2023-01-04'),
58
+ (38, 11, '2023-01-03'),
59
+ (39, 12, '2023-01-07'),
60
+ (40, 13, '2023-01-09');
61
+
62
+ -- WARNING: Consider implementing ON DELETE CASCADE for foreign keys to maintain data integrity.
@@ -1,109 +0,0 @@
1
- import questionary
2
- from dmddl.config.settings import LLMSettings
3
- from rich import print
4
- from rich.syntax import Syntax
5
- from rich.console import Console
6
- from dmddl.models.llm import openai_request
7
- from dmddl.models.prompt import prompt as base_prompt
8
- import argparse
9
-
10
-
11
- AVAILABLE_PROVIDERS = ["OpenAI"]
12
-
13
-
14
- def choose_provider(providers):
15
- provider = questionary.select("Choose your LLM provider:",
16
- choices=providers).ask()
17
- if provider:
18
- return provider
19
- else:
20
- raise Exception("LLM Provider isn't found")
21
-
22
-
23
- def ask_api_key():
24
- api_key = questionary.password("Enter your api key:").ask()
25
- if api_key:
26
- return api_key
27
- else:
28
- raise Exception("API key isn't provided")
29
-
30
-
31
- def make_query(provider, api_key, prompt):
32
- console = Console()
33
- with console.status("[bold blue]Making query. Wait for result..."):
34
- if provider:
35
- if provider == "OpenAI":
36
- response = openai_request(base_prompt+prompt, api_key)
37
- return response
38
-
39
- raise Exception("LLM Provider not found")
40
- else:
41
- raise Exception("Use -c (--config) to configurate app and set LLM provider.")
42
-
43
-
44
- def write_output_file(data):
45
- with open("output.txt", 'w') as file:
46
- file.write(data)
47
-
48
-
49
- def set_parameters():
50
- settings = LLMSettings()
51
-
52
- llm_provider = choose_provider(AVAILABLE_PROVIDERS)
53
- api_key = ask_api_key()
54
-
55
- settings['DMDDL_CUR_PROVIDER'] = llm_provider
56
- settings['DMDDL_LLM_KEY'] = api_key
57
-
58
-
59
- def get_args():
60
- parser = argparse.ArgumentParser()
61
- parser.add_argument("-c", "--config", action="store_true")
62
- parser.add_argument("-s", "--source")
63
-
64
- return parser.parse_args()
65
-
66
-
67
- def main():
68
- settings = LLMSettings()
69
- args = get_args()
70
- console = Console()
71
-
72
- llm_provider = settings['DMDDL_CUR_PROVIDER']
73
- api_key = settings['DMDDL_LLM_KEY']
74
-
75
-
76
- if not args.source and not args.config:
77
- print("[red bold]You must provide any arguments:\n"
78
- "-c (--config): opens settings menu\n"
79
- "-s (--source): specify the input file")
80
-
81
- if args.config:
82
- set_parameters()
83
-
84
- if args.source:
85
- with open(args.source, "r", encoding='utf-8') as file:
86
- user_prompt = file.read()
87
- syntax = Syntax(user_prompt, 'sql', line_numbers=True)
88
- print(f"\n[yellow bold]{args.source.upper()}\n", )
89
- console.print(syntax)
90
- confirmation = questionary.confirm("Do you want to use this DDL script to generate the insert?").ask()
91
-
92
- if confirmation:
93
- success, response = make_query(provider=llm_provider,
94
- api_key=api_key,
95
- prompt=user_prompt)
96
- write_output_file(response)
97
- print("\n\n[yellow bold]OUTPUT.TXT\n",)
98
- if success:
99
- syntax = Syntax(response, 'sql', line_numbers=True)
100
- console.print(syntax)
101
- print("[green bold] Your DML script is ready! Check output.txt")
102
- if not success:
103
- syntax = Syntax(response, 'python', line_numbers=True)
104
- console.print(syntax)
105
- print("[red bold] Error has occurred... Check output.txt")
106
-
107
-
108
- if __name__ == '__main__':
109
- main()
File without changes
File without changes
File without changes