AtCoderStudyBooster 0.4.1-py3-none-any.whl → 0.4.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
atcdr/ai.py ADDED
@@ -0,0 +1,434 @@
1
+ import json
2
+ import random
3
+ import time
4
+ from pathlib import Path
5
+ from typing import Any, Callable, Dict, List, Optional
6
+
7
+ import rich_click as click
8
+ from openai import BadRequestError, OpenAI
9
+ from rich.console import Console
10
+ from rich.live import Live
11
+ from rich.markup import escape
12
+ from rich.panel import Panel
13
+ from rich.syntax import Syntax
14
+ from rich.table import Table
15
+ from rich.text import Text
16
+
17
+ from atcdr.test import (
18
+ LabeledTestCase,
19
+ ResultStatus,
20
+ TestCase,
21
+ TestRunner,
22
+ )
23
+ from atcdr.util.fileops import add_file_selector
24
+ from atcdr.util.filetype import (
25
+ COMPILED_LANGUAGES,
26
+ FILE_EXTENSIONS,
27
+ INTERPRETED_LANGUAGES,
28
+ Lang,
29
+ str2lang,
30
+ )
31
+ from atcdr.util.i18n import _
32
+ from atcdr.util.openai import set_api_key
33
+ from atcdr.util.parse import ProblemHTML
34
+
35
+
36
+ def render_result_for_GPT(test: TestRunner) -> tuple[str, bool]:
37
+ results = list(test)
38
+ match test.info.summary:
39
+ case ResultStatus.CE:
40
+ return f'Compile Error \n {test.info.compiler_message}', False
41
+ case _:
42
+ message_for_gpt = ''.join(
43
+ (
44
+ f'\n{r.label} => {r.result.passed.value}, Execution Time : {r.result.executed_time}\n'
45
+ f'\nInput :\n{r.testcase.input}\nOutput :\n{r.result.output}\nExpected :\n{r.testcase.output}\n'
46
+ if r.result.passed == ResultStatus.WA
47
+ else f'\n{r.label} => {r.result.passed.value}\nInput :\n{r.testcase.input}\nOutput :\n{r.result.output}\n'
48
+ )
49
+ for r in results
50
+ )
51
+ return message_for_gpt, False
52
+
53
+
54
+ def display_test_results(console: Console, test: TestRunner) -> None:
55
+ results = list(test)
56
+
57
+ table = Table(title='🧪 Test Results')
58
+ table.add_column('Test Case', style='cyan', no_wrap=True)
59
+ table.add_column('Status', justify='center', no_wrap=True)
60
+ table.add_column('Input', style='dim', max_width=30)
61
+ table.add_column('Output', style='yellow', max_width=30)
62
+ table.add_column('Expected', style='green', max_width=30)
63
+
64
+ for r in results:
65
+ if r.result.passed == ResultStatus.AC:
66
+ status = '[green]✅ AC[/green]'
67
+ elif r.result.passed == ResultStatus.WA:
68
+ status = '[red]❌ WA[/red]'
69
+ elif r.result.passed == ResultStatus.TLE:
70
+ status = '[yellow]⏰ TLE[/yellow]'
71
+ elif r.result.passed == ResultStatus.RE:
72
+ status = '[red]💥 RE[/red]'
73
+ else:
74
+ status = f'[red]{r.result.passed.value}[/red]'
75
+
76
+ input_preview = escape(
77
+ r.testcase.input.strip()[:50] + '...'
78
+ if len(r.testcase.input.strip()) > 50
79
+ else r.testcase.input.strip()
80
+ )
81
+ output_preview = escape(
82
+ r.result.output.strip()[:50] + '...'
83
+ if len(r.result.output.strip()) > 50
84
+ else r.result.output.strip()
85
+ )
86
+ expected_preview = escape(
87
+ r.testcase.output.strip()[:50] + '...'
88
+ if len(r.testcase.output.strip()) > 50
89
+ else r.testcase.output.strip()
90
+ )
91
+
92
+ table.add_row(r.label, status, input_preview, output_preview, expected_preview)
93
+
94
+ console.print(table)
95
+
96
+
97
+ def create_func(labeled_cases: list[LabeledTestCase], model: str):
98
+ def test_example_case(code: str, language: str) -> str:
99
+ language_enum: Lang = str2lang(language)
100
+ source_path = Path(f'{model}{FILE_EXTENSIONS[language_enum]}')
101
+ source_path.write_text(code, encoding='utf-8')
102
+ test = TestRunner(str(source_path), labeled_cases)
103
+ message_for_gpt, _ = render_result_for_GPT(test)
104
+ return message_for_gpt
105
+
106
+ def execute_code(input: Optional[str], code: str, language: str) -> str:
107
+ language_enum: Lang = str2lang(language)
108
+ random_name = random.randint(0, 100_000_000)
109
+ source_path = Path(f'tmp{random_name}{FILE_EXTENSIONS[language_enum]}')
110
+ source_path.write_text(code, encoding='utf-8')
111
+ labeled_cases = [LabeledTestCase('case by gpt', TestCase(input or '', ''))]
112
+ test = TestRunner(str(source_path), labeled_cases)
113
+ labeled_result = next(test)
114
+ source_path.unlink(missing_ok=True)
115
+ return labeled_result.result.output
116
+
117
+ return test_example_case, execute_code
118
+
119
+
120
+ def solve_problem(path: Path, lang: Lang, model: str) -> None:
121
+ console = Console()
122
+ content = path.read_text(encoding='utf-8')
123
+ html = ProblemHTML(content)
124
+ md = html.make_problem_markdown('en')
125
+ labeled_cases = html.load_labeled_testcase()
126
+
127
+ test_example_case, execute_code = create_func(labeled_cases, model)
128
+
129
+ # Tool definitions in Responses API format (top level)
130
+ TOOLS = [
131
+ {
132
+ 'type': 'function',
133
+ 'name': 'test_example_case',
134
+ 'description': 'Run the given source code against example test cases and return a summarized result.',
135
+ 'parameters': {
136
+ 'type': 'object',
137
+ 'properties': {
138
+ 'code': {'type': 'string'},
139
+ 'language': {
140
+ 'type': 'string',
141
+ 'enum': [
142
+ lang.value
143
+ for lang in (COMPILED_LANGUAGES + INTERPRETED_LANGUAGES)
144
+ ],
145
+ },
146
+ },
147
+ 'required': ['code', 'language'],
148
+ 'additionalProperties': False,
149
+ },
150
+ 'strict': True,
151
+ },
152
+ {
153
+ 'type': 'function',
154
+ 'name': 'execute_code',
155
+ 'description': 'Execute the given source code with a single input and return the actual output.',
156
+ 'parameters': {
157
+ 'type': 'object',
158
+ 'properties': {
159
+ 'input': {'type': 'string'},
160
+ 'code': {'type': 'string'},
161
+ 'language': {
162
+ 'type': 'string',
163
+ 'enum': [
164
+ lang.value
165
+ for lang in (COMPILED_LANGUAGES + INTERPRETED_LANGUAGES)
166
+ ],
167
+ },
168
+ },
169
+ 'required': ['input', 'code', 'language'],
170
+ 'additionalProperties': False,
171
+ },
172
+ 'strict': True,
173
+ },
174
+ ]
175
+
176
+ client = OpenAI()
177
+ if set_api_key() is None:
178
+ console.print('[red]OpenAI API key is not set.[/red]')
179
+ return
180
+
181
+ system_prompt = f"""You are a competitive programming assistant for {lang.value}.
182
+ The user will provide problems in Markdown format.
183
+ Read the problem carefully and output a complete, correct, and efficient solution in {lang.value}.
184
+ Use standard input and output. Do not omit any code.
185
+ Always pay close attention to algorithmic complexity (time and space).
186
+ Choose the most optimal algorithms and data structures so that the solution runs within time limits even for the largest possible inputs.
187
+
188
+ Use the provided tool test_example_case to run the example test cases from the problem statement.
189
+ If tests do not pass, fix the code and repeat.
190
+ The last tested code will be automatically saved to a local file on the user's computer.
191
+ You do not need to include the final source code in your response.
192
+ Simply confirm to the user that all tests passed, or briefly explain if they did not.
193
+ Once you run test_example_case, the exact code you tested will already be saved locally on the user's machine, so sending it again in the response is unnecessary."""
194
+
195
+ # Dispatch from tool name to its local implementation
196
+ tool_impl: Dict[str, Callable[..., Any]] = {
197
+ 'test_example_case': test_example_case,
198
+ 'execute_code': lambda **p: execute_code(
199
+ p.get('input', ''), # ← use an empty string when the model omits it
200
+ p.get('code', ''),
201
+ p.get('language', lang.value),
202
+ ),
203
+ }
204
+
205
+ console.print(f'Solving :{path} Language: {lang.value} / Model: {model}')
206
+
207
+ context_msgs = [
208
+ {'role': 'system', 'content': system_prompt},
209
+ {'role': 'user', 'content': md},
210
+ ]
211
+ turn = 1
212
+ assistant_text = Text()
213
+
214
+ def call_model():
215
+ try:
216
+ return client.responses.create(
217
+ model=model,
218
+ input=context_msgs,
219
+ tools=TOOLS,
220
+ tool_choice='auto',
221
+ include=['reasoning.encrypted_content'],
222
+ store=False,
223
+ )
224
+ except BadRequestError as e:
225
+ body = getattr(getattr(e, 'response', None), 'json', lambda: None)()
226
+ console.print(
227
+ Panel.fit(f'{e}\n\n{body}', title='API Error', border_style='red')
228
+ )
229
+ raise
230
+
231
+ while True:
232
+ start_time = time.time()
233
+ with Live(
234
+ Panel(
235
+ f'[bold blue]🤔 Thinking... (turn {turn})[/bold blue]\n[dim]Elapsed: 0.0s[/dim]',
236
+ border_style='blue',
237
+ ),
238
+ console=console,
239
+ refresh_per_second=10,
240
+ ) as live:
241
+
242
+ def update_timer():
243
+ elapsed = time.time() - start_time
244
+ live.update(
245
+ Panel(
246
+ f'[bold blue]🤔 Thinking... (turn {turn})[/bold blue]\n[dim]Elapsed: {elapsed:.1f}s[/dim]',
247
+ border_style='blue',
248
+ )
249
+ )
250
+
251
+ import threading
252
+
253
+ resp = None
254
+ error = None
255
+
256
+ def model_call():
257
+ nonlocal resp, error
258
+ try:
259
+ resp = call_model()
260
+ except Exception as e:
261
+ error = e
262
+
263
+ thread = threading.Thread(target=model_call)
264
+ thread.start()
265
+
266
+ while thread.is_alive():
267
+ update_timer()
268
+ time.sleep(0.1)
269
+
270
+ thread.join()
271
+
272
+ if error:
273
+ raise error
274
+
275
+ elapsed = time.time() - start_time
276
+ live.update(
277
+ Panel(
278
+ f'[bold green]✓ Completed thinking (turn {turn})[/bold green]\n[dim]Time taken: {elapsed:.1f}s[/dim]',
279
+ border_style='green',
280
+ )
281
+ )
282
+
283
+ # Display token usage
284
+ if resp and hasattr(resp, 'usage') and resp.usage:
285
+ usage = resp.usage
286
+ input_tokens = getattr(usage, 'input_tokens', 0)
287
+ output_tokens = getattr(usage, 'output_tokens', 0)
288
+ total_tokens = getattr(usage, 'total_tokens', 0)
289
+
290
+ # Check for cached tokens
291
+ cached_tokens = 0
292
+ if hasattr(usage, 'input_tokens_details'):
293
+ details = usage.input_tokens_details
294
+ if hasattr(details, 'cached_tokens'):
295
+ cached_tokens = details.cached_tokens
296
+
297
+ # Check for reasoning tokens
298
+ reasoning_tokens = 0
299
+ if hasattr(usage, 'output_tokens_details'):
300
+ details = usage.output_tokens_details
301
+ if hasattr(details, 'reasoning_tokens'):
302
+ reasoning_tokens = details.reasoning_tokens
303
+
304
+ token_msg = f'[dim]Tokens - Input: {input_tokens:,}'
305
+ if cached_tokens > 0:
306
+ token_msg += f' (cached: {cached_tokens:,})'
307
+ token_msg += f' | Output: {output_tokens:,}'
308
+ if reasoning_tokens > 0:
309
+ token_msg += f' (reasoning: {reasoning_tokens:,})'
310
+ token_msg += f' | Total: {total_tokens:,}[/dim]'
311
+ console.print(token_msg)
312
+
313
+ if resp and getattr(resp, 'output_text', None):
314
+ assistant_text.append(resp.output_text)
315
+
316
+ output_content = str(resp.output_text).strip()
317
+ if any(
318
+ keyword in output_content
319
+ for keyword in [
320
+ 'def ',
321
+ 'class ',
322
+ 'import ',
323
+ 'from ',
324
+ '#include',
325
+ 'public class',
326
+ ]
327
+ ):
328
+ try:
329
+ syntax = Syntax(
330
+ output_content, lang, theme='monokai', line_numbers=True
331
+ )
332
+ console.print(
333
+ Panel(
334
+ syntax,
335
+ title='Assistant Output (Code)',
336
+ border_style='green',
337
+ )
338
+ )
339
+ except Exception:
340
+ console.print(
341
+ Panel(
342
+ assistant_text,
343
+ title='Assistant Output',
344
+ border_style='green',
345
+ )
346
+ )
347
+ else:
348
+ console.print(
349
+ Panel(
350
+ assistant_text, title='Assistant Output', border_style='green'
351
+ )
352
+ )
353
+
354
+ if resp and hasattr(resp, 'output'):
355
+ context_msgs += resp.output
356
+
357
+ # Collect the function_call items
358
+ calls: List[dict] = []
359
+ for o in resp.output:
360
+ if getattr(o, 'type', '') == 'function_call':
361
+ try:
362
+ args = json.loads(o.arguments or '{}')
363
+ except Exception:
364
+ args = {}
365
+ call_id = getattr(o, 'call_id', None) or getattr(
366
+ o, 'id'
367
+ ) # ★ this is the key point
368
+ calls.append({'name': o.name, 'call_id': call_id, 'args': args})
369
+ else:
370
+ calls = []
371
+
372
+ if not calls:
373
+ console.print(
374
+ Panel.fit('✅ Done (no more tool calls).', border_style='green')
375
+ )
376
+ break
377
+
378
+ # Run the tools and push each function_call_output onto the context
379
+ for c in calls:
380
+ args_str = json.dumps(c['args'], ensure_ascii=False) if c['args'] else ''
381
+ console.print(
382
+ Panel.fit(
383
+ f"Tool: [bold]{c['name']}[/bold]\nargs: {args_str}",
384
+ title=f"function_call ({c['call_id']})",
385
+ border_style='cyan',
386
+ )
387
+ )
388
+
389
+ impl = tool_impl.get(c['name'])
390
+ if not impl:
391
+ out = f"[ERROR] Unknown tool: {c['name']}"
392
+ else:
393
+ try:
394
+ with console.status(f"Running {c['name']}...", spinner='dots'):
395
+ out = impl(**c['args']) if c['args'] else impl()
396
+ except TypeError:
397
+ out = impl(
398
+ **{
399
+ k: v
400
+ for k, v in c['args'].items()
401
+ if k in impl.__code__.co_varnames
402
+ }
403
+ )
404
+ except Exception as e:
405
+ out = f"[Tool '{c['name']}' error] {e}"
406
+
407
+ console.print(
408
+ Panel(
409
+ str(out) or '(no output)',
410
+ title=f"{c['name']} result",
411
+ border_style='magenta',
412
+ )
413
+ )
414
+
415
+ context_msgs.append(
416
+ {
417
+ 'type': 'function_call_output',
418
+ 'call_id': c['call_id'],
419
+ 'output': str(out),
420
+ }
421
+ )
422
+
423
+ turn += 1
424
+
425
+
426
+ @click.command(short_help=_('cmd_generate'), help=_('cmd_generate'))
427
+ @add_file_selector('files', filetypes=[Lang.HTML])
428
+ @click.option('--lang', default='Python', help=_('opt_output_lang'))
429
+ @click.option('--model', default='gpt-5-mini', help=_('opt_model'))
430
+ def ai(files, lang, model):
431
+ """HTMLファイルからコード生成またはテンプレート出力を行います。"""
432
+ lang_enum: Lang = str2lang(lang)
433
+ for path in files:
434
+ solve_problem(Path(path), lang_enum, model)
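The added ai.py drives the whole session through the OpenAI Responses API: it sends the problem Markdown, lets the model call test_example_case / execute_code as function tools, feeds each function_call_output back into the context, and loops until a turn produces no tool calls. A minimal sketch of that loop follows; it is not the packaged implementation — the run_tests stub, the system/user text, and the error handling are placeholders, and only the model name 'gpt-5-mini' is taken from the diff above.

```python
# Minimal sketch of the Responses API function-calling loop that ai.py builds on.
# Assumptions: OPENAI_API_KEY is already set; run_tests and the problem text are
# placeholders standing in for test_example_case and the real Markdown statement.
import json

from openai import OpenAI

client = OpenAI()

TOOLS = [
    {
        'type': 'function',
        'name': 'run_tests',
        'description': 'Run the given code against the sample cases and summarize the result.',
        'parameters': {
            'type': 'object',
            'properties': {'code': {'type': 'string'}, 'language': {'type': 'string'}},
            'required': ['code', 'language'],
            'additionalProperties': False,
        },
        'strict': True,
    }
]


def run_tests(code: str, language: str) -> str:
    # Placeholder: the real tool writes the code to disk and runs TestRunner on it.
    return 'sample 1 => AC'


context = [
    {'role': 'system', 'content': 'Solve the problem and verify it with run_tests.'},
    {'role': 'user', 'content': '<problem statement in Markdown>'},
]

while True:
    resp = client.responses.create(
        model='gpt-5-mini', input=context, tools=TOOLS, tool_choice='auto'
    )
    context += resp.output  # carry reasoning / function_call items into the next turn
    calls = [o for o in resp.output if getattr(o, 'type', '') == 'function_call']
    if not calls:
        print(resp.output_text)  # final answer, no further tool calls
        break
    for call in calls:
        args = json.loads(call.arguments or '{}')
        context.append(
            {
                'type': 'function_call_output',
                'call_id': call.call_id,
                'output': run_tests(**args),
            }
        )
```

The packaged version additionally runs each request on a worker thread so the elapsed-time panel can animate, passes include=['reasoning.encrypted_content'] with store=False, and prints per-turn token usage.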
atcdr/cli.py CHANGED
@@ -8,8 +8,8 @@ from rich.table import Table
8
8
  from rich.traceback import install
9
9
  from rich_click import RichGroup
10
10
 
11
+ from atcdr.ai import ai
11
12
  from atcdr.download import download
12
- from atcdr.generate import generate
13
13
  from atcdr.login import login
14
14
  from atcdr.logout import logout
15
15
  from atcdr.markdown import markdown
@@ -76,7 +76,7 @@ def cli():
76
76
  cli.add_command(test, aliases=['t'])
77
77
  cli.add_command(download, aliases=['d'])
78
78
  cli.add_command(open_files, 'open', aliases=['o'])
79
- cli.add_command(generate, aliases=['g'])
79
+ cli.add_command(ai)
80
80
  cli.add_command(markdown, aliases=['md'])
81
81
  cli.add_command(submit, aliases=['s'])
82
82
  cli.add_command(login)
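With generate removed and ai registered (without aliases), the command can be exercised programmatically via Click's test runner. This is only a sketch: whether add_file_selector exposes the HTML path as a positional argument is an assumption, and A.html is a made-up filename — only the --lang and --model options are confirmed by the diff.

```python
# Sketch: driving the new `ai` subcommand through Click's CliRunner.
# Assumptions: a downloaded problem page named A.html exists in the working
# directory, and add_file_selector accepts the file path positionally.
from click.testing import CliRunner

from atcdr.cli import cli

runner = CliRunner()
result = runner.invoke(cli, ['ai', 'A.html', '--lang', 'Python', '--model', 'gpt-5-mini'])
print(result.exit_code)
print(result.output)
```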
atcdr/util/openai.py ADDED
@@ -0,0 +1,45 @@
1
+ import os
2
+ from typing import Optional
3
+
4
+ import requests
5
+
6
+ from atcdr.util.i18n import _
7
+
8
+
9
+ def set_api_key() -> Optional[str]:
10
+ api_key = os.getenv('OPENAI_API_KEY')
11
+ if api_key and validate_api_key(api_key):
12
+ return api_key
13
+ elif api_key:
14
+ print(_('api_key_validation_failed'))
15
+ else:
16
+ pass
17
+
18
+ api_key = input(_('get_api_key_prompt'))
19
+ if validate_api_key(api_key):
20
+ print(_('api_key_test_success'))
21
+ print(_('save_api_key_prompt'))
22
+ if input() == 'y':
23
+ zshrc_path = os.path.expanduser('~/.zshrc')
24
+ with open(zshrc_path, 'a') as f:
25
+ f.write(f'export OPENAI_API_KEY={api_key}\n')
26
+ print(_('api_key_saved', zshrc_path))
27
+ os.environ['OPENAI_API_KEY'] = api_key
28
+ return api_key
29
+ else:
30
+ print(_('api_key_required'))
31
+ return None
32
+
33
+
34
+ def validate_api_key(api_key: str) -> bool:
35
+ headers = {
36
+ 'Content-Type': 'application/json',
37
+ 'Authorization': f'Bearer {api_key}',
38
+ }
39
+
40
+ response = requests.get('https://api.openai.com/v1/models', headers=headers)
41
+ if response.status_code == 200:
42
+ return True
43
+ else:
44
+ print(_('api_key_validation_error'))
45
+ return False
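The new util/openai.py keeps only the key-handling half of the removed util/gpt.py: it checks OPENAI_API_KEY, falls back to prompting, and treats a 200 response from the models endpoint as a valid key. A stripped-down sketch of that validation check, with the interactive prompting and ~/.zshrc persistence omitted (the timeout value is an addition of this sketch):

```python
# Sketch: the key-validation idea behind atcdr/util/openai.py, simplified.
import os

import requests


def openai_key_is_valid(api_key: str) -> bool:
    # A 200 from /v1/models is treated as "key works"; anything else is rejected.
    resp = requests.get(
        'https://api.openai.com/v1/models',
        headers={'Authorization': f'Bearer {api_key}'},
        timeout=10,
    )
    return resp.status_code == 200


key = os.getenv('OPENAI_API_KEY', '')
print('valid' if key and openai_key_is_valid(key) else 'missing or invalid')
```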
atcdr/util/parse.py CHANGED
@@ -110,7 +110,7 @@ class ProblemHTML(HTML):
110
110
  def load_labeled_testcase(self) -> List:
111
111
  from atcdr.test import LabeledTestCase, TestCase
112
112
 
113
- problem_part = self.abstract_problem_part(i18n.language)
113
+ problem_part = self.abstract_problem_part('en')
114
114
  if problem_part is None:
115
115
  return []
116
116
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: AtCoderStudyBooster
3
- Version: 0.4.1
3
+ Version: 0.4.2
4
4
  Summary: A tool to download and manage AtCoder problems.
5
5
  Project-URL: Homepage, https://github.com/yuta6/AtCoderStudyBooster
6
6
  Author-email: yuta6 <46110512+yuta6@users.noreply.github.com>
@@ -9,6 +9,7 @@ Requires-Python: >=3.8
9
9
  Requires-Dist: beautifulsoup4
10
10
  Requires-Dist: click-aliases>=1.0.5
11
11
  Requires-Dist: markdownify==0.13.1
12
+ Requires-Dist: openai>=1.99.6
12
13
  Requires-Dist: pywebview>=5.4
13
14
  Requires-Dist: questionary>=2.0.1
14
15
  Requires-Dist: requests
@@ -1,7 +1,7 @@
1
1
  atcdr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- atcdr/cli.py,sha256=xiCnGf14VHtLcRlVHXNpLKW3FIxOHS2duTRDTsG8a5w,2595
2
+ atcdr/ai.py,sha256=mjGfst2PaxgKskGkBVyrsNsSDlkgsQeyhPK9I8hOq-w,15894
3
+ atcdr/cli.py,sha256=KPpnxqlvUwiXZdGmaO07bv3sRFJezjyXs0HPqF9r5I4,2562
3
4
  atcdr/download.py,sha256=sMILF_FKLTe3OBk_ZnA8pzvnlGz68jrHaU-lxDAy6pA,7678
4
- atcdr/generate.py,sha256=FBOPJ2wU827GzkmYxsFrZKJYKXVC_h675_9-mZ7wRdc,7899
5
5
  atcdr/login.py,sha256=3lyuo19EKOj3eaiOcOUnrkkgEEqWO0D1EnjLPKNxkPU,4655
6
6
  atcdr/logout.py,sha256=XwyR5oSipg-iQzfdfkx4Cs0Q88ZWJ548FmdhlIhpUL8,777
7
7
  atcdr/markdown.py,sha256=x0OORs1eun14c0XZWUap6HljTHhJ6UM6OfkJlDXeeSo,1587
@@ -11,12 +11,12 @@ atcdr/test.py,sha256=tbjkBETQG48AuH1F_hBlla3lpfcQiVvFO1YjZ2QDB9A,13518
11
11
  atcdr/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
12
12
  atcdr/util/fileops.py,sha256=kvQDO85Ii5n__Wo8KmoE6Ecgbxx9MfmF7BDYz1sL5Sk,3200
13
13
  atcdr/util/filetype.py,sha256=pceB08trkwNkdzKJx4fFfOWpz9jYMBRDwXLW4un0sRM,2254
14
- atcdr/util/gpt.py,sha256=GNJqgV-eVQlxkQ3tnwNiGec27FV6O4QSpwaDiXbFM0w,3392
15
14
  atcdr/util/i18n.py,sha256=WRptkwqRSYSpA7L2CeWwv4ufv_uK65KjnorAcOTOot8,17949
16
- atcdr/util/parse.py,sha256=fo_LtEXLwz0g3871iUKZFHMitxbBh-bz9uBndpKfQeI,7130
15
+ atcdr/util/openai.py,sha256=BygabBuUmuD7wCyEujS4-79XpmLaBi8L_mGUq-JsTGs,1232
16
+ atcdr/util/parse.py,sha256=fPcDqnb-COGJA49sc8HLNQBjCATII0edOtpe_sVIx0g,7121
17
17
  atcdr/util/problem.py,sha256=ZLD-WvhC5SC6TGbyqmiOpTVZVSn-KuhOazWvAvHdyqI,1222
18
18
  atcdr/util/session.py,sha256=xct266SY7QuC3bc1IKlkVginttvXE_qK5tepUx6hfWE,4820
19
- atcoderstudybooster-0.4.1.dist-info/METADATA,sha256=yV1OZY9piILs7WW_qfKEsGk_az03Vh4ehgrbq1iO-nQ,6077
20
- atcoderstudybooster-0.4.1.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
21
- atcoderstudybooster-0.4.1.dist-info/entry_points.txt,sha256=-stL-IwnheQGlYAdm82RuZu8CGgSakU0aVIVlA7DmFA,40
22
- atcoderstudybooster-0.4.1.dist-info/RECORD,,
19
+ atcoderstudybooster-0.4.2.dist-info/METADATA,sha256=RKaWJkW3AiRZeIDRTQuhGshxruW7CvTo_lUbZmzi6gE,6107
20
+ atcoderstudybooster-0.4.2.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
21
+ atcoderstudybooster-0.4.2.dist-info/entry_points.txt,sha256=-stL-IwnheQGlYAdm82RuZu8CGgSakU0aVIVlA7DmFA,40
22
+ atcoderstudybooster-0.4.2.dist-info/RECORD,,
atcdr/generate.py DELETED
@@ -1,197 +0,0 @@
1
- import json
2
- import os
3
- import re
4
-
5
- import rich_click as click
6
- from rich.console import Console
7
- from rich.panel import Panel
8
- from rich.syntax import Syntax
9
-
10
- from atcdr.test import ResultStatus, TestRunner, create_renderable_test_info
11
- from atcdr.util.fileops import add_file_selector
12
- from atcdr.util.filetype import (
13
- FILE_EXTENSIONS,
14
- Filename,
15
- Lang,
16
- lang2str,
17
- str2lang,
18
- )
19
- from atcdr.util.gpt import ChatGPT, Model, set_api_key
20
- from atcdr.util.i18n import _
21
- from atcdr.util.parse import ProblemHTML
22
-
23
-
24
- def get_code_from_gpt_output(output: str) -> str:
25
- pattern = re.compile(r'```(?:\w+)?\s*(.*?)\s*```', re.DOTALL)
26
- match = pattern.search(output)
27
- return match.group(1) if match else ''
28
-
29
-
30
- def render_result_for_GPT(
31
- test: TestRunner,
32
- ) -> tuple[str, bool]:
33
- results = list(test)
34
-
35
- match test.info.summary:
36
- case ResultStatus.AC:
37
- return 'Accepted', True
38
- case ResultStatus.CE:
39
- return f'Compile Error \n {test.info.compiler_message}', False
40
- case _:
41
- message_for_gpt = ''.join(
42
- f'\n{result.label} => {result.result.passed.value}\nInput :\n{result.testcase.input}\nOutput :\n{result.result.output}\nExpected :\n{result.testcase.output}\n'
43
- if result.result.passed == ResultStatus.WA
44
- else f'\n{result.label} => {result.result.passed.value}\nInput :\n{result.testcase.input}\nOutput :\n{result.result.output}\n'
45
- for result in results
46
- )
47
- return message_for_gpt, False
48
-
49
-
50
- def generate_code(file: Filename, lang: Lang, model: Model) -> None:
51
- console = Console()
52
- with open(file, 'r') as f:
53
- html_content = f.read()
54
- md = ProblemHTML(html_content).make_problem_markdown('en')
55
-
56
- if set_api_key() is None:
57
- return
58
- gpt = ChatGPT(
59
- system_prompt=f"""You are an excellent programmer. You solve problems in competitive programming.When a user provides you with a problem from a programming contest called AtCoder, including the Problem,Constraints, Input, Output, Input Example, and Output Example, please carefully consider these and solve the problem.Make sure that your output code block contains no more than two blocks. Pay close attention to the Input, Input Example, Output, and Output Example.Create the solution in {lang2str(lang)}.""",
60
- model=model,
61
- )
62
- with console.status(_('generating_code', gpt.model.value)):
63
- reply = gpt.tell(md)
64
-
65
- code = get_code_from_gpt_output(reply)
66
- console.print('[green][+][/green] ' + _('code_generation_success'))
67
- console.rule(_('code_by_model', lang2str(lang), gpt.model.value))
68
- console.print(Syntax(code=code, lexer=lang2str(lang)))
69
-
70
- saved_filename = (
71
- os.path.splitext(file)[0] + f'_by_{gpt.model.value}' + FILE_EXTENSIONS[lang]
72
- )
73
- with open(saved_filename, 'w') as f:
74
- console.print('[green][+][/green] ' + _('code_saved', gpt.model.value, f.name))
75
- f.write(code)
76
-
77
-
78
- def generate_template(file: Filename, lang: Lang) -> None:
79
- console = Console()
80
- with open(file, 'r') as f:
81
- html_content = f.read()
82
- md = ProblemHTML(html_content).make_problem_markdown('en')
83
-
84
- if set_api_key() is None:
85
- return
86
- gpt = ChatGPT(
87
- system_prompt='You are a highly skilled programmer. Your role is to create a template code for competitive programming.',
88
- temperature=0.0,
89
- )
90
-
91
- propmpt = f"""
92
- The user will provide a problem from a programming contest called AtCoder. This problem will include the Problem Statement, Constraints, Input, Output, Input Example, and Output Example. You should focus on the Constraints and Input sections to create the template in {lang2str(lang)}.
93
-
94
- - First, create the part of the code that handles input. Then, you should read ###Input Block and ###Constraints Block.
95
- - After receiving the input, define variables in the program by reading ###Constraints Block and explain how to use the variables in the comment of your code block with example.
96
- - Last, define variables needed for output. Then you should read ###Output Block and ###Constraints Block.
97
-
98
- You must not solve the problem. Please faithfully reproduce the variable names defined in the problem.
99
- """
100
- with console.status(_('generating_template', lang2str(lang))):
101
- reply = gpt.tell(md + propmpt)
102
- code = get_code_from_gpt_output(reply)
103
-
104
- savaed_filename = os.path.splitext(file)[0] + FILE_EXTENSIONS[lang]
105
- with open(savaed_filename, 'x') as f:
106
- console.print('[green][+][/green] ' + _('template_created', savaed_filename))
107
- f.write(code)
108
-
109
-
110
- def solve_problem(file: Filename, lang: Lang, model: Model) -> None:
111
- console = Console()
112
- with open(file, 'r') as f:
113
- html = ProblemHTML(f.read())
114
-
115
- md = html.make_problem_markdown('en')
116
- labeled_cases = html.load_labeled_testcase()
117
-
118
- if set_api_key() is None:
119
- return
120
- gpt = ChatGPT(
121
- system_prompt=f"""You are a brilliant programmer. Your task is to solve an AtCoder problem. AtCoder is a platform that hosts programming competitions where participants write programs to solve algorithmic challenges.Please solve the problem in {lang2str(lang)}.""",
122
- model=model,
123
- )
124
-
125
- file_without_ext = os.path.splitext(file)[0]
126
-
127
- for i in range(1, 4):
128
- with console.status(_('nth_code_generation', i, gpt.model.value)):
129
- if i == 1:
130
- test_report = ''
131
- reply = gpt.tell(md)
132
- else:
133
- prompt = f"""The following is the test report for the code you provided:
134
- {test_report}
135
- Please provide an updated version of the code in {lang2str(lang)}."""
136
- console.print(
137
- '[green][+][/] ' + _('regenerating_with_prompt', gpt.model.value)
138
- )
139
- console.print(Panel(prompt))
140
- reply = gpt.tell(prompt)
141
-
142
- code = get_code_from_gpt_output(reply)
143
-
144
- saved_filename = (
145
- f'{i}_'
146
- + file_without_ext
147
- + f'_by_{gpt.model.value}'
148
- + FILE_EXTENSIONS[lang]
149
- )
150
- with open(saved_filename, 'w') as f:
151
- console.print('[green][+][/] ' + _('code_generation_success_file', f.name))
152
- f.write(code)
153
-
154
- with console.status(
155
- _('testing_generated_code', gpt.model.value), spinner='circleHalves'
156
- ):
157
- test = TestRunner(saved_filename, labeled_cases)
158
- test_report, is_ac = render_result_for_GPT(test)
159
-
160
- console.print(create_renderable_test_info(test.info))
161
-
162
- if is_ac:
163
- console.print('[green][+][/] ' + _('test_success'))
164
- break
165
- else:
166
- console.print('[red][-][/] ' + _('test_failed'))
167
-
168
- with open(
169
- 'log_'
170
- + file_without_ext
171
- + f'_by_{gpt.model.value}'
172
- + FILE_EXTENSIONS[Lang.JSON],
173
- 'w',
174
- ) as f:
175
- console.print('[green][+][/] ' + _('log_saved', gpt.model.value, f.name))
176
- f.write(json.dumps(gpt.messages, indent=2))
177
- return
178
-
179
-
180
- @click.command(short_help=_('cmd_generate'), help=_('cmd_generate'))
181
- @add_file_selector('files', filetypes=[Lang.HTML])
182
- @click.option('--lang', default='Python', help=_('opt_output_lang'))
183
- @click.option('--model', default=Model.GPT41_MINI.value, help=_('opt_model'))
184
- @click.option('--without-test', is_flag=True, help=_('opt_without_test'))
185
- @click.option('--template', is_flag=True, help=_('opt_template'))
186
- def generate(files, lang, model, without_test, template):
187
- """HTMLファイルからコード生成またはテンプレート出力を行います。"""
188
- la = str2lang(lang)
189
- model_enum = Model(model)
190
-
191
- for path in files:
192
- if template:
193
- generate_template(path, la)
194
- elif without_test:
195
- generate_code(path, la, model_enum)
196
- else:
197
- solve_problem(path, la, model_enum)
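For reference, the removed generate.py pulled code out of the model's Markdown reply with a single regex over the first fenced block — a step the new Responses-API flow no longer needs, since the code arrives as a tool-call argument. A small self-contained demonstration of that extraction (the sample reply string is invented; the fence is built indirectly so this snippet stays fence-safe):

```python
import re

FENCE = '`' * 3  # three backticks, written indirectly


def get_code_from_gpt_output(output: str) -> str:
    # Grab the body of the first fenced code block, ignoring the language tag.
    pattern = re.compile(FENCE + r'(?:\w+)?\s*(.*?)\s*' + FENCE, re.DOTALL)
    match = pattern.search(output)
    return match.group(1) if match else ''


reply = f"Here is the solution:\n{FENCE}python\nprint(sum(map(int, input().split())))\n{FENCE}"
print(get_code_from_gpt_output(reply))  # -> print(sum(map(int, input().split())))
```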
atcdr/util/gpt.py DELETED
@@ -1,116 +0,0 @@
1
- import os
2
- from enum import Enum
3
- from typing import Dict, List, Optional
4
-
5
- import requests
6
-
7
- from atcdr.util.i18n import _
8
-
9
-
10
- class Model(Enum):
11
- GPT4O = 'gpt-4o'
12
- GPT41 = 'gpt-4.1'
13
- GPT41_MINI = 'gpt-4.1-mini'
14
- GPT41_NANO = 'gpt-4.1-nano'
15
- GPT4O_MINI = 'gpt-4o-mini'
16
- O1_PREVIEW = 'o1-preview'
17
- O1 = 'o1'
18
- O3 = 'o3'
19
- O1_MINI = 'o1-mini'
20
- O3_MINI = 'o3-mini'
21
- O4_MINI = 'o4-mini'
22
-
23
-
24
- def set_api_key() -> Optional[str]:
25
- api_key = os.getenv('OPENAI_API_KEY')
26
- if api_key and validate_api_key(api_key):
27
- return api_key
28
- elif api_key:
29
- print(_('api_key_validation_failed'))
30
- else:
31
- pass
32
-
33
- api_key = input(_('get_api_key_prompt'))
34
- if validate_api_key(api_key):
35
- print(_('api_key_test_success'))
36
- print(_('save_api_key_prompt'))
37
- if input() == 'y':
38
- zshrc_path = os.path.expanduser('~/.zshrc')
39
- with open(zshrc_path, 'a') as f:
40
- f.write(f'export OPENAI_API_KEY={api_key}\n')
41
- print(_('api_key_saved', zshrc_path))
42
- os.environ['OPENAI_API_KEY'] = api_key
43
- return api_key
44
- else:
45
- print(_('api_key_required'))
46
- return None
47
-
48
-
49
- def validate_api_key(api_key: str) -> bool:
50
- headers = {
51
- 'Content-Type': 'application/json',
52
- 'Authorization': f'Bearer {api_key}',
53
- }
54
-
55
- response = requests.get('https://api.openai.com/v1/models', headers=headers)
56
-
57
- if response.status_code == 200:
58
- return True
59
- else:
60
- print(_('api_key_validation_error'))
61
- return False
62
-
63
-
64
- class ChatGPT:
65
- API_URL = 'https://api.openai.com/v1/chat/completions'
66
-
67
- # How to use the API: https://platform.openai.com/docs/api-reference/making-requests
68
- def __init__(
69
- self,
70
- api_key: Optional[str] = None,
71
- model: Model = Model.GPT41_MINI,
72
- max_tokens: int = 3000,
73
- temperature: float = 0.7,
74
- messages: Optional[List[Dict[str, str]]] = None,
75
- system_prompt: str = 'You are a helpful assistant.',
76
- ) -> None:
77
- self.api_key = api_key or os.getenv('OPENAI_API_KEY')
78
- self.model = model
79
- self.max_tokens = max_tokens
80
- self.temperature = temperature
81
- self.messages = (
82
- messages
83
- if messages is not None
84
- else [{'role': 'system', 'content': system_prompt}]
85
- )
86
-
87
- self.__headers = {
88
- 'Content-Type': 'application/json',
89
- 'Authorization': f'Bearer {self.api_key}',
90
- }
91
-
92
- def tell(self, message: str) -> str:
93
- self.messages.append({'role': 'user', 'content': message})
94
-
95
- settings = {
96
- 'model': self.model.value,
97
- 'messages': self.messages,
98
- 'max_tokens': self.max_tokens,
99
- 'temperature': self.temperature,
100
- }
101
-
102
- response = requests.post(self.API_URL, headers=self.__headers, json=settings)
103
- responsej = response.json()
104
- try:
105
- reply = responsej['choices'][0]['message']['content']
106
- except KeyError:
107
- print(_('response_format_error') + str(responsej))
108
- return 'Error: Unable to retrieve response.'
109
-
110
- self.messages.append({'role': 'assistant', 'content': reply})
111
-
112
- # usage = responsej['usage']
113
- # input_tokens = usage.get('prompt_tokens', 0)
114
- # output_tokens = usage.get('completion_tokens', 0)
115
-
116
- return reply