gpt-shell-4o-mini 1.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
chatgpt/main.py ADDED
@@ -0,0 +1,548 @@
1
+ """
2
+ Main CLI interface for GPT-shell-4o-mini.
3
+
4
+ This module contains the main function and command-line interface logic,
5
+ integrating all the other modules.
6
+ """
7
+
8
+ import os
9
+ import sys
10
+ import argparse
11
+ import subprocess
12
+ from datetime import datetime
13
+ from pathlib import Path
14
+ from rich.console import Console
15
+ from rich.markdown import Markdown
16
+
17
+ # Import modules from the package
18
+ from .checks import check_api_key
19
+ from .api_client import (
20
+ initialize_client,
21
+ list_models,
22
+ get_model_details,
23
+ generate_image,
24
+ get_chat_completion,
25
+ get_system_prompt,
26
+ set_custom_system_prompt,
27
+ get_custom_system_prompt,
28
+ reset_system_prompt,
29
+ COMMAND_GENERATION_PROMPT,
30
+ DEFAULT_MODEL,
31
+ DEFAULT_TEMPERATURE,
32
+ DEFAULT_MAX_TOKENS,
33
+ DEFAULT_IMAGE_SIZE,
34
+ MAX_CONTEXT_MESSAGES,
35
+ )
36
+
37
+ # Configuration
38
+ HISTORY_FILE = Path.home() / ".chatgpt_py_history"
39
+
40
+ console = Console()
41
+
42
+
43
def append_history(prompt, response):
    """Record a single prompt/response exchange in the history file.

    Failures to write are reported as a warning rather than raised, so
    history logging never interrupts a chat session.
    """
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M")
    entry = f"{stamp} User: {prompt}\n" + f"{stamp} Assistant: {response}\n\n"
    try:
        with open(HISTORY_FILE, "a", encoding="utf-8") as history:
            history.write(entry)
    except IOError as err:
        console.print(
            f"[yellow]Warning:[/yellow] Could not write to history file {HISTORY_FILE}: {err}"
        )
54
+
55
+
56
def display_history():
    """Print the saved chat history to the console, if any exists."""
    if not HISTORY_FILE.exists():
        console.print("[yellow]History file not found.[/yellow]")
        return
    try:
        contents = HISTORY_FILE.read_text(encoding="utf-8")
    except IOError as err:
        console.print(f"[red]Error reading history file {HISTORY_FILE}: {err}[/red]")
    else:
        console.print(contents)
66
+
67
+
68
def is_dangerous(command):
    """Heuristically flag shell commands that could modify or damage the system.

    Returns True for commands that delete/move files, change permissions,
    overwrite files via output redirection, or download content with a real
    target (e.g. ``curl http://...`` is flagged, ``curl --help`` is not).
    Redirecting output to ``/dev/null`` is allowed.

    Note: this is a best-effort substring heuristic, not a shell parser —
    it can produce both false positives and false negatives.
    """
    # Patterns that are dangerous wherever they appear in the command.
    destructive_patterns = [
        "rm ",
        "mv ",
        "mkfs",
        ":(){:|:&};",  # fork bomb
        "dd ",
        "chmod ",
    ]
    for pattern in destructive_patterns:
        if pattern in command:
            return True

    # Download tools are dangerous only when followed by a non-option
    # argument (likely a URL), avoiding false positives like 'curl --help'.
    for pattern in ("wget ", "curl "):
        if pattern in command:
            remainder = command.split(pattern, 1)[1].strip()
            if remainder and not remainder.startswith("-"):
                return True

    # Output redirection can overwrite files. Allow discarding to /dev/null.
    # BUGFIX: previously '>' was also in the blanket pattern list, so every
    # command containing '>' was flagged unconditionally and the /dev/null
    # carve-out (and the surrounding suffix checks) were dead code.
    if ">" in command and not command.strip().endswith("/dev/null"):
        return True

    return False
108
+
109
+
110
def print_debug_info(messages, model, temperature, max_tokens, debug_flag):
    """Dump the outgoing message list and generation settings to the console.

    Does nothing unless ``debug_flag`` is truthy.
    """
    if not debug_flag:
        return

    console.print("\n[bold yellow]=== DEBUG: Context Sent to AI ===[/bold yellow]")

    # Imported lazily to avoid circular imports at module load time.
    from .user_profile import format_user_profile
    from .terminal_context import format_terminal_session

    # Reassemble the same context string (profile + terminal session) for display.
    sections = [
        part
        for part in (format_user_profile(), format_terminal_session())
        if part
    ]
    full_context = "\n".join(sections)

    console.print(f"[cyan]Full Context:[/cyan]\n{full_context}")

    console.print("[cyan]Messages being sent:[/cyan]")
    for msg in messages:
        color = "green" if msg["role"] == "system" else "blue"
        body = msg["content"]
        preview = body[:100] + ("..." if len(body) > 100 else "")
        console.print(f" [{color}] {msg['role']}", end=" ")
        # markup=False so model/user text is not parsed as rich markup.
        console.print(preview, markup=False)

    console.print(
        f"[cyan]Model: {model}, Temperature: {temperature}, Max Tokens: {max_tokens}[/cyan]"
    )
    console.print("[bold yellow]=== END DEBUG ===[/bold yellow]\n")
144
+
145
+
146
def main():
    """Entry point for the CLI.

    Parses command-line arguments, performs any one-shot actions
    (model listing, system-prompt management), then either answers a
    single piped/argument prompt or starts an interactive chat loop.
    Exits the process directly via sys.exit for standalone actions.
    """

    # --- Argument Parsing ---
    parser = argparse.ArgumentParser(
        description="A Python script to interact with OpenAI's API from the terminal.",
        epilog="Example: python -m chatgpt -p 'Translate to French: Hello World!'",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Print debug information including context sent to AI.",
    )
    parser.add_argument(
        "-p", "--prompt", help="Provide prompt directly instead of starting chat."
    )
    parser.add_argument(
        "--prompt-from-file",
        type=argparse.FileType("r", encoding="utf-8"),
        help="Provide prompt from a file.",
    )
    parser.add_argument(
        "-i", "--init-prompt", help="Provide initial system prompt (overrides default)."
    )
    parser.add_argument(
        "--init-prompt-from-file",
        type=argparse.FileType("r", encoding="utf-8"),
        help="Provide initial system prompt from file.",
    )
    parser.add_argument(
        "-l", "--list", action="store_true", help="List available OpenAI models."
    )
    parser.add_argument(
        "-m",
        "--model",
        default=DEFAULT_MODEL,
        help=f"Model to use (default: {DEFAULT_MODEL}).",
    )
    parser.add_argument(
        "-t",
        "--temperature",
        type=float,
        default=DEFAULT_TEMPERATURE,
        help=f"Sampling temperature (default: {DEFAULT_TEMPERATURE}).",
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=DEFAULT_MAX_TOKENS,
        help=f"Max tokens for completion (default: {DEFAULT_MAX_TOKENS}).",
    )
    parser.add_argument(
        "-s",
        "--size",
        default=DEFAULT_IMAGE_SIZE,
        choices=[
            "256x256",
            "512x512",
            "1024x1024",
            "1792x1024",
            "1024x1792",
            "2048x2048",
            "4096x4096",
        ],  # Added DALL-E 3 sizes
        help=f"Image size for DALL-E (default: {DEFAULT_IMAGE_SIZE}).",
    )

    # System prompt management arguments
    parser.add_argument(
        "--sys-prompt",
        help="Set or update custom system prompt (saved to ~/.chatgpt_py_sys_prompt).",
    )
    parser.add_argument(
        "--get-sys-prompt",
        action="store_true",
        help="Show current custom system prompt.",
    )
    parser.add_argument(
        "--reset-sys-prompt",
        action="store_true",
        help="Remove custom system prompt and use default.",
    )

    args = parser.parse_args()

    # --- Initial Checks ---
    # check_api_key is expected to exit/raise when no key is available;
    # presumably it reads OPENAI_API_KEY — verify in chatgpt/checks.py.
    api_key = check_api_key()

    # Initialize OpenAI client now that we know the key exists
    initialize_client(api_key)

    # --- Handle Standalone Actions ---
    # Each of these prints its result and terminates the process.
    if args.list:
        list_models()
        sys.exit(0)

    # Handle system prompt management
    if args.sys_prompt is not None:
        if args.sys_prompt:
            # Set custom system prompt from argument
            if set_custom_system_prompt(args.sys_prompt):
                console.print(
                    "[green]✓[/green] Custom system prompt saved to ~/.chatgpt_py_sys_prompt"
                )
            else:
                console.print("[red]✗[/red] Failed to save custom system prompt")
        else:
            # --sys-prompt was given with an empty string value.
            console.print("[yellow]Warning:[/yellow] System prompt cannot be empty")
        sys.exit(0)

    if args.get_sys_prompt:
        current_prompt = get_custom_system_prompt()
        if current_prompt:
            console.print(
                f"[cyan]Current custom system prompt:[/cyan]\n{current_prompt}"
            )
        else:
            console.print(
                "[yellow]No custom system prompt found. Using default prompt.[/yellow]"
            )
            console.print(f"[cyan]Default prompt:[/cyan]\n{get_system_prompt()}")
        sys.exit(0)

    if args.reset_sys_prompt:
        if reset_system_prompt():
            console.print(
                "[green]✓[/green] Custom system prompt removed. Using default prompt."
            )
        else:
            console.print("[yellow]No custom system prompt to remove.[/yellow]")
        sys.exit(0)

    # --- Determine Initial Prompt and Mode ---
    # pipe_mode means "answer one prompt and exit" (from -p, a file, or stdin)
    # as opposed to the interactive REPL below.
    initial_prompt_text = None
    pipe_mode = False

    if args.prompt:
        initial_prompt_text = args.prompt
        pipe_mode = True
    elif args.prompt_from_file:
        initial_prompt_text = args.prompt_from_file.read()
        args.prompt_from_file.close()
        pipe_mode = True
    elif not sys.stdin.isatty():  # Check if input is being piped
        initial_prompt_text = sys.stdin.read()
        pipe_mode = True

    # --- Setup System Prompt ---
    # Precedence: -i argument > --init-prompt-from-file > saved/default prompt.
    system_prompt = get_system_prompt()  # Default
    if args.init_prompt:
        system_prompt = args.init_prompt
    elif args.init_prompt_from_file:
        system_prompt = args.init_prompt_from_file.read()
        args.init_prompt_from_file.close()

    # --- Chat History Initialization ---
    messages = [{"role": "system", "content": system_prompt}]

    # --- Execute ---

    if pipe_mode:
        # --- Pipe/Single Prompt Mode ---
        prompt = initial_prompt_text.strip()
        if not prompt:
            console.print("[red]Error:[/red] Received empty prompt from pipe/argument.")
            sys.exit(1)

        # Special prompt prefixes dispatch to non-chat features.
        if prompt.lower().startswith("image:"):
            image_prompt = prompt[len("image:") :].strip()
            generate_image(image_prompt, args.size)
        elif prompt.lower().startswith("model:"):
            model_id = prompt[len("model:") :].strip()
            get_model_details(model_id)
        elif prompt.lower() == "history":
            display_history()
        elif prompt.lower() == "models":
            list_models()
        elif prompt.lower().startswith("command:"):
            # Generate a shell command from a natural-language description,
            # then optionally execute it after user confirmation.
            command_desc = prompt[len("command:") :].strip()
            request_prompt = f"{COMMAND_GENERATION_PROMPT} {command_desc}"
            messages.append({"role": "user", "content": request_prompt})
            print_debug_info(
                messages, args.model, args.temperature, args.max_tokens, args.debug
            )
            console.print("[grey50]Generating command...[/grey50]", end="\r")
            command_output = get_chat_completion(
                messages, args.model, args.temperature, args.max_tokens
            )
            console.print(" " * 30, end="\r")  # Clear

            if command_output:
                console.print(
                    f"[bold cyan]Suggested Command:[/bold cyan]\n{command_output}"
                )
                append_history(prompt, command_output)  # Log before asking to run

                if is_dangerous(command_output):
                    console.print(
                        "[bold yellow]Warning![/bold yellow] This command might modify your file system, download files, or execute complex operations. Review it carefully."
                    )

                try:
                    # Default is "No": anything other than exactly 'y' declines.
                    if console.input("Execute this command? (y/N) ").lower() == "y":
                        console.print(f"\n[grey50]Executing: {command_output}[/grey50]")
                        # Use shell=True cautiously, as the model might generate complex pipes/chains
                        result = subprocess.run(
                            command_output,
                            shell=True,
                            check=False,
                            capture_output=True,
                            text=True,
                        )
                        if result.stdout:
                            console.print("[bold green]Output:[/bold green]")
                            console.print(result.stdout)
                        if result.stderr:
                            console.print("[bold red]Error Output:[/bold red]")
                            console.print(result.stderr)
                        if result.returncode != 0:
                            console.print(
                                f"[yellow]Command exited with status code:[/yellow] {result.returncode}"
                            )

                except Exception as e:
                    console.print(
                        f"[bold red]Failed to execute command:[/bold red] {e}"
                    )
            else:
                console.print("[red]Failed to generate command.[/red]")
        else:
            # Default to chat completion
            messages.append({"role": "user", "content": prompt})
            print_debug_info(
                messages, args.model, args.temperature, args.max_tokens, args.debug
            )
            console.print("[grey50]Processing...[/grey50]", end="\r")
            response_data = get_chat_completion(
                messages, args.model, args.temperature, args.max_tokens
            )
            console.print(" " * 30, end="\r")  # Clear processing message
            if response_data:
                console.print(Markdown(response_data))
                console.print(
                    "─" * console.width
                )  # Full-width horizontal line separator
                append_history(prompt, response_data)
            else:
                console.print("[red]Failed to get response.[/red]")

    else:
        # --- Interactive Chat Mode ---
        console.print(
            f"Welcome to ChatGPT in Python! Model: [cyan]{args.model}[/cyan]. Type 'exit' or 'quit' to end."
        )
        while True:
            try:
                prompt = console.input("[bold green]You: [/bold green]")
            except (EOFError, KeyboardInterrupt):
                # Ctrl-D / Ctrl-C ends the session cleanly.
                console.print("\nExiting.")
                break

            prompt_lower = prompt.lower().strip()

            if prompt_lower in ["exit", "quit", "q"]:
                break
            if not prompt.strip():
                continue

            # Built-in commands mirror the pipe-mode prefixes.
            if prompt_lower == "history":
                display_history()
                continue
            elif prompt_lower == "models":
                list_models()
                continue
            elif prompt_lower.startswith("model:"):
                model_id = prompt[len("model:") :].strip()
                if model_id:
                    get_model_details(model_id)
                else:
                    console.print(
                        "[yellow]Please specify a model ID after 'model:'.[/yellow]"
                    )
                continue
            elif prompt_lower.startswith("image:"):
                image_prompt = prompt[len("image:") :].strip()
                if image_prompt:
                    generate_image(image_prompt, args.size)
                    # Don't add image prompts/responses to chat history for now
                else:
                    console.print(
                        "[yellow]Please provide a description after 'image:'.[/yellow]"
                    )
                continue
            elif prompt_lower.startswith("command:"):
                command_desc = prompt[len("command:") :].strip()
                if not command_desc:
                    console.print(
                        "[yellow]Please describe the command you want after 'command:'.[/yellow]"
                    )
                    continue

                # Prepare message list *specifically* for command generation
                # (isolated from the ongoing chat context).
                command_messages = [
                    {
                        "role": "system",
                        "content": get_system_prompt(),
                    },  # Use base system prompt if needed
                    {
                        "role": "user",
                        "content": f"{COMMAND_GENERATION_PROMPT} {command_desc}",
                    },
                ]

                console.print("[grey50]Generating command...[/grey50]", end="\r")
                print_debug_info(
                    command_messages,
                    args.model,
                    args.temperature,
                    args.max_tokens,
                    args.debug,
                )
                command_output = get_chat_completion(
                    command_messages, args.model, args.temperature, args.max_tokens
                )
                console.print(" " * 30, end="\r")  # Clear

                if command_output:
                    console.print(
                        f"[bold cyan]Suggested Command:[/bold cyan]\n{command_output}"
                    )
                    # Log the original 'command:' request and the generated command
                    append_history(prompt, command_output)

                    if is_dangerous(command_output):
                        console.print(
                            "[bold yellow]Warning![/bold yellow] This command might modify your file system, download files, or execute complex operations. Review it carefully."
                        )

                    try:
                        # Default is "No": anything other than exactly 'y' declines.
                        if console.input("Execute this command? (y/N) ").lower() == "y":
                            console.print(
                                f"\n[grey50]Executing: {command_output}[/grey50]"
                            )
                            # Use shell=True cautiously
                            result = subprocess.run(
                                command_output,
                                shell=True,
                                check=False,
                                capture_output=True,
                                text=True,
                            )
                            if result.stdout:
                                console.print("[bold green]Output:[/bold green]")
                                console.print(result.stdout.strip())
                            if result.stderr:
                                console.print("[bold red]Error Output:[/bold red]")
                                console.print(result.stderr.strip())
                            if result.returncode != 0:
                                console.print(
                                    f"[yellow]Command exited with status code:[/yellow] {result.returncode}"
                                )
                    except Exception as e:
                        console.print(
                            f"[bold red]Failed to execute command:[/bold red] {e}"
                        )
                else:
                    console.print("[red]Failed to generate command.[/red]")
                # Don't add the command generation interaction to the main chat history list `messages`
                continue  # Go to next prompt

            # --- Regular Chat ---
            messages.append({"role": "user", "content": prompt})

            console.print("[grey50]ChatGPT is thinking...[/grey50]", end="\r")
            print_debug_info(
                messages, args.model, args.temperature, args.max_tokens, args.debug
            )
            response_data = get_chat_completion(
                messages, args.model, args.temperature, args.max_tokens
            )
            console.print(" " * 30, end="\r")  # Clear thinking message

            if response_data:
                console.print("[bold cyan]ChatGPT:[/bold cyan]")
                console.print(Markdown(response_data))
                console.print(
                    "─" * console.width
                )  # Full-width horizontal line separator
                messages.append({"role": "assistant", "content": response_data})
                append_history(prompt, response_data)

                # Simple context management: Keep only the last N pairs + system prompt
                if len(messages) > (1 + MAX_CONTEXT_MESSAGES * 2):  # 1 system + N pairs
                    # Keep system prompt and the last MAX_CONTEXT_MESSAGES*2 messages (user+assistant)
                    messages = [messages[0]] + messages[-(MAX_CONTEXT_MESSAGES * 2) :]
            else:
                console.print("[red]Failed to get response.[/red]")
                # Remove the user message that failed
                messages.pop()
545
+
546
+
547
# Allow running this module directly (in addition to `python -m chatgpt`).
if __name__ == "__main__":
    main()
@@ -0,0 +1,48 @@
1
+ """
2
+ Terminal context capture functions for GPT-shell-4o-mini.
3
+
4
+ This module handles capturing terminal session data from various sources
5
+ like tmux, screen, and shell history.
6
+ """
7
+
8
+ import os
9
+ import platform
10
+ from pathlib import Path
11
+
12
+
13
def get_current_shell():
    """Return the name of the current shell (e.g. 'bash', 'zsh').

    On Windows, distinguishes PowerShell from cmd; on other systems the
    name is taken from the last path component of $SHELL, or 'unknown'
    when $SHELL is unset/empty.
    """
    if platform.system() == "Windows":
        # PSModulePath is set in PowerShell sessions but not in plain cmd.
        return "PowerShell" if os.environ.get("PSModulePath") else "cmd"

    shell_path = os.environ.get("SHELL", "")
    if not shell_path:
        return "unknown"
    return shell_path.rsplit("/", 1)[-1]
22
+
23
+
24
def format_terminal_session():
    """Return a one-line summary of the current terminal environment.

    Produces a string of the form '[Terminal Session: (...)]' covering
    shell, working directory, user, home directory, and any active
    virtualenv/conda environment. Returns '' if collection fails.
    """
    try:
        # Core session facts, in a fixed display order.
        fields = {
            "Shell": get_current_shell(),
            "CWD": os.getcwd(),
            "User": os.getenv("USER", os.getenv("USERNAME", "unknown")),
            "Home": Path.home(),
        }

        # Optional environment details, appended only when active.
        venv = os.getenv("VIRTUAL_ENV")
        if venv:
            fields["VEnv"] = os.path.basename(venv)

        conda_env = os.getenv("CONDA_DEFAULT_ENV")
        if conda_env:
            fields["Conda"] = conda_env

        summary = " | ".join(f"{label}: {value}" for label, value in fields.items())
        return f"[Terminal Session: ({summary})]"
    except Exception:
        # Context is best-effort; never let its failure break the caller.
        return ""