commandchat 0.0.12__tar.gz → 0.0.13__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. {commandchat-0.0.12 → commandchat-0.0.13}/PKG-INFO +4 -1
  2. {commandchat-0.0.12 → commandchat-0.0.13}/commandchat.egg-info/PKG-INFO +4 -1
  3. {commandchat-0.0.12 → commandchat-0.0.13}/commandchat.egg-info/requires.txt +3 -0
  4. commandchat-0.0.13/occ/CommandChat.py +407 -0
  5. {commandchat-0.0.12 → commandchat-0.0.13}/occ/command/__main__.py +69 -8
  6. {commandchat-0.0.12 → commandchat-0.0.13}/occ/commons/config.py +60 -5
  7. commandchat-0.0.13/occ/configuration/profile_config.py +262 -0
  8. {commandchat-0.0.12 → commandchat-0.0.13}/pyproject.toml +4 -1
  9. commandchat-0.0.12/occ/CommandChat.py +0 -209
  10. commandchat-0.0.12/occ/configuration/profile_config.py +0 -46
  11. {commandchat-0.0.12 → commandchat-0.0.13}/LICENSE +0 -0
  12. {commandchat-0.0.12 → commandchat-0.0.13}/README.md +0 -0
  13. {commandchat-0.0.12 → commandchat-0.0.13}/commandchat.egg-info/SOURCES.txt +0 -0
  14. {commandchat-0.0.12 → commandchat-0.0.13}/commandchat.egg-info/dependency_links.txt +0 -0
  15. {commandchat-0.0.12 → commandchat-0.0.13}/commandchat.egg-info/entry_points.txt +0 -0
  16. {commandchat-0.0.12 → commandchat-0.0.13}/commandchat.egg-info/top_level.txt +0 -0
  17. {commandchat-0.0.12 → commandchat-0.0.13}/occ/ConvertLogToMarkDown.py +0 -0
  18. {commandchat-0.0.12 → commandchat-0.0.13}/occ/__init__.py +0 -0
  19. {commandchat-0.0.12 → commandchat-0.0.13}/occ/command/__init__.py +0 -0
  20. {commandchat-0.0.12 → commandchat-0.0.13}/occ/commons/__init__.py +0 -0
  21. {commandchat-0.0.12 → commandchat-0.0.13}/occ/configuration/__init__.py +0 -0
  22. {commandchat-0.0.12 → commandchat-0.0.13}/occ/utils/CommonUtil.py +0 -0
  23. {commandchat-0.0.12 → commandchat-0.0.13}/occ/utils/__init__.py +0 -0
  24. {commandchat-0.0.12 → commandchat-0.0.13}/occ/utils/logger.py +0 -0
  25. {commandchat-0.0.12 → commandchat-0.0.13}/setup.cfg +0 -0
  26. {commandchat-0.0.12 → commandchat-0.0.13}/setup.py +0 -0
{commandchat-0.0.12 → commandchat-0.0.13}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: commandchat
- Version: 0.0.12
+ Version: 0.0.13
  Summary: use command to chat with openai models
  Home-page: https://github.com/
  Author: xoto
@@ -17,7 +17,10 @@ Requires-Dist: click
  Requires-Dist: Image
  Requires-Dist: openai
  Requires-Dist: prompt_toolkit
+ Requires-Dist: pyperclip
  Requires-Dist: rich
+ Requires-Dist: requests
+ Requires-Dist: questionary
  Dynamic: author
  Dynamic: home-page
  Dynamic: license-file
{commandchat-0.0.12 → commandchat-0.0.13}/commandchat.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: commandchat
- Version: 0.0.12
+ Version: 0.0.13
  Summary: use command to chat with openai models
  Home-page: https://github.com/
  Author: xoto
@@ -17,7 +17,10 @@ Requires-Dist: click
  Requires-Dist: Image
  Requires-Dist: openai
  Requires-Dist: prompt_toolkit
+ Requires-Dist: pyperclip
  Requires-Dist: rich
+ Requires-Dist: requests
+ Requires-Dist: questionary
  Dynamic: author
  Dynamic: home-page
  Dynamic: license-file
{commandchat-0.0.12 → commandchat-0.0.13}/commandchat.egg-info/requires.txt
@@ -2,4 +2,7 @@ click
  Image
  openai
  prompt_toolkit
+ pyperclip
  rich
+ requests
+ questionary
commandchat-0.0.13/occ/CommandChat.py
@@ -0,0 +1,407 @@
+ import asyncio
+ import json
+ import os
+ import sys
+ import time
+ from pathlib import Path
+ from typing import AsyncGenerator, Optional
+ from dataclasses import dataclass
+
+ from openai import AzureOpenAI
+ from openai import OpenAI
+ from openai.types.chat.chat_completion_chunk import Choice
+ from prompt_toolkit import print_formatted_text, HTML, Application
+ from prompt_toolkit.clipboard.pyperclip import PyperclipClipboard
+ from prompt_toolkit.layout import Layout, HSplit
+ from prompt_toolkit.widgets import TextArea
+ from rich.console import Console
+ from rich.live import Live
+ from rich.markdown import Markdown
+
+ from occ.commons.config import get_env
+
+
+ @dataclass
+ class StreamChunk:
+     """Unified streaming chunk for both chat.completions and responses API"""
+     content: Optional[str] = None
+     role: Optional[str] = None
+     finish_reason: Optional[str] = None
+     event_type: Optional[str] = None
+
+
+ DEFAULT_CHAT_LOG_ID = "chat-1"
+ DEFAULT_PROFILE = "default"
+ USER_COLOR = "ansiyellow"
+ ASSISTANT_COLOR = "ansicyan"
+ TYPING_DELAY = 0.01  # typing speed (seconds per character)
+ SEPARATOR = "─" * 30
+
+
+ def get_home_path():
+     homedir = os.environ.get('HOME', None)
+     if os.name == 'nt':
+         homedir = os.path.expanduser('~')
+     return homedir
+
+
+ clip = PyperclipClipboard()
+ console = Console()
+
+ def print_formatted(content: str, live: Live):
+     md = Markdown(content)
+     live.update(md)
+     sys.stdout.flush()
+
+
+ class CommandChat:
+     partial_text = []
+     role = None
+
+     def __init__(self, profile=None, chat_log_id=None, model=None):
+         now = time.strftime("%Y%m%d", time.localtime())
+         self.profile = profile or DEFAULT_PROFILE
+         self.api_server_type = get_env(self.profile, "api_server_type")
+
+         if not self.api_server_type:
+             raise ValueError(f"Profile '{self.profile}' is not configured. Please run 'occ configure -p {self.profile}' first.")
+
+         self.limit_history = int(get_env(self.profile, "limit_history") or 4)
+         self.chat_log_id = chat_log_id or DEFAULT_CHAT_LOG_ID
+         self.folder_path = os.path.join(get_home_path(), ".occ", self.profile)
+         self.image_folder_path = os.path.join(self.folder_path, "images")
+         self.file_name = os.path.join(self.folder_path, f"{self.chat_log_id}.log")
+         os.makedirs(self.folder_path, exist_ok=True)
+         os.makedirs(self.image_folder_path, exist_ok=True)
+         self.model = model
+         self.current_model_config = None
+
+         if not os.path.exists(self.file_name):
+             open(self.file_name, 'w').close()
+         self.history_path = Path(self.folder_path, self.chat_log_id) / f"md_history_{now}.md"
+         # Load messages and filter out invalid ones (with null role)
+         self.messages = []
+         for line in open(self.file_name):
+             line = line.strip()
+             if line:
+                 try:
+                     msg = json.loads(line)
+                     # Ensure role is valid
+                     if msg.get('role') in ['system', 'assistant', 'user', 'function', 'tool', 'developer']:
+                         self.messages.append(msg)
+                 except json.JSONDecodeError:
+                     continue
+
+         # Initialize client based on API server type
+         if self.api_server_type == "azure-openai":
+             # For Azure OpenAI, we'll initialize client per model in chat method
+             self.client = None
+         elif self.api_server_type == "openai":
+             self.api_key = get_env(self.profile, "api_key")
+             self.api_base = get_env(self.profile, "api_base_url")
+
+             if not self.api_key:
+                 raise ValueError(f"API key not configured for profile '{self.profile}'. Please run 'occ configure -p {self.profile}' first.")
+
+             os.environ.setdefault("OPENAI_API_KEY", self.api_key)
+             os.environ.setdefault("OPENAI_BASE_URL", self.api_base)
+             self.client = OpenAI()
+         else:
+             # Fallback for legacy "azure" type
+             self.api_key = get_env(self.profile, "api_key")
+             self.api_base = get_env(self.profile, "api_base_url")
+
+             if not self.api_key:
+                 raise ValueError(f"API key not configured for profile '{self.profile}'. Please run 'occ configure -p {self.profile}' first.")
+
+             os.environ.setdefault("OPENAI_API_KEY", self.api_key)
+             os.environ.setdefault("OPENAI_BASE_URL", self.api_base)
+             if "azure" == self.api_server_type:
+                 self.client = AzureOpenAI(api_key=self.api_key,
+                                           api_version=get_env(self.profile, "api_version"),
+                                           azure_endpoint=self.api_base)
+             else:
+                 self.client = OpenAI()
+
+     def _get_azure_client(self, model):
+         """Get Azure OpenAI client for a specific model"""
+         from occ.commons.config import get_model_config
+
+         model_config = get_model_config(self.profile, model)
+         if not model_config:
+             raise ValueError(f"Model '{model}' not found in profile '{self.profile}'")
+
+         self.current_model_config = model_config
+         return AzureOpenAI(
+             api_key=model_config['api_key'],
+             api_version=model_config['api_version'],
+             azure_endpoint=model_config['api_base_url']
+         )
+
+     def _is_completions_model(self, model):
+         """Check if model uses completions API instead of chat completions API"""
+         # Azure OpenAI behavior is different from standard OpenAI
+         # For Azure, most models (including codex) use chat completions API
+         if self.api_server_type in ["azure-openai", "azure"]:
+             # Only specific instruct models use completions API in Azure
+             azure_completions_models = [
+                 'gpt-35-turbo-instruct',
+                 'text-davinci-003',
+                 'text-davinci-002',
+             ]
+             return model in azure_completions_models
+
+         # For standard OpenAI
+         completions_models = [
+             'gpt-35-turbo-instruct',
+             'text-davinci-003',
+             'text-davinci-002',
+             'text-curie-001',
+             'text-babbage-001',
+             'text-ada-001',
+         ]
+
+         # Check exact match
+         if model in completions_models:
+             return True
+
+         # Check if model contains 'instruct' or 'davinci' (but not codex for standard OpenAI)
+         # Note: Codex models behavior varies, so we only check by keyword for OpenAI
+         model_lower = model.lower()
+         if any(keyword in model_lower for keyword in ['instruct', 'davinci']):
+             return True
+
+         return False
+
+     def image_create(self, description, size, num):
+         raise NotImplementedError
+
+     def chat(self, message, model):
+         # Initialize Azure client if needed
+         if self.api_server_type == "azure-openai":
+             self.client = self._get_azure_client(model)
+
+         print_formatted_text(HTML(f"<{ASSISTANT_COLOR}>🤖 Assistant: </{ASSISTANT_COLOR}>"))
+
+         # Check if model requires completions API instead of chat completions
+         # Models like gpt-35-turbo-instruct, text-davinci-003, codex variants use completions API
+         if self._is_completions_model(model):
+             self.completions(message, model)
+         else:
+             self.chat_completions(message, model)
+
+     def completions(self, message, model):
+         stream = self.client.completions.create(
+             model=model,
+             prompt=message,
+             max_tokens=4090 - len(message),
+             temperature=0.1,
+             stream=True
+         )
+         completion_text = ''
+         with Live(console=console, refresh_per_second=8) as live:
+             for completion in stream:
+                 for choice in completion.choices:
+                     completion_text += choice.text
+                     print_formatted(completion_text, live)
+         clip.set_text(completion_text)
+         print("\n")
+
+     def chat_completions(self, message, model):
+         message = {"role": "user", "content": message}
+         self.messages.append(message)
+         self.model = model
+         # Reset role for this chat session
+         self.role = None
+         loop = asyncio.new_event_loop()
+         asyncio.set_event_loop(loop)
+         try:
+             final_text = loop.run_until_complete(self.print_streaming(self.async_stream))
+         except KeyboardInterrupt:
+             final_text = None
+         finally:
+             loop.close()
+
+         if final_text is None:
+             console.print("\n[bold red]Stream was interrupted or user exited (no final output).[/bold red]")
+             sys.exit(0)
+         md = Markdown(final_text)
+         self.append_to_history(final_text)
+         console.print(md)
+         clip.set_text(final_text)
+         # Ensure role is always set (default to 'assistant' if not returned by model)
+         response_role = self.role if self.role else "assistant"
+         self.record_chat_logs(message, {"role": response_role, "content": final_text.replace("\n\n", "")})
+
+     async def async_stream(self) -> AsyncGenerator[StreamChunk, None]:
+         """
+         Unified streaming generator that returns StreamChunk objects.
+         Handles both responses API (o1, codex) and chat.completions API (gpt-4, etc.)
+         """
+         # Detect which API to use
+         model_lower = self.model.lower()
+         use_responses_api = (
+             self.model.startswith('o1-') or
+             self.model.startswith('o1') or
+             'codex' in model_lower
+         )
+
+         if use_responses_api:
+             # Use responses API for o1 and codex models
+             response = self.client.responses.create(
+                 model=self.model,
+                 input=self.messages,
+                 stream=True
+             )
+
+             # Handle responses API streaming with event types
+             for event in response:
+                 if hasattr(event, "type"):
+                     match event.type:
+                         case "response.output_text.delta":
+                             # Incremental text output
+                             yield StreamChunk(
+                                 content=event.delta,
+                                 event_type=event.type
+                             )
+                             await asyncio.sleep(0.01)
+
+                         case "response.output_text.done":
+                             # Text output completed
+                             yield StreamChunk(
+                                 finish_reason="stop",
+                                 event_type=event.type
+                             )
+
+                         case "response.output_item.done":
+                             # Item completed - only extract role if status is completed
+                             if hasattr(event, "item"):
+                                 if hasattr(event.item, "status") and event.item.status == 'completed':
+                                     # Only get role when status is completed
+                                     if hasattr(event.item, "role"):
+                                         yield StreamChunk(
+                                             role=event.item.role,
+                                             finish_reason="completed",
+                                             event_type=event.type
+                                         )
+                                     else:
+                                         yield StreamChunk(
+                                             finish_reason="completed",
+                                             event_type=event.type
+                                         )
+
+                         case _:
+                             # Other event types, just pass through
+                             pass
+         else:
+             # Use chat.completions API for regular models
+             params = {
+                 'model': self.model,
+                 'messages': self.messages,
+                 'temperature': 1,
+                 'top_p': 1,
+                 'frequency_penalty': 0.0,
+                 'stream': True
+             }
+
+             response = self.client.chat.completions.create(**params)
+
+             for chunk in response:
+                 if chunk.choices is None or len(chunk.choices) == 0:
+                     continue
+
+                 choice = chunk.choices[0]
+                 delta = choice.delta
+
+                 # Convert to unified StreamChunk format
+                 yield StreamChunk(
+                     content=delta.content if hasattr(delta, 'content') else None,
+                     role=delta.role if hasattr(delta, 'role') else None,
+                     finish_reason=choice.finish_reason
+                 )
+                 await asyncio.sleep(0.01)
+
+     async def print_streaming(self, async_stream):
+         self.partial_text = []
+         text_area = TextArea(
+             text="",
+             wrap_lines=True,
+             read_only=True,
+         )
+         app = Application(layout=Layout(HSplit([text_area])), full_screen=False)
+
+         async def producer():
+             """
+             Process streaming chunks from either API in a unified way.
+             Handles StreamChunk objects regardless of source API.
+             """
+             try:
+                 async for chunk in async_stream():
+                     # Handle finish conditions
+                     if chunk.finish_reason in ("stop", "completed"):
+                         # Extract role before finishing (for responses API)
+                         if chunk.role and self.role is None:
+                             self.role = chunk.role
+                         break
+
+                     # Extract role if provided (usually first chunk for chat.completions)
+                     if chunk.role and self.role is None:
+                         self.role = chunk.role
+
+                     # Append content if available
+                     if chunk.content:
+                         self.partial_text.append(chunk.content)
+                         joined = "".join(self.partial_text)
+                         text_area.text = joined
+                         text_area.buffer.cursor_position = len(text_area.buffer.text)
+                         app.invalidate()
+
+                 # Clear text area and exit
+                 text_area.text = ""
+                 app.invalidate()
+                 app.exit()
+             except asyncio.CancelledError:
+                 app.exit(result=None)
+             except Exception as e:
+                 self.partial_text.append(f"\n\n[ERROR] {e}")
+                 app.exit(result="".join(self.partial_text))
+
+         app.create_background_task(producer())
+         await app.run_async()
+         return "".join(self.partial_text)
+
+     def record_chat_logs(self, content, completion_text):
+         with open(self.file_name, 'r+') as f:
+             lines = f.readlines()
+             if len(lines) >= self.limit_history:
+                 limit_history_ = (len(lines) + 2 - self.limit_history)
+                 with open(os.path.join(self.folder_path, self.chat_log_id + '_history.log'), 'a+') as hf:
+                     hf.writelines("\n")
+                     hf.writelines(lines[:limit_history_])
+                 lines = lines[limit_history_:]
+             if len(lines) == 0:
+                 lines.append('{}\n{}'.format(json.dumps(content, ensure_ascii=False),
+                                              json.dumps(completion_text, ensure_ascii=False)))
+             else:
+                 lines.append('\n{}\n{}'.format(json.dumps(content, ensure_ascii=False),
+                                                json.dumps(completion_text, ensure_ascii=False)))
+             f.seek(0)
+             f.truncate()
+             f.writelines(lines)
+
+     def append_to_history(self, md_text: str):
+         self.history_path.parent.mkdir(parents=True, exist_ok=True)
+         # Append separator + markdown content
+         with self.history_path.open("a", encoding="utf-8") as f:
+             f.write("\n\n---\n\n")
+             f.write(md_text)
+
+     def read_history(self) -> str:
+         if not self.history_path.exists():
+             return ""
+         return self.history_path.read_text(encoding="utf-8")
+
+
+ if __name__ == '__main__':
+     command_chat = CommandChat()
+     command_chat.chat("Write a bubble sort algorithm in Python for me", "o1-mini")
{commandchat-0.0.12 → commandchat-0.0.13}/occ/command/__main__.py
@@ -12,8 +12,7 @@ from pygments.styles.tango import TangoStyle
 
  import occ.utils.logger as logger
  from occ.CommandChat import CommandChat
- from occ.configuration.profile_config import add_profile, add_default_profile
- from occ.utils.CommonUtil import waiting_stop
+
 
  VERSION = importlib.metadata.version("commandchat")
 
@@ -25,12 +24,17 @@ def commandchat_operator():
 
 
  @click.command()
- @click.option('--profile', '-p', help='Enable profile name')
+ @click.option('--profile', '-p', help='Specify profile name to configure')
  def configure(profile):
+     """Configure OpenAI or Azure OpenAI profiles"""
      if profile is not None:
-         add_profile(profile)
+         # If profile is specified, configure it directly
+         from occ.configuration.profile_config import configure_profile
+         configure_profile(profile)
      else:
-         add_default_profile()
+         # No profile specified, show interactive selection
+         from occ.configuration.profile_config import select_profile_interactively
+         select_profile_interactively()
 
 
  @click.command()
@@ -42,6 +46,64 @@ def configure(profile):
  @click.option('--file', '-f', type=click.Path(exists=True), help='the prompt or message is from a file')
  def chat(message, id, profile, model, file):
      try:
+         import questionary
+         from occ.commons import config as cfg
+         from questionary import Style
+
+         custom_style = Style([
+             ('qmark', 'fg:#673ab7 bold'),
+             ('question', 'bold'),
+             ('answer', 'fg:#f44336 bold'),
+             ('pointer', 'fg:#673ab7 bold'),
+             ('highlighted', 'fg:#673ab7 bold'),
+         ])
+
+         # Use default profile if none specified
+         active_profile = profile or "default"
+
+         # Check if profile exists
+         if not cfg.profile_exists(active_profile):
+             logger.log_r(f"Profile '{active_profile}' does not exist. Please run 'occ configure' first.")
+             return
+
+         # Check API server type
+         api_server_type = cfg.get_env(active_profile, 'api_server_type')
+
+         # For azure-openai, check for multiple models
+         if api_server_type == 'azure-openai':
+             available_models = cfg.get_profile_models(active_profile)
+
+             if not available_models:
+                 logger.log_r(f"No models configured for profile '{active_profile}'. Please run 'occ configure' first.")
+                 return
+
+             # If user didn't specify model and there are multiple models, let them choose
+             if not model or model == "o1-mini":  # o1-mini is the default, treat as not specified
+                 if len(available_models) > 1:
+                     # Interactive mode - let user select model
+                     if not message and not file and sys.stdin.isatty():
+                         model = questionary.select(
+                             "Select a model:",
+                             choices=available_models,
+                             style=custom_style
+                         ).ask()
+
+                         if model is None:
+                             logger.log_r("Model selection cancelled.")
+                             return
+                     else:
+                         # Non-interactive mode - use first available model
+                         model = available_models[0]
+                         logger.log_g(f"Using model: {model}")
+                 else:
+                     # Only one model, use it
+                     model = available_models[0]
+             else:
+                 # User specified a model, verify it exists
+                 if model not in available_models:
+                     logger.log_r(f"Model '{model}' not found in profile '{active_profile}'. Available models: {', '.join(available_models)}")
+                     return
+
          if file:
              with open(file, 'r') as f:
                  message = f.read()
@@ -67,15 +129,14 @@ def chat(message, id, profile, model, file):
              if message.lower() in {"/exit", "/quit", "/q"}:
                  print_formatted_text(HTML("<ansired>Bye 👋</ansired>"))
                  exit(0)
-             CommandChat(profile=profile, chat_log_id=id).chat(message, model)
+             CommandChat(profile=active_profile, chat_log_id=id).chat(message, model)
              print()
      except KeyboardInterrupt:
          print_formatted_text(HTML("<ansired>\n(Interrupted)</ansired>"))
          exit(0)
-         CommandChat(profile=profile, chat_log_id=id).chat(message, model)
+         CommandChat(profile=active_profile, chat_log_id=id).chat(message, model)
      except Exception as e:
          logger.log_g(str(e))
-         waiting_stop()
 
 
  size_map = {
{commandchat-0.0.12 → commandchat-0.0.13}/occ/commons/config.py
@@ -71,10 +71,8 @@ def get_env(profile, key):
      if config.has_option('default', key):
          return config.get('default', key)
 
-     logger.debug("No Value Found in DEAFULT SECTION as well")
-     logger.log_r(
-         'Value not found in [Default Profile] use `occ configure`comamnd')
-     exit()
+     logger.debug("No Value Found in DEFAULT SECTION as well")
+     return None
 
 
  def get_default_env(key):
@@ -84,7 +82,64 @@ def get_default_env(key):
 
 
  def get_profiles():
-     return config.sections()
+     """Get all profile names (excluding model-specific sections)"""
+     profiles = []
+     for section in config.sections():
+         # Filter out model-specific sections (e.g., profile_name.model_name)
+         if '.' not in section:
+             profiles.append(section)
+     return profiles
+
+
+ def get_profile_models(profile):
+     """Get all models configured for a specific profile"""
+     models = []
+     prefix = f"{profile}."
+     for section in config.sections():
+         if section.startswith(prefix):
+             model_name = section[len(prefix):]
+             models.append(model_name)
+     return models
+
+
+ def get_model_config(profile, model):
+     """Get configuration for a specific model under a profile"""
+     section = f"{profile}.{model}"
+     if not config.has_section(section):
+         return None
+     return {
+         'api_key': config.get(section, 'api_key', fallback=None),
+         'api_base_url': config.get(section, 'api_base_url', fallback=None),
+         'api_version': config.get(section, 'api_version', fallback=None)
+     }
+
+
+ def set_model_config(profile, model, api_key, api_base_url, api_version):
+     """Set configuration for a specific model under a profile"""
+     section = f"{profile}.{model}"
+     if not config.has_section(section):
+         config.add_section(section)
+     config.set(section, 'api_key', api_key)
+     config.set(section, 'api_base_url', api_base_url)
+     config.set(section, 'api_version', api_version)
+     write_config()
+
+
+ def remove_profile(profile):
+     """Remove a profile and all its associated models"""
+     if config.has_section(profile):
+         config.remove_section(profile)
+     # Remove all model sections
+     prefix = f"{profile}."
+     for section in list(config.sections()):
+         if section.startswith(prefix):
+             config.remove_section(section)
+     write_config()
+
+
+ def profile_exists(profile):
+     """Check if a profile exists"""
+     return config.has_section(profile)
 
 
  def log_config():
commandchat-0.0.13/occ/configuration/profile_config.py
@@ -0,0 +1,262 @@
+ import questionary
+ from questionary import Style
+
+ import occ.commons.config as config
+ import occ.utils.logger as logger
+
+ # Custom style for questionary
+ custom_style = Style([
+     ('qmark', 'fg:#673ab7 bold'),
+     ('question', 'bold'),
+     ('answer', 'fg:#f44336 bold'),
+     ('pointer', 'fg:#673ab7 bold'),
+     ('highlighted', 'fg:#673ab7 bold'),
+     ('selected', 'fg:#cc5454'),
+     ('separator', 'fg:#cc5454'),
+     ('instruction', ''),
+     ('text', ''),
+ ])
+
+ API_SERVER_TYPES = ['openai', 'azure-openai']
+
+
+ def select_profile_interactively():
+     """Show a list of profiles and let user select one"""
+     profiles = config.get_profiles()
+     if not profiles:
+         logger.log_r("No profiles found. Creating default profile...")
+         configure_profile('default')
+         return
+
+     choices = profiles + ['[Create New Profile]']
+     selected = questionary.select(
+         "Select a profile to configure:",
+         choices=choices,
+         style=custom_style
+     ).ask()
+
+     if selected is None:
+         logger.log_r("Configuration cancelled.")
+         return
+
+     if selected == '[Create New Profile]':
+         profile_name = questionary.text(
+             "Enter new profile name:",
+             style=custom_style
+         ).ask()
+         if profile_name:
+             configure_profile(profile_name)
+     else:
+         configure_profile(selected)
+
+
+ def configure_profile(profile_name):
+     """Configure a profile (create new or update existing)"""
+     is_existing = config.profile_exists(profile_name)
+
+     if is_existing:
+         logger.log_g(f"\nConfiguring existing profile: {profile_name}")
+     else:
+         logger.log_g(f"\nCreating new profile: {profile_name}")
+         config.add_profile(profile_name)
+
+     # Common configuration
+     configure_common_settings(profile_name, is_existing)
+
+     # API server type specific configuration
+     api_server_type = config.get_env(profile_name, 'api_server_type') if is_existing else None
+     api_server_type = questionary.select(
+         "Select API server type:",
+         choices=API_SERVER_TYPES,
+         default=api_server_type if api_server_type in API_SERVER_TYPES else API_SERVER_TYPES[0],
+         style=custom_style
+     ).ask()
+
+     if api_server_type is None:
+         logger.log_r("Configuration cancelled.")
+         return
+
+     config.set_env(profile_name, 'api_server_type', api_server_type)
+
+     if api_server_type == 'openai':
+         configure_openai(profile_name, is_existing)
+     elif api_server_type == 'azure-openai':
+         configure_azure_openai(profile_name, is_existing)
+
+     logger.log_g(f"\n✓ Profile '{profile_name}' configured successfully!")
+
+
+ def configure_common_settings(profile_name, is_existing):
+     """Configure common settings for all API types"""
+     default_limit = '4'
+     if is_existing:
+         existing_limit = config.get_env(profile_name, 'limit_history')
+         if existing_limit:
+             default_limit = existing_limit
+
+     limit_history = questionary.text(
+         "Limit history (number of messages to keep):",
+         default=default_limit,
+         style=custom_style
+     ).ask()
+
+     if limit_history:
+         config.set_env(profile_name, 'limit_history', limit_history)
+
+
+ def configure_openai(profile_name, is_existing):
+     """Configure OpenAI specific settings"""
+     default_key = ''
+     default_url = 'https://api.openai.com/v1'
+
+     if is_existing:
+         existing_key = config.get_env(profile_name, 'api_key')
+         existing_url = config.get_env(profile_name, 'api_base_url')
+         if existing_key:
+             default_key = existing_key
+         if existing_url:
+             default_url = existing_url
+
+     api_key = questionary.password(
+         "OpenAI API Key:",
+         default=default_key,
+         style=custom_style
+     ).ask()
+
+     api_base_url = questionary.text(
+         "API Base URL:",
+         default=default_url,
+         style=custom_style
+     ).ask()
+
+     if api_key:
+         config.set_env(profile_name, 'api_key', api_key)
+     if api_base_url:
+         config.set_env(profile_name, 'api_base_url', api_base_url)
+
+
+ def configure_azure_openai(profile_name, is_existing):
+     """Configure Azure OpenAI with multiple models"""
+     logger.log_g("\nConfiguring Azure OpenAI models...")
+
+     # Get existing models
+     existing_models = config.get_profile_models(profile_name)
+
+     while True:
+         if existing_models:
+             logger.log_g(f"\nExisting models: {', '.join(existing_models)}")
+             action = questionary.select(
+                 "What would you like to do?",
+                 choices=[
+                     'Add new model',
+                     'Edit existing model',
+                     'Remove model',
+                     'Finish configuration'
+                 ],
+                 style=custom_style
+             ).ask()
+
+             if action is None or action == 'Finish configuration':
+                 break
+             elif action == 'Add new model':
+                 add_azure_model(profile_name)
+                 existing_models = config.get_profile_models(profile_name)
+             elif action == 'Edit existing model':
+                 model_to_edit = questionary.select(
+                     "Select model to edit:",
+                     choices=existing_models,
+                     style=custom_style
+                 ).ask()
+                 if model_to_edit:
+                     edit_azure_model(profile_name, model_to_edit)
+             elif action == 'Remove model':
+                 model_to_remove = questionary.select(
+                     "Select model to remove:",
+                     choices=existing_models,
+                     style=custom_style
+                 ).ask()
+                 if model_to_remove:
+                     remove_azure_model(profile_name, model_to_remove)
+                     existing_models = config.get_profile_models(profile_name)
+         else:
+             logger.log_g("\nNo models configured yet.")
+             should_add = questionary.confirm(
+                 "Add a model?",
+                 default=True,
+                 style=custom_style
+             ).ask()
+
+             if should_add:
+                 add_azure_model(profile_name)
+                 existing_models = config.get_profile_models(profile_name)
+             else:
+                 break
+
+
+ def add_azure_model(profile_name):
+     """Add a new Azure OpenAI model"""
+     model_name = questionary.text(
+         "Model name (e.g., gpt-4, gpt-35-turbo):",
+         style=custom_style
+     ).ask()
+
+     if not model_name:
+         return
+
+     edit_azure_model(profile_name, model_name)
+
+
+ def edit_azure_model(profile_name, model_name):
+     """Edit Azure OpenAI model configuration"""
+     # Get existing config if available
+     existing_config = config.get_model_config(profile_name, model_name)
+
+     api_key = questionary.password(
+         f"API Key for {model_name}:",
+         default=existing_config.get('api_key', '') if existing_config else '',
+         style=custom_style
+     ).ask()
+
+     api_base_url = questionary.text(
+         f"Azure endpoint URL for {model_name}:",
+         default=existing_config.get('api_base_url', '') if existing_config else '',
+         style=custom_style
+     ).ask()
+
+     api_version = questionary.text(
+         f"API version for {model_name}:",
+         default=existing_config.get('api_version', '2024-02-15-preview') if existing_config else '2024-02-15-preview',
+         style=custom_style
+     ).ask()
+
+     if api_key and api_base_url and api_version:
+         config.set_model_config(profile_name, model_name, api_key, api_base_url, api_version)
+         logger.log_g(f"✓ Model '{model_name}' configured successfully!")
+     else:
+         logger.log_r("Configuration incomplete. Model not saved.")
+
+
+ def remove_azure_model(profile_name, model_name):
+     """Remove an Azure OpenAI model configuration"""
+     confirm = questionary.confirm(
+         f"Are you sure you want to remove model '{model_name}'?",
+         default=False,
+         style=custom_style
+     ).ask()
+
+     if confirm:
+         section = f"{profile_name}.{model_name}"
+         config.config.remove_section(section)
+         config.write_config()
+         logger.log_g(f"✓ Model '{model_name}' removed.")
+
+
+ # Legacy functions for backward compatibility
+ def add_default_profile():
+     """Legacy function - redirects to new configure_profile"""
+     configure_profile('default')
+
+
+ def add_profile(profile_name):
+     """Legacy function - redirects to new configure_profile"""
+     configure_profile(profile_name)
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "commandchat"
7
- version = "0.0.12"
7
+ version = "0.0.13"
8
8
  description = "use command to chat with openai models"
9
9
  readme = "README.md"
10
10
  requires-python = ">=3.8"
@@ -17,7 +17,10 @@ dependencies = [
17
17
  "Image",
18
18
  "openai",
19
19
  "prompt_toolkit",
20
+ "pyperclip",
20
21
  "rich",
22
+ "requests",
23
+ "questionary",
21
24
  ]
22
25
 
23
26
  classifiers = [
commandchat-0.0.12/occ/CommandChat.py
@@ -1,209 +0,0 @@
- import asyncio
- import json
- import os
- import sys
- import time
- from pathlib import Path
- from typing import AsyncGenerator
-
- from openai import AzureOpenAI
- from openai import OpenAI
- from openai.types.chat.chat_completion_chunk import Choice
- from prompt_toolkit import print_formatted_text, HTML, Application
- from prompt_toolkit.clipboard.pyperclip import PyperclipClipboard
- from prompt_toolkit.layout import Layout, HSplit
- from prompt_toolkit.widgets import TextArea
- from rich.console import Console
- from rich.live import Live
- from rich.markdown import Markdown
-
- from occ.commons.config import get_env
-
- DEFAULT_CHAT_LOG_ID = "chat-1"
- DEFAULT_PROFILE = "default"
- USER_COLOR = "ansiyellow"
- ASSISTANT_COLOR = "ansicyan"
- TYPING_DELAY = 0.01  # typing speed (seconds per character)
- SEPARATOR = "─" * 30
-
-
- def get_home_path():
-     homedir = os.environ.get('HOME', None)
-     if os.name == 'nt':
-         homedir = os.path.expanduser('~')
-     return homedir
-
-
- clip = PyperclipClipboard()
- console = Console()
-
- def print_formatted(content: str, live: Live):
-     md = Markdown(content)
-     live.update(md)
-     sys.stdout.flush()
-
-
- class CommandChat:
-     partial_text = []
-     role = None
-
-     def __init__(self, profile=None, chat_log_id=None):
-         now = time.strftime("%Y%m%d", time.localtime())
-         self.api_key = get_env(profile or DEFAULT_PROFILE, "api_key")
-         self.api_base = get_env(profile or DEFAULT_PROFILE, "api_base_url")
-         os.environ.setdefault("OPENAI_API_KEY", self.api_key)
-         os.environ.setdefault("OPENAI_BASE_URL", self.api_base)
-         self.limit_history = int(get_env(profile or DEFAULT_PROFILE, "limit_history") or 4)
-         self.chat_log_id = chat_log_id or DEFAULT_CHAT_LOG_ID
-         self.folder_path = os.path.join(get_home_path(), ".occ", profile or DEFAULT_PROFILE)
-         self.image_folder_path = os.path.join(self.folder_path, "images")
-         self.file_name = os.path.join(self.folder_path, f"{self.chat_log_id}.log")
-         os.makedirs(self.folder_path, exist_ok=True)
-         os.makedirs(self.image_folder_path, exist_ok=True)
-         self.model = None
-         if not os.path.exists(self.file_name):
-             open(self.file_name, 'w').close()
-         self.history_path = Path(self.folder_path, self.chat_log_id) / f"md_history_{now}.md"
-         self.messages = [json.loads(line) for line in (line.strip() for line in open(self.file_name)) if line.strip()]
-         if "azure" == get_env(profile or DEFAULT_PROFILE, "api_server_type"):
-             self.client = AzureOpenAI(api_key=self.api_key,
-                                       api_version=get_env(profile or DEFAULT_PROFILE, "api_version"),
-                                       azure_endpoint=self.api_base)
-         else:
-             self.client = OpenAI()
-
-     def image_create(self, description, size, num):
-         raise NotImplementedError
-
-     def chat(self, message, model):
-         print_formatted_text(HTML(f"<{ASSISTANT_COLOR}>🤖 Assistant: </{ASSISTANT_COLOR}>"))
-         if model == "gpt-35-turbo-instruct":
-             self.completions(message, model)
-         else:
-             self.chat_completions(message, model)
-
-     def completions(self, message, model):
-         stream = self.client.completions.create(
-             model=model,
-             prompt=message,
-             max_tokens=4090 - len(message),
-             temperature=0.1,
-             stream=True
-         )
-         completion_text = ''
-         with Live(console=console, refresh_per_second=8) as live:
-             for completion in stream:
-                 for choice in completion.choices:
-                     completion_text += choice.text
-                     print_formatted(completion_text, live)
-         clip.set_text(completion_text)
-         print("\n")
-
-     def chat_completions(self, message, model):
-         message = {"role": "user", "content": message}
-         self.messages.append(message)
-         self.model = model
-         loop = asyncio.new_event_loop()
-         asyncio.set_event_loop(loop)
-         try:
-             final_text = loop.run_until_complete(self.print_streaming(self.async_stream))
-         except KeyboardInterrupt:
-             final_text = None
-         finally:
-             loop.close()
-
-         if final_text is None:
-             console.print("\n[bold red]Stream was interrupted or user exited (no final output).[/bold red]")
-             sys.exit(0)
-         md = Markdown(final_text)
-         self.append_to_history(final_text)
-         console.print(md)
-         clip.set_text(final_text)
-         self.record_chat_logs(message, {"role": self.role, "content": final_text.replace("\n\n", "")})
-
-     async def async_stream(self) -> AsyncGenerator[Choice, None]:
-         response = self.client.chat.completions.create(
-             model=self.model,
-             messages=self.messages,
-             temperature=1,
-             top_p=1,
-             frequency_penalty=0.0,
-             stream=True
-         )
-         for chunk in response:
-             if chunk.choices is None or len(chunk.choices) == 0:
-                 continue
-             choice = chunk.choices[0]
-             await asyncio.sleep(0.01)
-             yield choice
-
-     async def print_streaming(self, async_stream):
-         self.partial_text = []
-         text_area = TextArea(
-             text="",
-             wrap_lines=True,
-             read_only=True,
-         )
-         app = Application(layout=Layout(HSplit([text_area])), full_screen=False)
-
-         async def producer():
-             try:
-                 async for chunk in async_stream():
-                     delta = chunk.delta
-                     if chunk.finish_reason == "stop": break
-                     if self.role is None and delta.role:
-                         self.role = delta.role
-                     if delta.content:
-                         self.partial_text.append(delta.content)
-                         joined = "".join(self.partial_text)
-                         text_area.text = joined
-                         text_area.buffer.cursor_position = len(text_area.buffer.text)
-                         app.invalidate()
-                 text_area.text = ""
-                 app.invalidate()
-                 app.exit()
-             except asyncio.CancelledError:
-                 app.exit(result=None)
-             except Exception as e:
-                 self.partial_text.append(f"\n\n[ERROR] {e}")
-                 app.exit(result="".join(self.partial_text))
-
-         app.create_background_task(producer())
-         await app.run_async()
-         return "".join(self.partial_text)
-
-     def record_chat_logs(self, content, completion_text):
-         with open(self.file_name, 'r+') as f:
-             lines = f.readlines()
-             if len(lines) >= self.limit_history:
-                 limit_history_ = (len(lines) + 2 - self.limit_history)
-                 with open(os.path.join(self.folder_path, self.chat_log_id + '_history.log'), 'a+') as hf:
-                     hf.writelines("\n")
-                     hf.writelines(lines[:limit_history_])
-                 lines = lines[limit_history_:]
-             if len(lines) == 0:
-                 lines.append('{}\n{}'.format(json.dumps(content, ensure_ascii=False),
-                                              json.dumps(completion_text, ensure_ascii=False)))
-             else:
-                 lines.append('\n{}\n{}'.format(json.dumps(content, ensure_ascii=False),
-                                                json.dumps(completion_text, ensure_ascii=False)))
-             f.seek(0)
-             f.truncate()
-             f.writelines(lines)
-
-     def append_to_history(self, md_text: str):
-         self.history_path.parent.mkdir(parents=True, exist_ok=True)
-         # Append separator + markdown content
-         with self.history_path.open("a", encoding="utf-8") as f:
-             f.write("\n\n---\n\n")
-             f.write(md_text)
-
-     def read_history(self) -> str:
-         if not self.history_path.exists():
-             return ""
-         return self.history_path.read_text(encoding="utf-8")
-
-
- if __name__ == '__main__':
-     command_chat = CommandChat()
-     command_chat.chat("Write a bubble sort algorithm in Python for me", "o1-mini")
commandchat-0.0.12/occ/configuration/profile_config.py
@@ -1,46 +0,0 @@
- import click
-
- import occ.commons.config as config
- import occ.utils.logger as logger
-
-
- def add_default_profile():
-     api_key = click.prompt(logger.style('\n[default] Your default api_key '),
-                            default=config.get_default_env("api_key"), type=str)
-     config.set_env('default', 'api_key', api_key)
-
-     api_base_url = click.prompt(logger.style('\n[default] Your api_base_url(Optional) '),
-                                 default=config.get_default_env("api_base_url"), type=str)
-     config.set_env('default', 'api_base_url', api_base_url)
-
-     limit_history = click.prompt(logger.style('\n[default] Your default limit_history '),
-                                  default=config.get_default_env("limit_history"), type=str)
-     config.set_env('default', 'limit_history', limit_history)
-
-     api_server_type = click.prompt(logger.style('\n[default] Your default api_server_type '),
-                                    default=config.get_default_env("api_server_type"), type=str)
-     config.set_env('default', 'api_server_type', api_server_type)
-
-     api_version = click.prompt(logger.style('\n[default] Your default api_version '),
-                                default=config.get_default_env("api_version"), type=str)
-     config.set_env('default', 'api_version', api_version)
-
-
- def input_config_vars(profile_name, key_name, is_default_exist):
-     if is_default_exist:
-         default_value = config.get_env('default', key_name)
-         user_input = click.prompt(logger.style('\ndefault value ' + key_name), default=default_value, type=str)
-     else:
-         user_input = click.prompt(logger.style('\nYour ' + key_name), type=str)
-
-     config.set_env(profile_name, key_name, user_input)
-
-
- def add_profile(profile_name):
-     config.add_profile(profile_name)
-     config.set_env(profile_name, 'profile_name', profile_name)
-     input_config_vars(profile_name, 'api_key', False)
-     input_config_vars(profile_name, 'api_base_url', False)
-     input_config_vars(profile_name, 'limit_history', False)
-     input_config_vars(profile_name, 'api_server_type', False)
-     input_config_vars(profile_name, 'api_version', False)