pysfi-0.1.10-py3-none-any.whl → pysfi-0.1.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. {pysfi-0.1.10.dist-info → pysfi-0.1.11.dist-info}/METADATA +7 -7
  2. pysfi-0.1.11.dist-info/RECORD +60 -0
  3. {pysfi-0.1.10.dist-info → pysfi-0.1.11.dist-info}/entry_points.txt +12 -2
  4. sfi/__init__.py +1 -1
  5. sfi/alarmclock/alarmclock.py +40 -40
  6. sfi/bumpversion/__init__.py +1 -1
  7. sfi/cleanbuild/cleanbuild.py +155 -0
  8. sfi/condasetup/condasetup.py +116 -0
  9. sfi/docscan/__init__.py +1 -1
  10. sfi/docscan/docscan_gui.py +1 -1
  11. sfi/docscan/lang/eng.py +152 -152
  12. sfi/docscan/lang/zhcn.py +170 -170
  13. sfi/filedate/filedate.py +185 -112
  14. sfi/gittool/__init__.py +2 -0
  15. sfi/gittool/gittool.py +401 -0
  16. sfi/llmclient/llmclient.py +592 -0
  17. sfi/llmquantize/llmquantize.py +480 -0
  18. sfi/llmserver/llmserver.py +335 -0
  19. sfi/makepython/makepython.py +2 -2
  20. sfi/pdfsplit/pdfsplit.py +4 -4
  21. sfi/pyarchive/pyarchive.py +418 -0
  22. sfi/pyembedinstall/pyembedinstall.py +629 -0
  23. sfi/pylibpack/pylibpack.py +813 -269
  24. sfi/pylibpack/rules/numpy.json +22 -0
  25. sfi/pylibpack/rules/pymupdf.json +10 -0
  26. sfi/pylibpack/rules/pyqt5.json +19 -0
  27. sfi/pylibpack/rules/pyside2.json +23 -0
  28. sfi/pylibpack/rules/scipy.json +23 -0
  29. sfi/pylibpack/rules/shiboken2.json +24 -0
  30. sfi/pyloadergen/pyloadergen.py +271 -572
  31. sfi/pypack/pypack.py +822 -471
  32. sfi/pyprojectparse/__init__.py +0 -0
  33. sfi/pyprojectparse/pyprojectparse.py +500 -0
  34. sfi/pysourcepack/pysourcepack.py +308 -369
  35. sfi/quizbase/__init__.py +0 -0
  36. sfi/quizbase/quizbase.py +828 -0
  37. sfi/quizbase/quizbase_gui.py +987 -0
  38. sfi/regexvalidate/__init__.py +0 -0
  39. sfi/regexvalidate/regex_help.html +284 -0
  40. sfi/regexvalidate/regexvalidate.py +468 -0
  41. sfi/taskkill/taskkill.py +0 -2
  42. pysfi-0.1.10.dist-info/RECORD +0 -39
  43. sfi/embedinstall/embedinstall.py +0 -478
  44. sfi/projectparse/projectparse.py +0 -152
  45. {pysfi-0.1.10.dist-info → pysfi-0.1.11.dist-info}/WHEEL +0 -0
  46. /sfi/{embedinstall → llmquantize}/__init__.py +0 -0
  47. /sfi/{projectparse → pyembedinstall}/__init__.py +0 -0
sfi/llmclient/llmclient.py (new file)
@@ -0,0 +1,592 @@
+ """LLM Chat client application.
+
+ Provides a graphical interface client for streaming conversations with LLM servers.
+ Supports real-time streaming response display, connection testing, and parameter adjustment.
+ """
+
+ from __future__ import annotations
+
+ import atexit
+ import json
+ import logging
+ import sys
+ from codecs import getincrementaldecoder
+ from pathlib import Path
+ from types import SimpleNamespace
+ from typing import ClassVar
+ from urllib.error import URLError
+ from urllib.request import Request, urlopen
+
+ from PySide2.QtCore import Qt, QThread, Signal
+ from PySide2.QtGui import QMoveEvent, QResizeEvent, QTextCursor
+ from PySide2.QtWidgets import (
+     QApplication,
+     QDoubleSpinBox,
+     QGroupBox,
+     QHBoxLayout,
+     QLabel,
+     QLineEdit,
+     QMainWindow,
+     QPushButton,
+     QSpinBox,
+     QTextEdit,
+     QVBoxLayout,
+     QWidget,
+ )
+
+ CONFIG_FILE = Path.home() / ".sfi" / "llmclient.json"
+ logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+ logger = logging.getLogger(__name__)
+
+ CONNECTION_TIMEOUT = 5
+
+
+ class LLMClientConfig(SimpleNamespace):
+     """LLM Chat client configuration."""
+
+     TITLE: str = "Llama Local Model Tool"
+     WIN_SIZE: ClassVar[list[int]] = [800, 600]
+     WIN_POS: ClassVar[list[int]] = [100, 100]
+     SERVER_URL: str = "http://localhost:8080"
+     MAX_TOKENS: int = 256
+     TEMPERATURE: float = 0.7
+     TOP_P: float = 0.9
+     TOP_K: int = 40
+
+     MAX_TOKENS_RANGE: ClassVar[list[int]] = [1, 4096]
+     TEMPERATURE_RANGE: ClassVar[list[float]] = [0.0, 2.0]
+     TOP_P_RANGE: ClassVar[list[float]] = [0.0, 1.0]
+     TOP_K_RANGE: ClassVar[list[int]] = [1, 100]
+
+     def __init__(self) -> None:
+         if CONFIG_FILE.exists():
+             logger.info("Loading configuration from %s", CONFIG_FILE)
+             try:
+                 self.__dict__.update(json.loads(CONFIG_FILE.read_text()))
+             except (json.JSONDecodeError, TypeError) as e:
+                 logger.warning("Failed to load configuration: %s", e)
+                 logger.info("Using default configuration")
+         else:
+             logger.info("Using default configuration")
+
+     def save(self) -> None:
+         """Save configuration."""
+         CONFIG_FILE.parent.mkdir(parents=True, exist_ok=True)
+         CONFIG_FILE.write_text(json.dumps(vars(self), indent=4))
+
+
+ conf = LLMClientConfig()
+ atexit.register(conf.save)
+
+
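A note on the persistence model: `save()` serializes `vars(self)`, which holds only attributes set on the instance (loaded from JSON or assigned later by the UI), so class-level defaults never reach disk until something overrides them. A minimal sketch of the round-trip, assuming no config file exists yet:

    conf = LLMClientConfig()   # no file yet, so reads fall back to class attributes
    conf.TEMPERATURE = 0.5     # now an instance attribute, and therefore in vars(conf)
    conf.save()                # writes {"TEMPERATURE": 0.5} to ~/.sfi/llmclient.json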
+ class LLMWorker(QThread):
+     """LLM server communication worker thread.
+
+     Handles HTTP streaming requests in a background thread to avoid blocking the UI
+     main thread. Uses an incremental UTF-8 decoder to correctly handle multi-byte
+     characters split across lines, preventing garbled text from truncated sequences.
+
+     Signals:
+         response_received: Emitted when response content is received; carries the response text
+         error_occurred: Emitted when an error occurs; carries the error message
+         is_finished: Emitted when the request completes
+     """
+
+     response_received = Signal(str)
+     error_occurred = Signal(str)
+     is_finished = Signal()
+
+     def __init__(
+         self,
+         prompt: str,
+         server_url: str,
+         max_tokens: int,
+         temperature: float,
+         top_p: float,
+         top_k: int,
+     ) -> None:
+         """Initialize the LLM worker thread.
+
+         Args:
+             prompt: User input prompt text
+             server_url: LLM server address
+             max_tokens: Maximum number of tokens to generate
+             temperature: Temperature parameter controlling randomness (0.0-2.0)
+             top_p: Nucleus sampling parameter (0.0-1.0)
+             top_k: Number of candidate tokens to retain
+         """
+         super().__init__()
+         self.prompt = prompt
+         self.server_url = server_url
+         self.max_tokens = max_tokens
+         self.temperature = temperature
+         self.top_p = top_p
+         self.top_k = top_k
+         self._is_running = True
+
+     def run(self) -> None:
+         """Execute the streaming HTTP request and process the response.
+
+         Receives the streaming response in Server-Sent Events (SSE) format.
+         Uses an incremental UTF-8 decoder to avoid garbled text from truncated
+         multi-byte sequences, keeping the encoding consistent end to end
+         (request and response both use UTF-8).
+         """
+         try:
+             headers = {"Content-Type": "application/json; charset=utf-8"}
+             data = {
+                 "prompt": self.prompt,
+                 "max_tokens": self.max_tokens,
+                 "temperature": self.temperature,
+                 "top_p": self.top_p,
+                 "top_k": self.top_k,
+                 "stream": True,
+             }
+
+             request = Request(
+                 f"{self.server_url}/completion",
+                 data=json.dumps(data, ensure_ascii=False).encode("utf-8"),
+                 headers=headers,
+             )
+
+             with urlopen(request) as response:
+                 if response.status != 200:
+                     error_text = response.read().decode("utf-8")
+                     self.error_occurred.emit(
+                         f"Error: {response.status} - {error_text}",
+                     )
+                     return
+
+                 decoder = getincrementaldecoder("utf-8")(errors="replace")
+                 buffer = ""
+
+                 for line in response:
+                     if not self._is_running:
+                         break
+
+                     if line:
+                         try:
+                             decoded_line = decoder.decode(line, False).strip()
+                             if not decoded_line:
+                                 continue
+
+                             if decoded_line.startswith("data: "):
+                                 json_str = decoded_line[6:]
+                                 try:
+                                     json_data = json.loads(json_str)
+                                     content = json_data.get("content", "")
+                                     if content:
+                                         buffer += content
+                                         self.response_received.emit(buffer)
+                                 except json.JSONDecodeError as e:
+                                     logger.debug(
+                                         f"JSON parsing failed: {json_str}, error: {e}"
+                                     )
+                                     continue
+                         except Exception as e:
+                             logger.debug(f"Line processing failed: {e}")
+                             continue
+
+                 decoder.decode(b"", True)
+
+         except URLError as e:
+             logger.error(f"Connection error: {e.reason}")
+             self.error_occurred.emit(f"Connection error: {e.reason}")
+         except Exception as e:
+             logger.error(f"Request error: {e!s}")
+             self.error_occurred.emit(f"Request error: {e!s}")
+         finally:
+             self.is_finished.emit()
+
+     def stop(self) -> None:
+         """Stop worker thread execution."""
+         self._is_running = False
+
+
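What the streaming loop above consumes, in isolation: llama-server style SSE lines prefixed with `data: `, fed through an incremental decoder so a UTF-8 sequence split across reads still decodes cleanly. A standalone sketch with hypothetical payloads:

    import json
    from codecs import getincrementaldecoder

    dec = getincrementaldecoder("utf-8")(errors="replace")
    assert dec.decode(b"\xe4\xb8", False) == ""    # incomplete U+4E2D is buffered, not garbled
    assert dec.decode(b"\xad", False) == "\u4e2d"  # completed by the next chunk

    line = dec.decode(b'data: {"content": "Hello"}\n', False).strip()
    if line.startswith("data: "):
        print(json.loads(line[6:])["content"])     # -> Hello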
+ class ConnectionTestWorker(QThread):
+     """Server connection test worker thread.
+
+     Tests in a background thread whether the LLM server's health check endpoint is reachable.
+
+     Signals:
+         result_ready: Emitted when the test completes; carries (success flag, message text)
+     """
+
+     result_ready = Signal(bool, str)
+
+     def __init__(self, server_url: str) -> None:
+         """Initialize the connection test worker thread.
+
+         Args:
+             server_url: LLM server address
+         """
+         super().__init__()
+         self.server_url = server_url
+
+     def run(self) -> None:
+         """Execute the connection test by querying the server's health check endpoint."""
+         try:
+             request = Request(f"{self.server_url}/health")
+             with urlopen(request, timeout=CONNECTION_TIMEOUT) as response:
+                 if response.status == 200:
+                     self.result_ready.emit(True, "Connection successful!")
+                 else:
+                     self.result_ready.emit(
+                         False, f"Connection failed: {response.status}"
+                     )
+         except URLError as e:
+             logger.error(f"Connection error: {e.reason}")
+             self.result_ready.emit(False, f"Connection error: {e.reason}")
+         except Exception as e:
+             logger.error(f"Request error: {e!s}")
+             self.result_ready.emit(False, f"Request error: {e!s}")
+
+
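The same probe works without Qt; a minimal sketch against the default server address from the config (assumes a llama-server instance is actually listening there):

    from urllib.request import urlopen

    with urlopen("http://localhost:8080/health", timeout=5) as r:
        print(r.status)  # 200 when the server reports healthy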
+ class LLMChatApp(QMainWindow):
+     """LLM Chat client main window.
+
+     Provides a graphical interface for interacting with an LLM server. Supports:
+     - Real-time streaming response display (incremental updates avoid repeated rendering)
+     - Connection testing
+     - Model parameter adjustment (temperature, top-p, top-k, etc.)
+     - Conversation history tracking
+     """
+
+     def __init__(self) -> None:
+         """Initialize the LLM Chat main window."""
+         super().__init__()
+         self.setWindowTitle(conf.TITLE)
+         self.setGeometry(*conf.WIN_POS, *conf.WIN_SIZE)
+
+         self.init_ui()
+
+         self.worker_thread: LLMWorker | None = None
+         self.test_thread: ConnectionTestWorker | None = None
+         self.current_ai_start_pos = -1
+
+     def init_ui(self) -> None:
+         """Initialize user interface components."""
+         main_widget = QWidget()
+         main_layout = QVBoxLayout()
+         main_widget.setLayout(main_layout)
+         self.setCentralWidget(main_widget)
+
+         # Build the interface sections
+         main_layout.addWidget(self._create_server_group())
+         main_layout.addWidget(self._create_params_group())
+         main_layout.addWidget(self._create_chat_display())
+         main_layout.addLayout(self._create_input_layout())
+
+         self.statusBar().showMessage("Ready")
+
+     def _create_server_group(self) -> QGroupBox:
+         """Create the server settings group.
+
+         Returns:
+             Group box containing the server address input and test connection button
+         """
+         server_group = QGroupBox("Server Settings")
+         server_layout = QHBoxLayout()
+
+         self.server_url_input = QLineEdit(conf.SERVER_URL)
+         self.server_url_input.setPlaceholderText("Enter llama-server address")
+         self.server_url_input.textChanged.connect(self.on_config_changed)
+
+         self.test_connection_btn = QPushButton("Test Connection")
+         self.test_connection_btn.clicked.connect(self.test_connection)
+
+         server_layout.addWidget(QLabel("Server Address:"))
+         server_layout.addWidget(self.server_url_input)
+         server_layout.addWidget(self.test_connection_btn)
+         server_group.setLayout(server_layout)
+
+         return server_group
+
+     def _create_params_group(self) -> QGroupBox:
+         """Create the model parameter settings group.
+
+         Returns:
+             Group box containing all model parameter adjustment controls
+         """
+         params_group = QGroupBox("Model Parameters")
+         params_layout = QHBoxLayout()
+
+         self.max_tokens_spin = QSpinBox()
+         self.max_tokens_spin.setRange(*conf.MAX_TOKENS_RANGE)
+         self.max_tokens_spin.setValue(conf.MAX_TOKENS)
+         self.max_tokens_spin.valueChanged.connect(self.on_config_changed)
+
+         self.temperature_spin = QDoubleSpinBox()
+         self.temperature_spin.setRange(*conf.TEMPERATURE_RANGE)
+         self.temperature_spin.setSingleStep(0.1)
+         self.temperature_spin.setValue(conf.TEMPERATURE)
+         self.temperature_spin.valueChanged.connect(self.on_config_changed)
+
+         self.top_p_spin = QDoubleSpinBox()
+         self.top_p_spin.setRange(*conf.TOP_P_RANGE)
+         self.top_p_spin.setSingleStep(0.05)
+         self.top_p_spin.setValue(conf.TOP_P)
+         self.top_p_spin.valueChanged.connect(self.on_config_changed)
+
+         self.top_k_spin = QSpinBox()
+         self.top_k_spin.setRange(*conf.TOP_K_RANGE)
+         self.top_k_spin.setValue(conf.TOP_K)
+         self.top_k_spin.valueChanged.connect(self.on_config_changed)
+
+         params_layout.addWidget(QLabel("Max Tokens:"))
+         params_layout.addWidget(self.max_tokens_spin)
+         params_layout.addWidget(QLabel("Temperature:"))
+         params_layout.addWidget(self.temperature_spin)
+         params_layout.addWidget(QLabel("Top P:"))
+         params_layout.addWidget(self.top_p_spin)
+         params_layout.addWidget(QLabel("Top K:"))
+         params_layout.addWidget(self.top_k_spin)
+         params_group.setLayout(params_layout)
+
+         return params_group
+
+     def _create_chat_display(self) -> QTextEdit:
+         """Create the chat display area.
+
+         Returns:
+             Read-only text display widget
+         """
+         self.chat_display = QTextEdit()
+         self.chat_display.setReadOnly(True)
+         self.chat_display.setStyleSheet("font-family: monospace;")
+         return self.chat_display
+
+     def _create_input_layout(self) -> QHBoxLayout:
+         """Create the user input area layout.
+
+         Returns:
+             Layout containing the input box and send/stop buttons
+         """
+         input_layout = QHBoxLayout()
+
+         self.user_input = QLineEdit()
+         self.user_input.setPlaceholderText("Enter your prompt...")
+         self.user_input.returnPressed.connect(self.send_prompt)
+
+         self.send_btn = QPushButton("Send")
+         self.send_btn.clicked.connect(self.send_prompt)
+
+         self.stop_btn = QPushButton("Stop")
+         self.stop_btn.clicked.connect(self.stop_generation)
+         self.stop_btn.setEnabled(False)
+
+         input_layout.addWidget(self.user_input)
+         input_layout.addWidget(self.send_btn)
+         input_layout.addWidget(self.stop_btn)
+
+         return input_layout
+
+     def on_config_changed(self) -> None:
+         """Write current widget values through to the configuration."""
+         conf.SERVER_URL = self.server_url_input.text().strip()
+         conf.MAX_TOKENS = self.max_tokens_spin.value()
+         conf.TEMPERATURE = self.temperature_spin.value()
+         conf.TOP_P = self.top_p_spin.value()
+         conf.TOP_K = self.top_k_spin.value()
+
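Every widget signal funnels into this one handler, so `conf` always mirrors the UI, and the `atexit.register(conf.save)` hook near the top of the module persists the final state exactly once at interpreter shutdown. The flow for a single change (hypothetical value):

    # temperature_spin.valueChanged(0.5)
    #   -> on_config_changed() -> conf.TEMPERATURE = 0.5
    # interpreter exit
    #   -> conf.save() -> ~/.sfi/llmclient.json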
+     def test_connection(self) -> None:
+         """Test the connection to the LLM server.
+
+         Sends a health check request in a background thread to avoid blocking the UI.
+         """
+         server_url = self.server_url_input.text().strip()
+         if not server_url:
+             self.statusBar().showMessage("Please enter server address")
+             return
+
+         if self.test_thread and self.test_thread.isRunning():
+             self.statusBar().showMessage("Test in progress...")
+             return
+
+         self.test_thread = ConnectionTestWorker(server_url)
+         self.test_thread.result_ready.connect(self.on_connection_test_result)
+         self.test_thread.finished.connect(self.on_test_thread_finished)
+
+         self.statusBar().showMessage("Testing connection...")
+         self.test_connection_btn.setEnabled(False)
+
+         self.test_thread.start()
+
+     def on_test_thread_finished(self) -> None:
+         """Handle test thread completion and clean up resources."""
+         if self.test_thread:
+             self.test_thread.quit()
+             self.test_thread.wait()
+             self.test_thread = None
+
+     def on_connection_test_result(self, success: bool, message: str) -> None:
+         """Handle the connection test result.
+
+         Args:
+             success: Whether the connection succeeded
+             message: Result message text
+         """
+         self.statusBar().showMessage(message)
+         self.test_connection_btn.setEnabled(True)
+
+     def send_prompt(self) -> None:
+         """Send the user's prompt to the LLM server.
+
+         Creates a worker thread to handle the streaming response and updates the UI
+         to display the conversation. Resets the AI reply start position to avoid
+         repeated rendering issues.
+         """
+         if self.worker_thread and self.worker_thread.isRunning():
+             self.statusBar().showMessage("Please wait for current request to complete")
+             return
+
+         prompt = self.user_input.text().strip()
+         if not prompt:
+             self.statusBar().showMessage("Please enter prompt")
+             return
+
+         self.current_ai_start_pos = -1
+
+         server_url = self.server_url_input.text().strip()
+         max_tokens = self.max_tokens_spin.value()
+         temperature = self.temperature_spin.value()
+         top_p = self.top_p_spin.value()
+         top_k = self.top_k_spin.value()
+
+         self._display_user_input(prompt, server_url)
+         self.user_input.clear()
+
+         logger.info(f"Sending prompt: {prompt}")
+         logger.info(
+             f"Parameters: max_tokens={max_tokens}, temperature={temperature}, top_p={top_p}, top_k={top_k}"
+         )
+
+         self.worker_thread = LLMWorker(
+             prompt=prompt,
+             server_url=server_url,
+             max_tokens=max_tokens,
+             temperature=temperature,
+             top_p=top_p,
+             top_k=top_k,
+         )
+
+         self.worker_thread.response_received.connect(self.update_response)
+         self.worker_thread.error_occurred.connect(self.handle_error)
+         self.worker_thread.is_finished.connect(self.on_finished)
+
+         self.send_btn.setEnabled(False)
+         self.stop_btn.setEnabled(True)
+         self.statusBar().showMessage("Generating response...")
+
+         self.worker_thread.start()
+
+     def _display_user_input(self, prompt: str, server_url: str) -> None:
+         """Display the user input and target server in the chat area.
+
+         Args:
+             prompt: User input prompt text
+             server_url: Target server address
+         """
+         cursor = self.chat_display.textCursor()
+         cursor.movePosition(QTextCursor.End)
+         self.chat_display.setTextCursor(cursor)
+
+         self.chat_display.setTextColor(Qt.blue)
+         self.chat_display.insertPlainText(f"You: {prompt}\n")
+
+         self.chat_display.setTextColor(Qt.darkGray)
+         self.chat_display.insertPlainText(f"[Sending to {server_url}]\n")
+
+         self.chat_display.setTextColor(Qt.black)
+         self.chat_display.insertPlainText("AI:")
+
+     def stop_generation(self) -> None:
+         """Stop the currently running response generation."""
+         if self.worker_thread and self.worker_thread.isRunning():
+             self.worker_thread.stop()
+             self.statusBar().showMessage("Generation stopped")
+
+     def update_response(self, text: str) -> None:
+         """Update the AI response content in the chat display area.
+
+         Uses an incremental update strategy that replaces only the AI reply portion
+         of the text, avoiding the cost of redrawing the entire text area on every chunk.
+
+         Args:
+             text: Complete AI-generated response text so far
+         """
+         cursor = self.chat_display.textCursor()
+
+         if self.current_ai_start_pos == -1:
+             cursor.movePosition(QTextCursor.End)
+             self.current_ai_start_pos = cursor.position()
+             self.chat_display.insertPlainText(f" {text}")
+         else:
+             cursor.setPosition(self.current_ai_start_pos)
+             cursor.movePosition(QTextCursor.End, QTextCursor.KeepAnchor)
+             cursor.removeSelectedText()
+             self.chat_display.setTextCursor(cursor)
+             self.chat_display.insertPlainText(f" {text}")
+
+         self.chat_display.ensureCursorVisible()
+
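Why this handler replaces rather than appends: `LLMWorker` accumulates the stream into `buffer` and emits the cumulative text on every chunk, so appending would duplicate earlier content. A trace of one reply with hypothetical chunks:

    # response_received("Hel")    -> first call: remember current_ai_start_pos, insert " Hel"
    # response_received("Hello")  -> select from current_ai_start_pos to the end,
    #                                delete the selection, insert " Hello"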
+     def append_to_chat(self, text: str, *, is_user: bool = False) -> None:
+         """Append text to the chat area.
+
+         Args:
+             text: Text content to append
+             is_user: Whether this is a user message (for color setting)
+         """
+         cursor = self.chat_display.textCursor()
+         cursor.movePosition(QTextCursor.End)
+         self.chat_display.setTextCursor(cursor)
+
+         if is_user:
+             self.chat_display.setTextColor(Qt.blue)
+         else:
+             self.chat_display.setTextColor(Qt.black)
+
+         self.chat_display.insertPlainText(text + "\n")
+         self.chat_display.setTextColor(Qt.black)
+
+     def handle_error(self, error_msg: str) -> None:
+         """Handle error messages.
+
+         Args:
+             error_msg: Error message text
+         """
+         self.append_to_chat(f"Error: {error_msg}")
+         self.statusBar().showMessage(error_msg)
+
+     def on_finished(self) -> None:
+         """Handle generation completion: restore UI state and clean up resources."""
+         self.send_btn.setEnabled(True)
+         self.stop_btn.setEnabled(False)
+         self.statusBar().showMessage("Generation complete")
+         self.append_to_chat("")
+
+         if self.worker_thread:
+             self.worker_thread.quit()
+             self.worker_thread.wait()
+             self.worker_thread = None
+
+     def moveEvent(self, event: QMoveEvent) -> None:  # noqa: N802
+         """Handle window move event."""
+         top_left = self.geometry().topLeft()
+         conf.WIN_POS = [top_left.x(), top_left.y()]
+         return super().moveEvent(event)
+
+     def resizeEvent(self, event: QResizeEvent) -> None:  # noqa: N802
+         """Handle window resize event."""
+         geometry = self.geometry()
+         conf.WIN_SIZE = [geometry.width(), geometry.height()]
+         return super().resizeEvent(event)
+
+
+ def main() -> None:
+     """Application entry point."""
+     app = QApplication(sys.argv)
+     app.setStyle("Fusion")
+
+     window = LLMChatApp()
+     window.show()
+
+     sys.exit(app.exec_())
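The module ends without a `__main__` guard, so `main()` is presumably reached through one of the console scripts added to entry_points.txt in this release. A hypothetical direct invocation:

    from sfi.llmclient.llmclient import main

    main()  # blocks in the Qt event loop, then exits via sys.exit(app.exec_())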