ngpt 2.16.5__tar.gz → 2.16.6__tar.gz
This diff compares the contents of two package versions as published to their public registry. It is provided for informational purposes only.
- {ngpt-2.16.5 → ngpt-2.16.6}/PKG-INFO +1 -1
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/api/cli.md +32 -6
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/examples/advanced.md +119 -111
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/examples/cli_components.md +37 -4
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/usage/library_usage.md +23 -9
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/modes/rewrite.py +0 -2
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/modes/text.py +0 -2
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/renderers.py +9 -3
- {ngpt-2.16.5 → ngpt-2.16.6}/pyproject.toml +1 -1
- {ngpt-2.16.5 → ngpt-2.16.6}/uv.lock +1 -1
- {ngpt-2.16.5 → ngpt-2.16.6}/.github/workflows/python-publish.yml +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/.gitignore +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/.python-version +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/COMMIT_GUIDELINES.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/CONTRIBUTING.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/LICENSE +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/README.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/CONTRIBUTING.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/LICENSE.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/README.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/_config.yml +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/api/README.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/api/cli_config.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/api/client.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/api/config.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/api/logging.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/assets/css/style.scss +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/configuration.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/examples/README.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/examples/basic.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/examples/integrations.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/installation.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/overview.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/usage/README.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/usage/cli_config.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/usage/cli_framework.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/usage/cli_usage.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/docs/usage/gitcommsg.md +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/__init__.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/__main__.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/__init__.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/args.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/config_manager.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/formatters.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/interactive.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/main.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/modes/__init__.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/modes/chat.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/modes/code.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/modes/gitcommsg.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/modes/shell.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/ui.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/client.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/utils/__init__.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/utils/cli_config.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/utils/config.py +0 -0
- {ngpt-2.16.5 → ngpt-2.16.6}/ngpt/utils/log.py +0 -0
{ngpt-2.16.5 → ngpt-2.16.6}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ngpt
-Version: 2.16.5
+Version: 2.16.6
 Summary: Swiss army knife for LLMs: powerful CLI, interactive chatbot, and flexible Python library. Works with OpenAI, Ollama, Groq, Claude, and any OpenAI-compatible API.
 Project-URL: Homepage, https://github.com/nazdridoy/ngpt
 Project-URL: Repository, https://github.com/nazdridoy/ngpt
```
{ngpt-2.16.5 → ngpt-2.16.6}/docs/api/cli.md

````diff
@@ -160,7 +160,7 @@ from ngpt.cli.renderers import prettify_streaming_markdown
 def prettify_streaming_markdown(renderer='rich', is_interactive=False, header_text=None)
 ```
 
-Creates a streaming markdown renderer that updates in real-time.
+Creates a streaming markdown renderer that updates in real-time with loading spinner functionality.
 
 **Parameters:**
 - `renderer` (str): Which renderer to use ('auto', 'rich', 'glow')
@@ -168,21 +168,47 @@ Creates a streaming markdown renderer that updates in real-time.
 - `header_text` (str, optional): Header text to display above the content
 
 **Returns:**
-- …
+- tuple: (live_display, update_function, setup_spinner_func) if successful, (None, None, None) otherwise
+  - `live_display`: The rich.Live display object for controlling the display lifecycle
+  - `update_function`: Function to call with updated content that will refresh the display
+  - `setup_spinner_func`: Function to set up a spinner while waiting for first content
 
 **Example:**
 ```python
 from ngpt import NGPTClient, load_config
 from ngpt.cli.renderers import prettify_streaming_markdown
+import threading
 
 client = NGPTClient(**load_config())
-streamer = prettify_streaming_markdown(renderer='rich')
 
-…
-…
+# Get components for streaming display with spinner
+live_display, update_function, setup_spinner = prettify_streaming_markdown(renderer='rich')
+
+# Set up spinner (optional)
+stop_spinner_event = threading.Event()
+stop_spinner_func = None
+if setup_spinner:
+    stop_spinner_func = setup_spinner(stop_spinner_event, "Waiting for response...")
+
+# The update_function will automatically:
+# 1. Start the live display when first content arrives
+# 2. Stop the spinner when first content arrives
+# 3. Update the display with new content
+
+# Use with client
+response = client.chat(
+    "Explain quantum computing",
     stream=True,
-    stream_callback=…
+    stream_callback=update_function
 )
+
+# Ensure spinner is stopped if no content was received
+if not stop_spinner_event.is_set():
+    stop_spinner_event.set()
+
+# Stop the display when done
+if live_display:
+    live_display.stop()
 ```
 
 ### `has_markdown_renderer`
````
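Note that the documented return value changes shape here: callers written against 2.16.5, which unpacked a 2-tuple, will raise a `ValueError` when unpacking the new 3-tuple. A minimal defensive sketch of a consumer, assuming only the signature and return shape documented in the diff above (the variable names and the fallback branch are illustrative, not part of the package):

```python
# Hypothetical consumer of the new 3-tuple return; assumes only the
# prettify_streaming_markdown signature documented above.
import threading

from ngpt import NGPTClient, load_config
from ngpt.cli.renderers import prettify_streaming_markdown

client = NGPTClient(**load_config())
live_display, update_function, setup_spinner = prettify_streaming_markdown(renderer='rich')

if update_function is None:
    # Documented failure value is (None, None, None): fall back to plain streaming.
    for chunk in client.chat("Explain quantum computing", stream=True):
        print(chunk, end="", flush=True)
    print()
else:
    stop_spinner_event = threading.Event()
    if setup_spinner:
        setup_spinner(stop_spinner_event, "Waiting for response...")

    client.chat(
        "Explain quantum computing",
        stream=True,
        stream_callback=update_function,
    )

    # Per the docs above, the first chunk stops the spinner; make sure it is
    # stopped even if no content arrived, then close the live display.
    if not stop_spinner_event.is_set():
        stop_spinner_event.set()
    if live_display:
        live_display.stop()
```

The same unpacking change is what the documentation updates below apply across advanced.md, cli_components.md, and library_usage.md.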
{ngpt-2.16.5 → ngpt-2.16.6}/docs/examples/advanced.md

```diff
@@ -217,38 +217,33 @@ def custom_streaming_markdown():
     # Create a Rich console
     console = Console()
 
-    # Initialize …
-…
+    # Initialize a real-time markdown renderer
+    live_display, update_function, setup_spinner = prettify_streaming_markdown(renderer='rich')
 
-    # …
-…
+    # Set up spinner for waiting period
+    import threading
+    stop_spinner_event = threading.Event()
+    if setup_spinner:
+        stop_spinner_func = setup_spinner(stop_spinner_event, "Waiting for response...")
 
-…
-    ]
+    # Start the live display
+    markdown_renderer.start()
 
-…
-        "",
-        messages=messages,
-        stream=True,
-        markdown_format=True,
-        stream_callback=markdown_renderer.update_content
-    )
+    # Stream the response with real-time rendering
+    for chunk in client.chat(
+        "Explain quantum computing briefly",
+        stream=True
+    ):
+        full_response += chunk
+        update_function(full_response)
+
+    # Ensure spinner is stopped if still running
+    if not stop_spinner_event.is_set():
+        stop_spinner_event.set()
 
-…
+    # Stop the live display when done
+    if live_display:
+        live_display.stop()
 
 if __name__ == "__main__":
     custom_streaming_markdown()
@@ -584,101 +579,114 @@ def main():
         log_file=log_file
     )
 
-    # …
-…
+    # Real-time prettified markdown mode
+    if args.stream_prettify:
+        live_display, update_function, setup_spinner = prettify_streaming_markdown(renderer='rich')
+
+        # Set up spinner for waiting period
+        import threading
+        stop_spinner_event = threading.Event()
+        if setup_spinner:
+            stop_spinner_func = setup_spinner(stop_spinner_event, "Waiting for response...")
+
+        response = client.chat(
+            prompt,
+            temperature=args.temperature,
+            stream=True,
+            stream_callback=update_function,
+            markdown_format=True,
+            web_search=args.web_search,
+            messages=messages
+        )
+
+        # Ensure spinner is stopped if still running
+        if not stop_spinner_event.is_set():
+            stop_spinner_event.set()
+
+        # Stop the live display when done
+        if live_display:
+            live_display.stop()
+
+        if args.output:
+            with open(args.output, 'w') as f:
+                f.write(response)
+            print(f"{COLORS['green']}Response saved to {args.output}{COLORS['reset']}")
+
+        if log_file:
+            log_file.write(f"Assistant: {response}\n\n")
+
+    # Basic prettify mode
+    elif args.prettify:
+        from rich.markdown import Markdown
+        from rich.console import Console
+
+        console = Console()
+
+        if args.output:
+            # No streaming if saving to file
             response = client.chat(
                 prompt,
-                temperature=args.temperature,
-                stream=…
-                stream_callback=streamer.update_content,
-                markdown_format=True,
+                temperature=args.temperature,
+                stream=False,
                 web_search=args.web_search,
+                markdown_format=True,
                 messages=messages
             )
+            with open(args.output, 'w') as f:
+                f.write(response)
+            print(f"{COLORS['green']}Response saved to {args.output}{COLORS['reset']}")
 
-        if args.output:
-            with open(args.output, 'w') as f:
-                f.write(response)
-            print(f"{COLORS['green']}Response saved to {args.output}{COLORS['reset']}")
-
            if log_file:
                log_file.write(f"Assistant: {response}\n\n")
-…
+        else:
+            # Use rich to render markdown after completion
+            response = client.chat(
+                prompt,
+                temperature=args.temperature,
+                stream=False,
+                web_search=args.web_search,
+                markdown_format=True,
+                messages=messages
+            )
+            console.print(Markdown(response))
 
-…
+            if log_file:
+                log_file.write(f"Assistant: {response}\n\n")
+
+    # Simple mode
+    else:
+        if args.output:
+            # No streaming if saving to file
+            response = client.chat(
+                prompt,
+                temperature=args.temperature,
+                stream=False,
+                web_search=args.web_search,
+                messages=messages
+            )
+            with open(args.output, 'w') as f:
+                f.write(response)
+            print(f"{COLORS['green']}Response saved to {args.output}{COLORS['reset']}")
 
-        if …
-…
-            response = client.chat(
-                prompt,
-                temperature=args.temperature,
-                stream=False,
-                web_search=args.web_search,
-                markdown_format=True,
-                messages=messages
-            )
-            with open(args.output, 'w') as f:
-                f.write(response)
-            print(f"{COLORS['green']}Response saved to {args.output}{COLORS['reset']}")
-
-            if log_file:
-                log_file.write(f"Assistant: {response}\n\n")
-        else:
-            # Use rich to render markdown after completion
-            response = client.chat(
-                prompt,
-                temperature=args.temperature,
-                stream=False,
-                web_search=args.web_search,
-                markdown_format=True,
-                messages=messages
-            )
-            console.print(Markdown(response))
-
-            if log_file:
-                log_file.write(f"Assistant: {response}\n\n")
-
-    # Simple mode
+            if log_file:
+                log_file.write(f"Assistant: {response}\n\n")
        else:
-…
-            if log_file:
-                log_file.write(f"Assistant: {response}\n\n")
-        else:
-            # Stream to console
-            full_response = ""
-            for chunk in client.chat(
-                prompt,
-                temperature=args.temperature,
-                stream=True,
-                web_search=args.web_search,
-                messages=messages
-            ):
-                print(chunk, end="", flush=True)
-                full_response += chunk
-            print() # Final newline
-
-            if log_file:
-                log_file.write(f"Assistant: {full_response}\n\n")
+            # Stream to console
+            full_response = ""
+            for chunk in client.chat(
+                prompt,
+                temperature=args.temperature,
+                stream=True,
+                web_search=args.web_search,
+                messages=messages
+            ):
+                print(chunk, end="", flush=True)
+                full_response += chunk
+            print() # Final newline
 
+            if log_file:
+                log_file.write(f"Assistant: {full_response}\n\n")
+
     if log_file:
         log_file.close()
 
```
{ngpt-2.16.5 → ngpt-2.16.6}/docs/examples/cli_components.md

```diff
@@ -260,17 +260,35 @@ CODE:
 # Use streaming markdown renderer if Rich is available
 if has_rich and has_markdown_renderer(renderer='rich'):
     # Create a streaming markdown renderer
-…
+    live_display, update_function, setup_spinner = prettify_streaming_markdown(
         renderer='rich',
         header_text=f"Documentation for {args.file}"
     )
 
+    # Setup spinner for waiting period
+    import threading
+    stop_spinner_event = threading.Event()
+    stop_spinner_func = None
+    if setup_spinner:
+        stop_spinner_func = setup_spinner(stop_spinner_event, "Generating documentation...")
+
     # Stream the response with live updating
     try:
         for chunk in client.chat(prompt, stream=True):
             full_response += chunk
-…
+            update_function(full_response)
+
+        # Ensure spinner is stopped if still running
+        if not stop_spinner_event.is_set():
+            stop_spinner_event.set()
+
+        # Stop the display when done
+        if live_display:
+            live_display.stop()
     except Exception as e:
+        # Ensure spinner is stopped on error
+        if not stop_spinner_event.is_set():
+            stop_spinner_event.set()
         print(f"\nError generating documentation: {e}", file=sys.stderr)
         sys.exit(1)
 else:
@@ -511,11 +529,18 @@ Return ONLY the improved text without explanations or notes."""
     if args.stream:
         # Stream with live updates
         if args.prettify:
-…
+            live_display, update_function, setup_spinner = prettify_streaming_markdown(
                 renderer='rich',
                 header_text="Improved Text"
             )
 
+            # Setup spinner for waiting period
+            import threading
+            stop_spinner_event = threading.Event()
+            stop_spinner_func = None
+            if setup_spinner:
+                stop_spinner_func = setup_spinner(stop_spinner_event, "Improving text...")
+
             full_response = ""
             for chunk in client.chat(
                 text,
@@ -523,7 +548,15 @@ Return ONLY the improved text without explanations or notes."""
                 stream=True
             ):
                 full_response += chunk
-…
+                update_function(full_response)
+
+            # Ensure spinner is stopped if still running
+            if not stop_spinner_event.is_set():
+                stop_spinner_event.set()
+
+            # Stop the display when done
+            if live_display:
+                live_display.stop()
 
             improved_text = full_response
         else:
```
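All of these examples coordinate with the spinner through a shared `threading.Event`: `setup_spinner` starts the animation, and setting the event stops it (which the first streamed chunk does automatically). ngpt's actual spinner lives behind `setup_spinner`; the following standalone sketch only illustrates that stop-event protocol, with every name here hypothetical:

```python
# Self-contained sketch of the stop-event spinner pattern used in the
# examples above: a worker thread animates until the Event is set.
import itertools
import sys
import threading
import time

def spinner(stop_event: threading.Event, message: str) -> None:
    for frame in itertools.cycle("|/-\\"):
        if stop_event.is_set():
            break
        sys.stdout.write(f"\r{message} {frame}")
        sys.stdout.flush()
        time.sleep(0.1)
    sys.stdout.write("\r" + " " * (len(message) + 2) + "\r")  # clear the line

stop_event = threading.Event()
thread = threading.Thread(target=spinner, args=(stop_event, "Improving text..."), daemon=True)
thread.start()

time.sleep(1.5)   # stand-in for waiting on the first streamed chunk
stop_event.set()  # what the real machinery does when content first arrives
thread.join()
print("first chunk received")
```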
{ngpt-2.16.5 → ngpt-2.16.6}/docs/usage/library_usage.md

````diff
@@ -381,37 +381,51 @@ config = load_config()
 client = NGPTClient(**config)
 
 # Get the markdown streamer
-…
+live_display, update_function, setup_spinner = prettify_streaming_markdown(
     renderer='rich',
     header_text="Streaming Response:"
 )
 
-# …
-if …
-…
+# Set up spinner while waiting for first content
+if live_display and setup_spinner:
+    import threading
+    stop_spinner_event = threading.Event()
+    stop_spinner_func = setup_spinner(stop_spinner_event, "Waiting for response...")
+
+# The live display will start automatically when the first content is received
+# (no need to call live_display.start() manually)
 
-# Use the …
+# Use the update function with the client
 response = client.chat(
     "Explain quantum computing with code examples",
     stream=True,
     markdown_format=True,
-    stream_callback=…
+    stream_callback=update_function
 )
 
+# Ensure spinner is stopped if no content was received
+if not stop_spinner_event.is_set():
+    stop_spinner_event.set()
+
 # Stop the live display when done
-…
+live_display.stop()
 ```
 
 For more control, you can access the live display and update function directly:
 
 ```python
-live_display, update_function = prettify_streaming_markdown(
+live_display, update_function, setup_spinner = prettify_streaming_markdown(
     renderer='rich',
     header_text="Custom Header"
 )
 
 if live_display: # Check if setup was successful
-…
+    # Optional: Set up a spinner while waiting for content
+    import threading
+    stop_spinner_event = threading.Event()
+    stop_spinner_func = setup_spinner(stop_spinner_event, "Processing...")
+
+    # The first update_function call will automatically start the display and stop the spinner
 
     # Update the content manually
     update_function("# Header\nThis is *formatted* content")
````
{ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/modes/rewrite.py

```diff
@@ -131,8 +131,6 @@ def rewrite_mode(client, args, logger=None):
         {"role": "user", "content": input_text}
     ]
 
-    print("\nSubmission successful. Waiting for response...")
-
     # Log the messages if logging is enabled
     if logger:
         logger.log("system", REWRITE_SYSTEM_PROMPT)
```
{ngpt-2.16.5 → ngpt-2.16.6}/ngpt/cli/renderers.py

```diff
@@ -239,8 +239,14 @@ def prettify_streaming_markdown(renderer='rich', is_interactive=False, header_text=None):
         else:
             md_obj = Markdown("")
 
-        # Initialize the Live display with …
-        live = Live(…
+        # Initialize the Live display with vertical overflow handling
+        live = Live(
+            md_obj,
+            console=console,
+            refresh_per_second=10,
+            auto_refresh=False,
+            vertical_overflow="visible" # Attempt to make overflow visible
+        )
 
         # Track if this is the first content update
         first_update = True
@@ -259,7 +265,7 @@ def prettify_streaming_markdown(renderer='rich', is_interactive=False, header_text=None):
             sys.stdout.flush()
             live.start()
 
-            # Update content
+            # Update content in live display
             if is_interactive and header_text:
                 # Update the panel content
                 md_obj.renderable = Markdown(content)
```
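The options added here are standard `rich.live.Live` parameters: with `auto_refresh=False` the display repaints only on an explicit `refresh()` or `update(..., refresh=True)`, and `vertical_overflow="visible"` lets content taller than the terminal spill past the bottom rather than being cropped or ellipsized. A standalone sketch of the same configuration (demo content only, not ngpt code):

```python
# Standalone illustration of the Live settings added above.
import time

from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown

console = Console()

live = Live(
    Markdown(""),
    console=console,
    refresh_per_second=10,        # upper bound, mainly relevant with auto_refresh=True
    auto_refresh=False,           # repaint only when we ask for it
    vertical_overflow="visible",  # tall content scrolls past the terminal height
)

with live:
    content = ""
    for word in "streaming **markdown** rendered *incrementally*".split():
        content += word + " "
        live.update(Markdown(content), refresh=True)  # explicit repaint
        time.sleep(0.2)
```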
{ngpt-2.16.5 → ngpt-2.16.6}/pyproject.toml

```diff
@@ -1,6 +1,6 @@
 [project]
 name = "ngpt"
-version = "2.16.5"
+version = "2.16.6"
 description = "Swiss army knife for LLMs: powerful CLI, interactive chatbot, and flexible Python library. Works with OpenAI, Ollama, Groq, Claude, and any OpenAI-compatible API."
 authors = [
     {name = "nazDridoy", email = "nazdridoy399@gmail.com"},
```