UI4AI 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- UI4AI/__init__.py +1 -0
- UI4AI/chat_ui.py +158 -0
- UI4AI-0.1.0.dist-info/METADATA +52 -0
- UI4AI-0.1.0.dist-info/RECORD +6 -0
- UI4AI-0.1.0.dist-info/WHEEL +5 -0
- UI4AI-0.1.0.dist-info/top_level.txt +1 -0
UI4AI/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from .chat_ui import run_chat
|
UI4AI/chat_ui.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
1
|
+
import streamlit as st
|
|
2
|
+
import uuid
|
|
3
|
+
from datetime import datetime
|
|
4
|
+
from typing import List, Dict, Callable, Optional
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def run_chat(
    generate_response: Optional[Callable[[List[Dict]], str]],
    generate_title: Optional[Callable[[str], str]] = None,
    count_tokens: Optional[Callable[[List[Dict]], int]] = None,
    page_title: str = "AI Chat",
    title: str = "Conversational bot",
    layout: str = "wide",
    new_conversation: str = "β New Chat",
    chat_placeholder: str = "Ask me anything...",
    sidebar_instructions: str = "Conversation History",
    spinner_text: str = "Thinking...",
    max_history_tokens: Optional[int] = None
) -> None:
    """Render a complete Streamlit chat UI around a caller-supplied LLM core.

    Parameters
    ----------
    generate_response:
        Required. Called with a list of ``{"role": ..., "content": ...}``
        dicts; must return the assistant's reply as a string.
    generate_title:
        Optional. Called with the first user prompt to name a new
        conversation. Its presence also enables the sidebar history list
        and full-history prompting (see ``_handle_user_input``).
    count_tokens:
        Optional. Called with a list of message dicts; must return an int.
        Enables token counts in the sidebar and, together with
        ``max_history_tokens``, history truncation.
    page_title, title, layout:
        Browser tab title, on-page heading, and Streamlit page layout.
    new_conversation, chat_placeholder, sidebar_instructions, spinner_text:
        UI label strings.
    max_history_tokens:
        Token budget for the history sent to ``generate_response``; only
        effective when ``count_tokens`` is also provided.

    Returns ``None``. Renders an error page and returns early when
    ``generate_response`` is missing.
    """

    if not generate_response:
        # Console log in addition to the on-page error, since the page may
        # never be looked at when the integration is misconfigured.
        print("No generate_response function provided.")
        st.set_page_config(page_title="Error", layout="wide")
        st.error("No `generate_response` function provided.")
        return

    # Session-state setup only touches st.session_state, so it is safe to run
    # before set_page_config (which must precede other rendering calls).
    _init_session_state()

    st.set_page_config(page_title=page_title, layout=layout)
    st.title(title)

    with st.sidebar:
        _render_sidebar(generate_title, count_tokens, sidebar_instructions, new_conversation)

    # Replay the stored transcript first, then process any new input so the
    # fresh exchange appears below the history.
    _render_chat_history()

    _handle_user_input(
        generate_response,
        generate_title,
        count_tokens,
        chat_placeholder,
        spinner_text,
        max_history_tokens
    )
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def _init_session_state():
    """Make sure every session-state key this UI relies on exists.

    Keys already present (e.g. after a rerun) are left untouched.
    """
    if "conversations" not in st.session_state:
        st.session_state["conversations"] = {}
    if "current_convo_id" not in st.session_state:
        st.session_state["current_convo_id"] = None
    if "messages" not in st.session_state:
        st.session_state["messages"] = []
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def _render_sidebar(
    generate_title: Optional[Callable],
    count_tokens: Optional[Callable],
    instructions: str,
    new_conversation: str
):
    """Draw the sidebar: instruction text, the new-chat button, and one
    button per stored conversation.

    NOTE(review): the history list is only rendered when ``generate_title``
    is truthy — confirm that hiding past conversations without a title
    generator is intentional.
    """
    st.markdown("### π Instructions")
    st.markdown(instructions)

    if st.button(new_conversation):
        print("π New conversation started")
        _reset_conversation()

    if not generate_title:
        return

    for cid, convo in st.session_state.conversations.items():
        button_label = convo["title"]
        if count_tokens:
            # '?' shows for conversations saved before token counting existed.
            button_label = f"{button_label} ({convo.get('token_count', '?')} tokens)"
        if st.button(button_label, key=cid):
            # Switch the active transcript to the chosen conversation and
            # redraw immediately.
            st.session_state.current_convo_id = cid
            st.session_state.messages = convo["messages"]
            st.rerun()
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def _render_chat_history():
    """Replay every stored message bubble for the active conversation."""
    for entry in st.session_state.messages:
        role, content = entry["role"], entry["content"]
        with st.chat_message(role):
            st.markdown(content)
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def _handle_user_input(
    generate_response: Callable,
    generate_title: Optional[Callable],
    count_tokens: Optional[Callable],
    placeholder: str,
    spinner_text: str,
    max_tokens: Optional[int]
) -> None:
    """Read one chat prompt, call the LLM, and persist the exchange.

    The history passed to ``generate_response`` depends on which optional
    callbacks were supplied:
      * ``count_tokens`` and ``max_tokens`` — full history, truncated to the
        newest messages that fit the token budget;
      * ``generate_title`` or ``count_tokens`` alone — the full history;
      * neither — only the latest user message.

    NOTE(review): that last branch sends a single message with no context —
    confirm this "stateless" fallback is the intended default behavior.
    """
    if prompt := st.chat_input(placeholder):
        # Lazily create a conversation record on the first prompt so that
        # empty chats never appear in the sidebar history.
        _create_conversation_if_needed(prompt, generate_title)

        st.chat_message("user").markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})

        try:
            with st.spinner(spinner_text):
                if count_tokens and max_tokens:
                    messages_for_api = _truncate_messages(
                        st.session_state.messages,
                        count_tokens,
                        max_tokens
                    )
                elif generate_title or count_tokens:
                    messages_for_api = st.session_state.messages
                else:
                    messages_for_api = [st.session_state.messages[-1]]

                response = generate_response(messages_for_api)

            st.chat_message("assistant").markdown(response)
            st.session_state.messages.append({"role": "assistant", "content": response})

            # Persist the updated transcript (and its token count) back into
            # the conversation store so sidebar switching stays in sync.
            if st.session_state.current_convo_id:
                convo = st.session_state.conversations[st.session_state.current_convo_id]
                convo["messages"] = st.session_state.messages
                if count_tokens:
                    convo["token_count"] = count_tokens(st.session_state.messages)

        except Exception as e:
            # Surface any callback failure in the UI instead of crashing the
            # Streamlit script; the user message stays in history.
            print(f"Exception: {e}")
            st.error(f"Error: {str(e)}")
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def _create_conversation_if_needed(prompt: str, generate_title: Optional[Callable]):
    """Start a fresh conversation record when none is currently active.

    The first prompt seeds the title (via ``generate_title`` when given,
    otherwise a fixed placeholder). No-op when a conversation is active.
    """
    if st.session_state.current_convo_id:
        return

    new_id = str(uuid.uuid4())
    if generate_title:
        convo_title = generate_title(prompt)
    else:
        convo_title = "Untitled Chat"

    st.session_state.conversations[new_id] = {
        "id": new_id,
        "title": convo_title,
        "messages": [],
        "token_count": 0,
        "created_at": datetime.now().isoformat(),
    }
    st.session_state.current_convo_id = new_id
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
def _reset_conversation():
    """Detach from the active conversation and clear the visible transcript."""
    st.session_state["current_convo_id"] = None
    st.session_state["messages"] = []
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
def _truncate_messages(messages: List[Dict], count_tokens: Callable, max_tokens: int) -> List[Dict]:
|
|
148
|
+
trimmed = []
|
|
149
|
+
total_tokens = 0
|
|
150
|
+
|
|
151
|
+
for msg in reversed(messages):
|
|
152
|
+
tokens = count_tokens([msg])
|
|
153
|
+
if total_tokens + tokens > max_tokens:
|
|
154
|
+
break
|
|
155
|
+
trimmed.insert(0, msg)
|
|
156
|
+
total_tokens += tokens
|
|
157
|
+
|
|
158
|
+
return trimmed
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
Metadata-Version: 2.2
|
|
2
|
+
Name: UI4AI
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Streamlit UI for LLM chat apps
|
|
5
|
+
Home-page: https://github.com/DKethan/UI4AI/tree/dev-01
|
|
6
|
+
Author: Kethan Dosapati
|
|
7
|
+
Classifier: Programming Language :: Python :: 3
|
|
8
|
+
Classifier: Operating System :: OS Independent
|
|
9
|
+
Requires-Python: >=3.7
|
|
10
|
+
Description-Content-Type: text/markdown
|
|
11
|
+
Requires-Dist: streamlit
|
|
12
|
+
Dynamic: author
|
|
13
|
+
Dynamic: classifier
|
|
14
|
+
Dynamic: description
|
|
15
|
+
Dynamic: description-content-type
|
|
16
|
+
Dynamic: home-page
|
|
17
|
+
Dynamic: requires-dist
|
|
18
|
+
Dynamic: requires-python
|
|
19
|
+
Dynamic: summary
|
|
20
|
+
|
|
21
|
+
# UI4AI
|
|
22
|
+
|
|
23
|
+
A lightweight, plug-and-play Streamlit-based UI for LLM chatbot applications.
|
|
24
|
+
|
|
25
|
+
## Features
|
|
26
|
+
- Add your own `generate_response` function
|
|
27
|
+
- Sidebar history, session management
|
|
28
|
+
- Optional: title generation, token counting
|
|
29
|
+
|
|
30
|
+
## Usage
|
|
31
|
+
|
|
32
|
+
```python
|
|
33
|
+
from UI4AI import run_chat
|
|
34
|
+
|
|
35
|
+
run_chat(generate_response=my_response_function)
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
## Install
|
|
39
|
+
|
|
40
|
+
```
|
|
41
|
+
pip install ui4ai
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
---
|
|
46
|
+
|
|
47
|
+
### Next Steps
|
|
48
|
+
- Want me to generate the license file?
|
|
49
|
+
- Ready to build and upload to PyPI?
|
|
50
|
+
- Want me to zip this entire structure for you?
|
|
51
|
+
|
|
52
|
+
Let's roll!
|
|
@@ -0,0 +1,6 @@
|
|
|
1
|
+
UI4AI/__init__.py,sha256=E6Eh2MA2TXY8-wiVPNWZqEYx4KnoaCGsdV7-8HlIFjc,30
|
|
2
|
+
UI4AI/chat_ui.py,sha256=fJfiAVDGjHIgv-_OLw2zQ-nQR3ucL8rT-Dgyj1DLmSs,5154
|
|
3
|
+
UI4AI-0.1.0.dist-info/METADATA,sha256=0G-YEPnJVeyA_E6R4tELLOkRy3RmFJwBvj3g3phyEi4,1065
|
|
4
|
+
UI4AI-0.1.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
|
|
5
|
+
UI4AI-0.1.0.dist-info/top_level.txt,sha256=88f17E-A9G1zgeqEYcqE8tfuzGb3Z-Zq6sTE3H2ljAQ,6
|
|
6
|
+
UI4AI-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
UI4AI
|