UI4AI 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ui4ai-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,52 @@
1
+ Metadata-Version: 2.2
2
+ Name: UI4AI
3
+ Version: 0.1.0
4
+ Summary: Streamlit UI for LLM chat apps
5
+ Home-page: https://github.com/DKethan/UI4AI/tree/dev-01
6
+ Author: Kethan Dosapati
7
+ Classifier: Programming Language :: Python :: 3
8
+ Classifier: Operating System :: OS Independent
9
+ Requires-Python: >=3.7
10
+ Description-Content-Type: text/markdown
11
+ Requires-Dist: streamlit
12
+ Dynamic: author
13
+ Dynamic: classifier
14
+ Dynamic: description
15
+ Dynamic: description-content-type
16
+ Dynamic: home-page
17
+ Dynamic: requires-dist
18
+ Dynamic: requires-python
19
+ Dynamic: summary
20
+
21
+ # UI4AI
22
+
23
+ A lightweight, plug-and-play Streamlit-based UI for LLM chatbot applications.
24
+
25
+ ## Features
26
+ - Add your own `generate_response` function
27
+ - Sidebar history, session management
28
+ - Optional: title generation, token counting
29
+
30
+ ## Usage
31
+
32
+ ```python
33
+ from ui4ai import run_chat
34
+
35
+ run_chat(generate_response=my_response_function)
36
+ ```
37
+
38
+ ## Install
39
+
40
+ ```
41
+ pip install ui4ai
42
+ ```
44
+
45
+ ---
46
+
47
+ For more information, see the project repository: https://github.com/DKethan/UI4AI
ui4ai-0.1.0/README.md ADDED
@@ -0,0 +1,32 @@
1
+ # UI4AI
2
+
3
+ A lightweight, plug-and-play Streamlit-based UI for LLM chatbot applications.
4
+
5
+ ## Features
6
+ - Add your own `generate_response` function
7
+ - Sidebar history, session management
8
+ - Optional: title generation, token counting
9
+
10
+ ## Usage
11
+
12
+ ```python
13
+ from ui4ai import run_chat
14
+
15
+ run_chat(generate_response=my_response_function)
16
+ ```
17
+
18
+ ## Install
19
+
20
+ ```
21
+ pip install ui4ai
22
+ ```
24
+
25
+ ---
26
+
27
+ For more information, see the project repository: https://github.com/DKethan/UI4AI
@@ -0,0 +1 @@
1
+ from .chat_ui import run_chat
@@ -0,0 +1,158 @@
1
+ import streamlit as st
2
+ import uuid
3
+ from datetime import datetime
4
+ from typing import List, Dict, Callable, Optional
5
+
6
+
7
def run_chat(
    generate_response: Optional[Callable[[List[Dict]], str]],
    generate_title: Optional[Callable[[str], str]] = None,
    count_tokens: Optional[Callable[[List[Dict]], int]] = None,
    page_title: str = "AI Chat",
    title: str = "Conversational bot",
    layout: str = "wide",
    new_conversation: str = "➕ New Chat",
    chat_placeholder: str = "Ask me anything...",
    sidebar_instructions: str = "Conversation History",
    spinner_text: str = "Thinking...",
    max_history_tokens: Optional[int] = None
):
    """Render a Streamlit chat UI driven by a caller-supplied LLM backend.

    Args:
        generate_response: Required callback; receives a list of
            ``{"role", "content"}`` message dicts and returns the reply text.
        generate_title: Optional callback that turns the first prompt into a
            conversation title; also enables the sidebar history list.
        count_tokens: Optional callback returning a token count for a list of
            messages; enables token labels and history truncation.
        page_title: Browser tab title passed to ``st.set_page_config``.
        title: Heading shown at the top of the page.
        layout: Streamlit page layout ("wide" or "centered").
        new_conversation: Label for the sidebar's new-chat button.
        chat_placeholder: Placeholder text for the chat input box.
        sidebar_instructions: Markdown shown in the sidebar.
        spinner_text: Text shown while waiting for the backend.
        max_history_tokens: When set together with ``count_tokens``, the
            history sent to the backend is truncated to this budget.

    Returns:
        None. Renders an error page and bails out early when
        ``generate_response`` is missing.
    """
    if not generate_response:
        print("No generate_response function provided.")
        # BUGFIX: honor the caller's `layout` here too — the original
        # hard-coded layout="wide" on the error page only.
        st.set_page_config(page_title="Error", layout=layout)
        st.error("No `generate_response` function provided.")
        return

    _init_session_state()

    st.set_page_config(page_title=page_title, layout=layout)
    st.title(title)

    with st.sidebar:
        _render_sidebar(generate_title, count_tokens, sidebar_instructions, new_conversation)

    _render_chat_history()

    _handle_user_input(
        generate_response,
        generate_title,
        count_tokens,
        chat_placeholder,
        spinner_text,
        max_history_tokens
    )
46
+
47
+
48
def _init_session_state():
    """Ensure every session-state key this UI relies on exists.

    Seeds ``conversations`` (id -> conversation dict), ``current_convo_id``
    (id of the active conversation, or None), and ``messages`` (the active
    conversation's message list). Existing values are left untouched.
    """
    if "conversations" not in st.session_state:
        st.session_state["conversations"] = {}
    if "current_convo_id" not in st.session_state:
        st.session_state["current_convo_id"] = None
    if "messages" not in st.session_state:
        st.session_state["messages"] = []
53
+
54
+
55
def _render_sidebar(
    generate_title: Optional[Callable],
    count_tokens: Optional[Callable],
    instructions: str,
    new_conversation: str
):
    """Draw the sidebar: instructions, the new-chat button, and — only when
    title generation is enabled — a clickable conversation-history list.
    """
    st.markdown("### 📖 Instructions")
    st.markdown(instructions)

    if st.button(new_conversation):
        print("🔄 New conversation started")
        _reset_conversation()

    # History buttons are rendered only when titles are generated;
    # without titles there is nothing meaningful to label them with.
    if not generate_title:
        return

    for convo_id, convo in st.session_state.conversations.items():
        label = convo["title"]
        if count_tokens:
            # Token count may be absent on older records, hence the '?'.
            label += f" ({convo.get('token_count', '?')} tokens)"
        if st.button(label, key=convo_id):
            # Switch the active conversation and redraw immediately.
            st.session_state.current_convo_id = convo_id
            st.session_state.messages = convo["messages"]
            st.rerun()
77
+
78
+
79
def _render_chat_history():
    """Replay every stored message of the active conversation as a chat bubble."""
    for message in st.session_state.messages:
        role = message["role"]
        with st.chat_message(role):
            st.markdown(message["content"])
83
+
84
+
85
def _handle_user_input(
    generate_response: Callable,
    generate_title: Optional[Callable],
    count_tokens: Optional[Callable],
    placeholder: str,
    spinner_text: str,
    max_tokens: Optional[int]
):
    """Read one prompt from the chat box, call the backend, and record both
    sides of the exchange in session state.

    Backend errors are caught and surfaced with ``st.error`` instead of
    crashing the app; in that case the user message stays in the history
    without a reply.
    """
    prompt = st.chat_input(placeholder)
    if not prompt:
        return

    _create_conversation_if_needed(prompt, generate_title)

    st.chat_message("user").markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    try:
        with st.spinner(spinner_text):
            history = st.session_state.messages
            if count_tokens and max_tokens:
                # Trim the oldest messages so the request stays in budget.
                messages_for_api = _truncate_messages(history, count_tokens, max_tokens)
            elif generate_title or count_tokens:
                # History-aware mode: send the whole conversation.
                messages_for_api = history
            else:
                # Minimal mode: only the message just entered.
                messages_for_api = history[-1:]

            response = generate_response(messages_for_api)

        st.chat_message("assistant").markdown(response)
        st.session_state.messages.append({"role": "assistant", "content": response})

        active_id = st.session_state.current_convo_id
        if active_id:
            # Persist the exchange on the conversation record.
            convo = st.session_state.conversations[active_id]
            convo["messages"] = st.session_state.messages
            if count_tokens:
                convo["token_count"] = count_tokens(st.session_state.messages)

    except Exception as e:
        print(f"Exception: {e}")
        st.error(f"Error: {str(e)}")
126
+
127
+
128
def _create_conversation_if_needed(prompt: str, generate_title: Optional[Callable]):
    """Create and activate a new conversation record when none is active.

    The title comes from ``generate_title(prompt)`` when provided,
    otherwise the record is labelled "Untitled Chat".
    """
    if st.session_state.current_convo_id:
        return  # a conversation is already active

    convo_id = str(uuid.uuid4())
    if generate_title:
        title = generate_title(prompt)
    else:
        title = "Untitled Chat"

    st.session_state.conversations[convo_id] = {
        "id": convo_id,
        "title": title,
        "messages": [],
        "token_count": 0,
        "created_at": datetime.now().isoformat(),
    }
    st.session_state.current_convo_id = convo_id
140
+
141
+
142
def _reset_conversation():
    """Clear the active conversation so the next prompt starts a fresh one."""
    st.session_state.messages = []
    st.session_state.current_convo_id = None
145
+
146
+
147
+ def _truncate_messages(messages: List[Dict], count_tokens: Callable, max_tokens: int) -> List[Dict]:
148
+ trimmed = []
149
+ total_tokens = 0
150
+
151
+ for msg in reversed(messages):
152
+ tokens = count_tokens([msg])
153
+ if total_tokens + tokens > max_tokens:
154
+ break
155
+ trimmed.insert(0, msg)
156
+ total_tokens += tokens
157
+
158
+ return trimmed
@@ -0,0 +1,52 @@
1
+ Metadata-Version: 2.2
2
+ Name: UI4AI
3
+ Version: 0.1.0
4
+ Summary: Streamlit UI for LLM chat apps
5
+ Home-page: https://github.com/DKethan/UI4AI/tree/dev-01
6
+ Author: Kethan Dosapati
7
+ Classifier: Programming Language :: Python :: 3
8
+ Classifier: Operating System :: OS Independent
9
+ Requires-Python: >=3.7
10
+ Description-Content-Type: text/markdown
11
+ Requires-Dist: streamlit
12
+ Dynamic: author
13
+ Dynamic: classifier
14
+ Dynamic: description
15
+ Dynamic: description-content-type
16
+ Dynamic: home-page
17
+ Dynamic: requires-dist
18
+ Dynamic: requires-python
19
+ Dynamic: summary
20
+
21
+ # UI4AI
22
+
23
+ A lightweight, plug-and-play Streamlit-based UI for LLM chatbot applications.
24
+
25
+ ## Features
26
+ - Add your own `generate_response` function
27
+ - Sidebar history, session management
28
+ - Optional: title generation, token counting
29
+
30
+ ## Usage
31
+
32
+ ```python
33
+ from ui4ai import run_chat
34
+
35
+ run_chat(generate_response=my_response_function)
36
+ ```
37
+
38
+ ## Install
39
+
40
+ ```
41
+ pip install ui4ai
42
+ ```
44
+
45
+ ---
46
+
47
+ For more information, see the project repository: https://github.com/DKethan/UI4AI
@@ -0,0 +1,10 @@
1
+ README.md
2
+ setup.py
3
+ UI4AI/__init__.py
4
+ UI4AI/chat_ui.py
5
+ UI4AI.egg-info/PKG-INFO
6
+ UI4AI.egg-info/SOURCES.txt
7
+ UI4AI.egg-info/dependency_links.txt
8
+ UI4AI.egg-info/requires.txt
9
+ UI4AI.egg-info/top_level.txt
10
+ tests/test_apr_10_441.py
@@ -0,0 +1 @@
1
+ streamlit
@@ -0,0 +1 @@
1
+ UI4AI
ui4ai-0.1.0/setup.cfg ADDED
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
ui4ai-0.1.0/setup.py ADDED
@@ -0,0 +1,18 @@
1
from setuptools import setup, find_packages


def _read_long_description() -> str:
    """Return the README contents used as the PyPI long description."""
    # BUGFIX: the original `open("README.md").read()` leaked a file handle
    # and relied on the platform default encoding; use a context manager
    # and explicit UTF-8.
    with open("README.md", encoding="utf-8") as readme:
        return readme.read()


setup(
    name="UI4AI",
    version="0.1.0",
    author="Kethan Dosapati",
    description="Streamlit UI for LLM chat apps",
    long_description=_read_long_description(),
    long_description_content_type="text/markdown",
    url="https://github.com/DKethan/UI4AI/tree/dev-01",
    packages=find_packages(),
    install_requires=["streamlit"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.7",
)
@@ -0,0 +1,24 @@
1
import os

from UI4AI import run_chat
import openai

# SECURITY FIX: the original file shipped a hard-coded OpenAI API key to a
# public registry. Never commit secrets — read the key from the environment
# instead. The leaked key must be revoked regardless.
openai.api_key = os.environ.get("OPENAI_API_KEY")


def generate_response(messages) -> str:
    """Generate response with history management"""
    try:
        response = openai.chat.completions.create(
            model="gpt-4",
            messages=messages,
            temperature=0.7
        )
        return response.choices[0].message.content
    except Exception as e:
        # Re-raise with context so the UI surfaces a readable error.
        raise RuntimeError(f"Response generation failed: {str(e)}")


run_chat(
    generate_response=generate_response,
    page_title="GPT-4 Chat",
    chat_placeholder="Ask me anything...",
    sidebar_instructions="Powered by GPT-4",
    spinner_text="Generating response...",
)