zaturn 0.2.1__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. zaturn/mcp/__init__.py +9 -10
  2. zaturn/studio/agent_wrapper.py +36 -112
  3. zaturn/studio/app.py +48 -41
  4. zaturn/studio/storage.py +14 -4
  5. zaturn/studio/templates/_shell.html +3 -3
  6. zaturn/studio/templates/ai_message.html +1 -2
  7. zaturn/studio/templates/c_source_card.html +3 -3
  8. zaturn/studio/templates/chat.html +27 -19
  9. zaturn/studio/templates/chat_metadata.html +10 -0
  10. zaturn/studio/templates/css/style.css +146 -21
  11. zaturn/studio/templates/function_call.html +5 -4
  12. zaturn/studio/templates/icons/arrow-left.svg +3 -0
  13. zaturn/studio/templates/icons/arrow-right.svg +3 -0
  14. zaturn/studio/templates/icons/chat-bubble.svg +6 -0
  15. zaturn/studio/templates/icons/check-circle-solid.svg +3 -0
  16. zaturn/studio/templates/icons/database.svg +5 -0
  17. zaturn/studio/templates/icons/fire-flame.svg +4 -0
  18. zaturn/studio/templates/icons/floppy-disk.svg +5 -0
  19. zaturn/studio/templates/icons/link.svg +4 -0
  20. zaturn/studio/templates/icons/play.svg +3 -0
  21. zaturn/studio/templates/icons/settings.svg +4 -0
  22. zaturn/studio/templates/icons/timer.svg +5 -0
  23. zaturn/studio/templates/icons/trash.svg +4 -0
  24. zaturn/studio/templates/icons/upload.svg +4 -0
  25. zaturn/studio/templates/icons/user.svg +4 -0
  26. zaturn/studio/templates/icons/warning-triangle.svg +5 -0
  27. zaturn/studio/templates/icons/wrench.svg +4 -0
  28. zaturn/studio/templates/loader.html +1 -1
  29. zaturn/studio/templates/manage_sources.html +5 -5
  30. zaturn/studio/templates/new_conversation.html +2 -2
  31. zaturn/studio/templates/settings.html +24 -3
  32. zaturn/studio/templates/setup_prompt.html +3 -2
  33. zaturn/studio/templates/user_message.html +2 -2
  34. zaturn/tools/config.py +0 -83
  35. {zaturn-0.2.1.dist-info → zaturn-0.3.0.dist-info}/METADATA +5 -5
  36. zaturn-0.3.0.dist-info/RECORD +55 -0
  37. zaturn/studio/static/noto_emoji.ttf +0 -0
  38. zaturn-0.2.1.dist-info/RECORD +0 -39
  39. {zaturn-0.2.1.dist-info → zaturn-0.3.0.dist-info}/WHEEL +0 -0
  40. {zaturn-0.2.1.dist-info → zaturn-0.3.0.dist-info}/entry_points.txt +0 -0
  41. {zaturn-0.2.1.dist-info → zaturn-0.3.0.dist-info}/licenses/LICENSE +0 -0
  42. {zaturn-0.2.1.dist-info → zaturn-0.3.0.dist-info}/top_level.txt +0 -0
zaturn/mcp/__init__.py CHANGED
@@ -1,10 +1,11 @@
  import argparse
+ import importlib.resources
  import os
  import platformdirs
- import pkg_resources
  import sys

  from fastmcp import FastMCP
+ from fastmcp.tools.tool import Tool

  from zaturn.tools import ZaturnTools

@@ -30,19 +31,17 @@ if not source_list:
      source_list = args.sources

  if not source_list:
-     source_list = [
-         pkg_resources.resource_filename(
-             'zaturn',
-             os.path.join('mcp', 'example_data', 'all_pokemon_data.csv')
-         )
-     ]
+     with importlib.resources.path(
+         'zaturn.tools.example_data', 'all_pokemon_data.csv'
+     ) as source_path:
+         source_list = [str(source_path)]
+
      print("No data sources provided. Loading example dataset for demonstration.")
      print(f"\nTo load your datasets, add them to {SOURCES_FILE} (one source URL or full file path per line)")
      print("\nOr use command line args to specify data sources:")
      print("zaturn_mcp sqlite:///path/to/mydata.db /path/to/my_file.csv")
      print(f"\nNOTE: Sources in command line args will be ignored if sources are found in {SOURCES_FILE}")

-
  SOURCES = {}
  for s in source_list:
      source = s.lower()
@@ -90,8 +89,8 @@ for s in source_list:
  def ZaturnMCP(sources):
      zaturn_tools = ZaturnTools(sources)
      zaturn_mcp = FastMCP()
-     for tool in zaturn_tools.tools:
-         zaturn_mcp.add_tool(tool)
+     for tool_function in zaturn_tools.tools:
+         zaturn_mcp.add_tool(Tool.from_function(tool_function))

      return zaturn_mcp

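Two notes on this file: the bundled example CSV is now located with importlib.resources instead of the deprecated pkg_resources API, and tool callables are wrapped before registration with FastMCP. A minimal sketch of both patterns, using only names visible in the hunks above; the list_sources function is a hypothetical stand-in for a ZaturnTools member:

import importlib.resources

from fastmcp import FastMCP
from fastmcp.tools.tool import Tool


def list_sources() -> list[str]:
    # Hypothetical tool callable standing in for a ZaturnTools method.
    return ['sqlite:///example.db']


# Resolve a packaged data file without pkg_resources, as the hunk above does.
with importlib.resources.path('zaturn.tools.example_data', 'all_pokemon_data.csv') as p:
    example_source = str(p)

mcp = FastMCP()
# 0.3.0 wraps plain functions in a Tool object before registering them.
mcp.add_tool(Tool.from_function(list_sources))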
zaturn/studio/agent_wrapper.py CHANGED
@@ -4,128 +4,52 @@ import json
  from function_schema import get_function_schema
  import httpx
  from mcp.types import ImageContent
+ from pydantic_ai import Agent
+ from pydantic_ai.messages import ModelMessagesTypeAdapter
+ from pydantic_ai.models.openai import OpenAIChatModel, OpenAIChatModelSettings
+ from pydantic_ai.providers.openai import OpenAIProvider
+ from pydantic_core import to_jsonable_python


- class Agent:
+ class ZaturnAgent:

      def __init__(self,
          endpoint: str,
          api_key: str,
-         model: str,
+         model_name: str,
          tools: list = [],
          image_input: bool = False,
+         reasoning_effort: str = 'none',
+         system_prompt: str = '',
      ):

-         self._post_url = f'{endpoint}/chat/completions'
-         self._api_key = api_key
-         self._model = model
-         self._image_input = image_input
-         self._system_message = {
-             'role': 'system',
-             'content': """
-             You are a helpful data analysis assistant.
-             Use only the tool provided data sources to process user inputs.
-             Do not use external sources or your own knowledge base.
-             Also, the tool outputs are shown to the user.
-             So, please avoid repeating the tool outputs in the generated text.
-             Use list_sources and describe_table whenever needed,
-             do not prompt the user for source names and column names.
-             """,
-         }
-
-         self._tools = []
-         self._tool_map = {}
-         for tool in tools:
-             tool_schema = get_function_schema(tool)
-             self._tools.append({
-                 'type': 'function',
-                 'function': tool_schema,
-             })
-             self._tool_map[tool_schema['name']] = tool
-
-
-     def _prepare_input_messages(self, messages):
-         input_messages = [self._system_message]
-         for message in messages:
-             if message['role']!='tool':
-                 input_messages.append(message)
-             elif type(message['content']) is not list:
-                 input_messages.append(message)
-             else:
-                 new_content = []
-                 image_content = None
-                 for content in message['content']:
-                     if content['type']=='image_url':
-                         image_content = content
-                         new_content.append({
-                             'type': 'text',
-                             'text': 'Tool call returned an image to the user.',
-                         })
-                     else:
-                         new_content.append(content)
-                 input_messages.append({
-                     'role': message['role'],
-                     'tool_call_id': message['tool_call_id'],
-                     'name': message['name'],
-                     'content': new_content,
-                 })
-
-         return input_messages
-
-
-     def run(self, messages):
-         if type(messages) is str:
-             messages = [{'role': 'user', 'content': messages}]
-
-         while True:
-             res = httpx.post(
-                 url = self._post_url,
-                 headers = {
-                     'Authorization': f'Bearer {self._api_key}'
-                 },
-                 json = {
-                     'model': self._model,
-                     'messages': self._prepare_input_messages(messages),
-                     'tools': self._tools,
-                     'reasoning': {'exclude': True},
-                 }
+         model_settings = None
+         reasoning_effort = reasoning_effort.lower()
+         if reasoning_effort in ['low', 'medium', 'high']:
+             model_settings = OpenAIChatModelSettings(
+                 openai_reasoning_effort = reasoning_effort,
              )
+
+         self._agent = Agent(
+             OpenAIChatModel(
+                 model_name,
+                 provider = OpenAIProvider(
+                     api_key = api_key,
+                     base_url = endpoint,
+                 )
+             ),
+             model_settings = model_settings,
+             system_prompt = system_prompt or None,
+             tools = tools,
+         )
+

-             print(res.text)
-             resj = res.json()
-             reply = resj['choices'][0]['message']
-             messages.append(reply)
-
-             tool_calls = reply.get('tool_calls')
-             if tool_calls:
-                 for tool_call in tool_calls:
-                     tool_name = tool_call['function']['name']
-                     tool_args = json.loads(tool_call['function']['arguments'])
-                     tool_response = self._tool_map[tool_name](**tool_args)
-                     if type(tool_response) is ImageContent:
-                         b64_data = tool_response.data
-                         data_url = f'data:image/png;base64,{b64_data}'
-                         content = [{
-                             'type': 'image_url',
-                             'image_url': {
-                                 "url": data_url,
-                             }
-                         }]
-                     else:
-                         content = [{
-                             'type': 'text',
-                             'text': json.dumps(tool_response)
-                         }]
-
-                     messages.append({
-                         'role': 'tool',
-                         'tool_call_id': tool_call['id'],
-                         'name': tool_name,
-                         'content': content
-                     })
-             else:
-                 break
-
-         return messages
-
+     def run(self, prompt, message_history = None):
+         if message_history:
+             message_history_obj = ModelMessagesTypeAdapter.validate_python(message_history)
+             result = self._agent.run_sync(prompt, message_history = message_history_obj)
+             return to_jsonable_python(result.all_messages())
+         else:
+             result = self._agent.run_sync(prompt)
+             return to_jsonable_python(result.all_messages())

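The rewritten wrapper delegates the tool-calling loop to pydantic_ai and returns the conversation as plain JSON-able data, which the studio stores and later feeds back in. A usage sketch under assumed values; the endpoint, key, and model name are placeholders, and only the ZaturnAgent interface comes from the diff:

agent = ZaturnAgent(
    endpoint = 'https://api.example.com/v1',  # placeholder OpenAI-compatible endpoint
    api_key = 'sk-placeholder',
    model_name = 'example-model',
    tools = [],
    reasoning_effort = 'low',
    system_prompt = 'You are a helpful data analysis assistant.',
)

# First turn: run() returns every message as JSON-serialisable data.
messages = agent.run('How many rows does each source have?')

# Follow-up turn: pass the stored list back in as message_history.
messages = agent.run('Show only the largest source.', message_history = messages)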
zaturn/studio/app.py CHANGED
@@ -62,6 +62,8 @@ def save_settings() -> str:
      app.config['state']['api_model'] = api_model
      app.config['state']['api_endpoint'] = api_endpoint
      app.config['state']['api_image_input'] = False
+     app.config['state']['reasoning_effort'] = request.form.get('reasoning_effort', 'none')
+     app.config['state']['system_prompt'] = request.form.get('system_prompt').strip('\n')

      try:
          model_info = httpx.get(
@@ -195,31 +197,34 @@ def get_active_sources():

  def prepare_chat_for_render(chat):
      fn_calls = {}
-     for msg in chat['messages']:
-         if msg.get('role')=='assistant':
-             if msg.get('tool_calls'):
-                 msg['is_tool_call'] = True
-                 for tool_call in msg['tool_calls']:
-                     fn_call = tool_call['function']
-                     fn_call['arguments'] = tomli_w.dumps(
-                         json.loads(fn_call['arguments'])
-                     ).replace('\n', '<br>')
-                     fn_calls[tool_call['id']] = fn_call
-             else:
-                 msg['html'] = mistune.html(msg['content'])
-         if msg.get('role')=='tool':
-             msg['call_details'] = fn_calls[msg['tool_call_id']]
-             if type(msg['content']) is str:
-                 msg['html'] = mistune.html(json.loads(msg['text']))
-             elif type(msg['content']) is list:
-                 msg['html'] = ''
-                 for content in msg['content']:
-                     if content['type'] == 'image_url':
-                         data_url = content['image_url']['url']
-                         msg['html'] += f'<img src="{data_url}">'
-                     else:
-                         msg['html'] += mistune.html(json.loads(content['text']))
-
+
+     for message in chat['messages']:
+         for part in message['parts']:
+             if part['part_kind']=='text' and message['kind']=='response':
+                 part['html_content'] = mistune.html(part['content'])
+             elif part['part_kind']=='tool-call':
+                 fn_calls[part['tool_call_id']] = part
+                 fn_calls[part['tool_call_id']]['timestamp'] = message['timestamp']
+             elif part['part_kind']=='tool-return':
+                 fn_call = fn_calls[part['tool_call_id']]
+                 part['call_details'] = {}
+                 part['call_details']['name'] = fn_call['tool_name']
+
+                 t1 = datetime.fromisoformat(fn_call['timestamp'])
+                 t2 = datetime.fromisoformat(part['timestamp'])
+                 part['call_details']['exec_time'] = (t2 - t1).seconds
+
+                 part['call_details']['args_html'] = tomli_w.dumps(
+                     json.loads(fn_call['args'])
+                 ).replace('\n', '<br>')
+
+                 if type(part['content']) is str:
+                     part['html_content'] = mistune.html(part['content'])
+                 elif type(part['content']) is dict and part['content']['type']=='image':
+                     data_url = f"data:{part['content']['mimeType']};base64,{part['content']['data']}"
+                     part['html_content'] = f'<img src="{data_url}">'
+
+
      return chat


@@ -230,22 +235,23 @@ def create_new_chat():
      chat = storage.load_chat(slug)

      state = app.config['state']
-     agent = agent_wrapper.Agent(
+     agent = agent_wrapper.ZaturnAgent(
          endpoint = state['api_endpoint'],
          api_key = state['api_key'],
-         model = state['api_model'],
+         model_name = state['api_model'],
          tools = ZaturnTools(get_active_sources()).tools,
          image_input = state['api_image_input'],
+         reasoning_effort = state['reasoning_effort'],
+         system_prompt = state['system_prompt'],
      )
-     chat['messages'] = agent.run(chat['messages'])
+     chat['messages'] = agent.run(question)
      storage.save_chat(slug, chat)
-     chat = prepare_chat_for_render(chat)

      return boost(
          ''.join([
              render_template('nav.html', slugs=storage.list_chats()),
              '<main id="main">',
-             render_template('chat.html', chat=chat),
+             render_template('chat.html', chat=prepare_chat_for_render(chat)),
              '</main>'
          ]),
          reswap = 'multi:#sidebar,#main',
@@ -264,25 +270,26 @@ def show_chat(slug: str):
  def follow_up_message():
      slug = request.form['slug']
      chat = storage.load_chat(slug)
-     chat['messages'].append({
-         'role': 'user',
-         'content': request.form['question'],
-     })
-
+
      state = app.config['state']
-     agent = agent_wrapper.Agent(
+     agent = agent_wrapper.ZaturnAgent(
          endpoint = state['api_endpoint'],
          api_key = state['api_key'],
-         model = state['api_model'],
+         model_name = state['api_model'],
          tools = ZaturnTools(get_active_sources()).tools,
          image_input = state['api_image_input'],
+         reasoning_effort = state['reasoning_effort'],
+         system_prompt = state['system_prompt'],
+     )
+
+     chat['messages'] = agent.run(
+         prompt = request.form['question'],
+         message_history = chat['messages'],
      )
-     chat['messages'] = agent.run(chat['messages'])
      storage.save_chat(slug, chat)
-     chat = prepare_chat_for_render(chat)
-
+
      return boost(
-         render_template('chat.html', chat=chat),
+         render_template('chat.html', chat=prepare_chat_for_render(chat)),
          push_url = 'false',
          reswap = 'innerHTML scroll:bottom',
      )
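prepare_chat_for_render now walks pydantic_ai's message/part structure instead of OpenAI-style role messages. An abridged, hypothetical example of the stored shape it reads, limited to the keys used above; real output of to_jsonable_python carries more fields, and the tool name and values are illustrative only:

chat = {
    'slug': 'example-chat',
    'schema_version': 1,
    'messages': [
        {
            'kind': 'request',
            'parts': [{'part_kind': 'user-prompt', 'content': 'Count the rows'}],
        },
        {
            'kind': 'response',
            'timestamp': '2025-01-01T12:00:00+00:00',
            'parts': [{
                'part_kind': 'tool-call',
                'tool_call_id': 'call_1',
                'tool_name': 'run_query',                      # hypothetical tool name
                'args': '{"query": "SELECT COUNT(*) FROM t"}',
            }],
        },
        {
            'kind': 'request',
            'parts': [{
                'part_kind': 'tool-return',
                'tool_call_id': 'call_1',
                'timestamp': '2025-01-01T12:00:02+00:00',
                'content': '42',
            }],
        },
    ],
}
# For this data, call_details['exec_time'] works out to 2 seconds (t2 - t1).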
zaturn/studio/storage.py CHANGED
@@ -14,12 +14,24 @@ STATE_FILE = USER_DATA_DIR / 'studio.json'
  CHATS_DIR = USER_DATA_DIR / 'chats'
  os.makedirs(CHATS_DIR, exist_ok=True)

+ DEFAULT_PROMPT = """
+ You are a helpful data analysis assistant.
+ Use only the tool provided data sources to process user inputs.
+ Do not use external sources or your own knowledge base.
+ Also, the tool outputs are shown to the user.
+ So, please avoid repeating the tool outputs in the generated text.
+ Use list_data_sources and describe_table whenever needed,
+ do not prompt the user for source names and column names.
+ """.strip('\n')
+

  def load_state() -> dict:
      if os.path.exists(STATE_FILE):
          with open(STATE_FILE) as f:
              state = json.loads(f.read())
              state['sources'] = state.get('sources', {})
+             state['system_prompt'] = state.get('system_prompt', DEFAULT_PROMPT)
+             state['reasoning_effort'] = state.get('reasoning_effort', 'none')
              return state
      else:
          return {}
@@ -51,10 +63,8 @@ def create_chat(question: str):

      chat = {
          'slug': slug,
-         'messages': [{
-             'role': 'user',
-             'content': question,
-         }]
+         'messages': [],
+         'schema_version': 1
      }

      filename = CHATS_DIR / f'{slug}.json'
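Because load_state() back-fills the two new keys, a studio.json written by 0.2.1 needs no migration step. A small sketch of the effect, with the pre-existing file contents assumed for illustration:

# Hypothetical state loaded from an older studio.json (abridged).
state = {'api_key': 'sk-placeholder', 'api_model': 'example-model', 'sources': {}}

# load_state() applies the same defaults shown above.
state['system_prompt'] = state.get('system_prompt', DEFAULT_PROMPT)
state['reasoning_effort'] = state.get('reasoning_effort', 'none')
# Existing installs pick up DEFAULT_PROMPT and 'none' on their next load.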
zaturn/studio/templates/_shell.html CHANGED
@@ -19,13 +19,13 @@
        <img src="/static/logo.svg" class="logo">
      </a>
      <a href="/">
-       💬
+       {% include('icons/chat-bubble.svg') %}
      </a>
      <a href="/sources/manage" title="Manage Sources">
-       📎
+       {% include('icons/database.svg') %}
      </a>
      <a href="/settings" title="Settings">
-       🛠
+       {% include('icons/settings.svg') %}
      </a>
    </header>
    {% with slugs=slugs %}
zaturn/studio/templates/ai_message.html CHANGED
@@ -1,4 +1,3 @@
  <div class="ai-message">
-   <!--<p><b class="sender">Zaturn:</b></p>-->
-   <div>{{msg['html'] | safe}}</div>
+   <div>{{part['html_content'] | safe}}</div>
  </div>
zaturn/studio/templates/c_source_card.html CHANGED
@@ -5,15 +5,15 @@
      <input type="hidden" name="key" value="{{key}}">
      {% if active %}
        <input type="hidden" name="new_status" value="inactive">
-       <button class="active" title="Currently visible to LLM, Click to Toggle">Active ✅</button>
+       <button class="active" title="Currently visible to LLM, Click to Toggle">Active {% include('icons/check-circle-solid.svg') %}</button>
      {% else %}
        <input type="hidden" name="new_status" value="active">
-       <button class="inactive" title="Currently invisible to LLM, Click to Toggle">Inactive 😴</button>
+       <button class="inactive" title="Currently invisible to LLM, Click to Toggle">Inactive {% include('icons/warning-triangle.svg') %}</button>
      {% endif %}
    </form>

    <form action="/source/delete" method="POST">
      <input type="hidden" name="key" value="{{key}}">
-     <button class="danger">Delete 🗑</button>
+     <button class="danger">Delete {% include('icons/trash.svg') %}</button>
    </form>
  </div>
zaturn/studio/templates/chat.html CHANGED
@@ -1,22 +1,30 @@
  <section id="chat">
-   {% for msg in chat['messages'] %}
-     {% if msg['role']=='user' %}
-       {% include('user_message.html') %}
-     {% elif msg['role']=='assistant' and not msg['is_tool_call'] %}
-       {% include('ai_message.html') %}
-     {% elif msg['role']=='tool' %}
-       {% include('function_call.html') %}
-     {% endif %}
-   {% endfor %}
+   {% if chat.get('schema_version', 0) >= 1 %}
+     {% for message in chat['messages'] %}
+       {% for part in message['parts'] %}
+         {% if part['part_kind']=='system-prompt' %}
+           {% include('chat_metadata.html') %}
+         {% elif part['part_kind']=='user-prompt' %}
+           {% include('user_message.html') %}
+         {% elif part['part_kind']=='text' and message['kind']=='response' %}
+           {% include('ai_message.html') %}
+         {% elif part['part_kind']=='tool-return' %}
+           {% include('function_call.html') %}
+         {% endif %}
+       {% endfor %}
+     {% endfor %}

-   <form action="/follow_up_message" method="POST">
-     <input type="hidden" name="slug" value="{{chat['slug']}}">
-     <textarea
-       required
-       name="question"
-       placeholder="Type a follow up question here."
-     ></textarea>
-     <button>➡</button>
-   </form>
-   {% include('loader.html') %}
+     <form action="/follow_up_message" method="POST">
+       <input type="hidden" name="slug" value="{{chat['slug']}}">
+       <textarea
+         required
+         name="question"
+         placeholder="Type a follow up question here."
+       ></textarea>
+       <button>{% include('icons/arrow-right.svg') %}</button>
+     </form>
+     {% include('loader.html') %}
+   {% else %}
+     <pre><code>{{chat | tojson(indent=2)}}</code></pre>
+   {% endif %}
  </section>
zaturn/studio/templates/chat_metadata.html ADDED
@@ -0,0 +1,10 @@
+ <div class="chat-metadata">
+   <dl>
+     <dt>Model:</dt>
+     <dd>{{chat['messages'][-1]['model_name']}}</dd>
+   </dl>
+   <details class="system-prompt-message">
+     <summary>System Prompt</summary>
+     <p>{{part['content']}}</p>
+   </details>
+ </div>