proscenium 0.0.13__tar.gz → 0.0.14__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {proscenium-0.0.13/src/proscenium.egg-info → proscenium-0.0.14}/PKG-INFO +6 -4
- {proscenium-0.0.13 → proscenium-0.0.14}/README.md +5 -3
- {proscenium-0.0.13 → proscenium-0.0.14}/pyproject.toml +1 -1
- {proscenium-0.0.13/src/proscenium/core → proscenium-0.0.14/src/proscenium}/__init__.py +10 -0
- {proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium/admin/__init__.py +2 -2
- {proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium/bin/__init__.py +1 -1
- {proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium/bin/bot.py +1 -1
- proscenium-0.0.14/src/proscenium/complete.py +97 -0
- proscenium-0.0.13/src/proscenium/verbs/display/chat.py → proscenium-0.0.14/src/proscenium/history.py +14 -0
- {proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium/interfaces/slack.py +2 -2
- {proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium/patterns/rag.py +1 -1
- proscenium-0.0.13/src/proscenium/verbs/complete.py → proscenium-0.0.14/src/proscenium/patterns/tools.py +127 -90
- {proscenium-0.0.13 → proscenium-0.0.14/src/proscenium.egg-info}/PKG-INFO +6 -4
- {proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium.egg-info/SOURCES.txt +2 -9
- {proscenium-0.0.13 → proscenium-0.0.14}/tests/test_display.py +1 -1
- proscenium-0.0.13/src/proscenium/patterns/__init__.py +0 -3
- proscenium-0.0.13/src/proscenium/patterns/tools.py +0 -71
- proscenium-0.0.13/src/proscenium/verbs/__init__.py +0 -3
- proscenium-0.0.13/src/proscenium/verbs/display/__init__.py +0 -9
- proscenium-0.0.13/src/proscenium/verbs/display/tools.py +0 -64
- proscenium-0.0.13/src/proscenium/verbs/display.py +0 -13
- proscenium-0.0.13/src/proscenium/verbs/invoke.py +0 -11
- proscenium-0.0.13/src/proscenium/verbs/remember.py +0 -13
- {proscenium-0.0.13 → proscenium-0.0.14}/LICENSE +0 -0
- {proscenium-0.0.13 → proscenium-0.0.14}/setup.cfg +0 -0
- {proscenium-0.0.13/src/proscenium → proscenium-0.0.14/src/proscenium/interfaces}/__init__.py +0 -0
- {proscenium-0.0.13/src/proscenium/interfaces → proscenium-0.0.14/src/proscenium/patterns}/__init__.py +0 -0
- {proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium/patterns/graph_rag.py +0 -0
- {proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium/util/__init__.py +0 -0
- {proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium.egg-info/dependency_links.txt +0 -0
- {proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium.egg-info/entry_points.txt +0 -0
- {proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium.egg-info/requires.txt +0 -0
- {proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium.egg-info/top_level.txt +0 -0
- {proscenium-0.0.13 → proscenium-0.0.14}/tests/test_demo_typer_help.py +0 -0
- {proscenium-0.0.13 → proscenium-0.0.14}/tests/test_slack_echo.py +0 -0
{proscenium-0.0.13/src/proscenium.egg-info → proscenium-0.0.14}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: proscenium
-Version: 0.0.13
+Version: 0.0.14
 Summary: Declare Collaborative, Asynchronous Human To Agent Interactions
 Author-email: Adam Pingel <oss@pingel.org>
 License-Expression: Apache-2.0
@@ -25,15 +25,17 @@ Dynamic: license-file
 
 # Proscenium
 
-[](https://pypi.org/project/proscenium/)
 [](https://pypi.org/project/proscenium/)
 [](https://github.com/The-AI-Alliance/proscenium/actions/workflows/pytest.yml)
-[](https://pypi.org/project/proscenium/)
 [](https://github.com/The-AI-Alliance/proscenium/tree/main?tab=Apache-2.0-1-ov-file#readme)
 [](https://github.com/The-AI-Alliance/proscenium/issues)
 [](https://github.com/The-AI-Alliance/proscenium/stargazers)
 
-Proscenium is a small, experimental library
+Proscenium is a small, experimental library for
+declaring collaborative, asynchronous human to agent interactions
+in enterprise AI applications.
+It was started in February 2025 and is still in early development.
 
 See the [website](https://the-ai-alliance.github.io/proscenium/) for quickstart info, goals, and other links.
 
```
{proscenium-0.0.13 → proscenium-0.0.14}/README.md

```diff
@@ -1,14 +1,16 @@
 # Proscenium
 
-[](https://pypi.org/project/proscenium/)
 [](https://pypi.org/project/proscenium/)
 [](https://github.com/The-AI-Alliance/proscenium/actions/workflows/pytest.yml)
-[](https://pypi.org/project/proscenium/)
 [](https://github.com/The-AI-Alliance/proscenium/tree/main?tab=Apache-2.0-1-ov-file#readme)
 [](https://github.com/The-AI-Alliance/proscenium/issues)
 [](https://github.com/The-AI-Alliance/proscenium/stargazers)
 
-Proscenium is a small, experimental library
+Proscenium is a small, experimental library for
+declaring collaborative, asynchronous human to agent interactions
+in enterprise AI applications.
+It was started in February 2025 and is still in early development.
 
 See the [website](https://the-ai-alliance.github.io/proscenium/) for quickstart info, goals, and other links.
 
```
{proscenium-0.0.13 → proscenium-0.0.14}/pyproject.toml

```diff
@@ -7,7 +7,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "proscenium"
-version = "0.0.13"
+version = "0.0.14"
 description = "Declare Collaborative, Asynchronous Human To Agent Interactions"
 authors = [{ name = "Adam Pingel", email = "oss@pingel.org" }]
 license = "Apache-2.0"
```
{proscenium-0.0.13/src/proscenium/core → proscenium-0.0.14/src/proscenium}/__init__.py

```diff
@@ -3,12 +3,22 @@ from typing import Optional
 import logging
 
 from pydantic import BaseModel, Field
+from rich.text import Text
 from rich.console import Console
 
 logging.getLogger(__name__).addHandler(logging.NullHandler())
 
 log = logging.getLogger(__name__)
 
+
+def header() -> Text:
+    text = Text()
+    text.append("Proscenium 🎭\n", style="bold")
+    text.append("https://the-ai-alliance.github.io/proscenium/\n")
+    # TODO version, timestamp, ...
+    return text
+
+
 control_flow_system_prompt = """
 You control the workflow of an AI assistant. You evaluate user-posted messages and decide what the next step is.
 """
```
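For orientation, a minimal usage sketch (not part of the diff) of the relocated `header()` helper, assuming proscenium 0.0.14 is installed; the import path follows the new top-level `__init__.py` above:

```python
# Render the Proscenium banner with a rich Console.
from rich.console import Console

from proscenium import header

console = Console()
console.print(header())  # prints "Proscenium 🎭" in bold plus the project URL
```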
{proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium/admin/__init__.py

```diff
@@ -4,8 +4,8 @@ from typing import Optional
 
 import logging
 
-from proscenium.core import Prop
-from proscenium.core import Character
+from proscenium import Prop
+from proscenium import Character
 from rich.console import Console
 
 logging.getLogger(__name__).addHandler(logging.NullHandler())
```
{proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium/bin/bot.py

```diff
@@ -8,7 +8,7 @@ import logging
 from pathlib import Path
 from rich.console import Console
 
-from proscenium.verbs.display import header
+from proscenium import header
 from proscenium.bin import production_from_config
 from proscenium.interfaces.slack import SlackProductionProcessor
 
```
proscenium-0.0.14/src/proscenium/complete.py

```diff
@@ -0,0 +1,97 @@
+"""
+This module uses the [`aisuite`](https://github.com/andrewyng/aisuite) library
+to interact with various LLM inference providers.
+
+It provides functions to complete a simple chat prompt, evaluate a tool call,
+and apply a list of tool calls to a chat prompt.
+
+Providers tested with Proscenium include:
+
+# AWS
+
+Environment: `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`
+
+Valid model ids:
+- `aws:meta.llama3-1-8b-instruct-v1:0`
+
+# Anthropic
+
+Environment: `ANTHROPIC_API_KEY`
+
+Valid model ids:
+- `anthropic:claude-3-5-sonnet-20240620`
+
+# OpenAI
+
+Environment: `OPENAI_API_KEY`
+
+Valid model ids:
+- `openai:gpt-4o`
+
+# Ollama
+
+Command line, eg `ollama run llama3.2 --keepalive 2h`
+
+Valid model ids:
+- `ollama:llama3.2`
+- `ollama:granite3.1-dense:2b`
+"""
+
+import logging
+
+from rich.console import Group
+from rich.panel import Panel
+from rich.table import Table
+from rich.text import Text
+
+from aisuite import Client as AISuiteClient
+
+log = logging.getLogger(__name__)
+
+
+def complete_simple(
+    chat_completion_client: AISuiteClient,
+    model_id: str,
+    system_prompt: str,
+    user_prompt: str,
+    **kwargs,
+) -> str:
+
+    console = kwargs.pop("console", None)
+
+    messages = [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": user_prompt},
+    ]
+
+    if console is not None:
+
+        kwargs_text = "\n".join([str(k) + ": " + str(v) for k, v in kwargs.items()])
+
+        params_text = Text(
+            f"""
+model_id: {model_id}
+{kwargs_text}
+"""
+        )
+
+        messages_table = Table(title="Messages", show_lines=True)
+        messages_table.add_column("Role", justify="left")
+        messages_table.add_column("Content", justify="left")  # style="green"
+        for message in messages:
+            messages_table.add_row(message["role"], message["content"])
+
+        call_panel = Panel(
+            Group(params_text, messages_table), title="complete_simple call"
+        )
+        console.print(call_panel)
+
+    response = chat_completion_client.chat.completions.create(
+        model=model_id, messages=messages, **kwargs
+    )
+    response = response.choices[0].message.content
+
+    if console is not None:
+        console.print(Panel(response, title="Response"))
+
+    return response
```
proscenium-0.0.13/src/proscenium/verbs/display/chat.py → proscenium-0.0.14/src/proscenium/history.py
RENAMED
```diff
@@ -1,5 +1,19 @@
+import logging
+
 from rich.table import Table
 
+log = logging.getLogger(__name__)
+
+
+def format_chat_history(chat_history) -> str:
+    delimiter = "-" * 80 + "\n"
+    return delimiter.join(
+        [
+            f"{msg['sender']} to {msg['receiver']}:\n\n{msg['content']}\n\n"
+            for msg in chat_history
+        ]
+    )
+
 
 def messages_table(messages: list) -> Table:
 
```
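A small usage sketch for `format_chat_history` (not part of the diff); the `sender`/`receiver`/`content` keys come straight from the f-string in the function body above:

```python
# format_chat_history joins messages with an 80-dash delimiter line.
from proscenium.history import format_chat_history

chat_history = [
    {"sender": "user", "receiver": "bot", "content": "Hello"},
    {"sender": "bot", "receiver": "user", "content": "Hi, how can I help?"},
]

print(format_chat_history(chat_history))
```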
{proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium/interfaces/slack.py

```diff
@@ -12,8 +12,8 @@ from slack_sdk.socket_mode import SocketModeClient
 from slack_sdk.socket_mode.request import SocketModeRequest
 from slack_sdk.socket_mode.response import SocketModeResponse
 
-from proscenium.core import Production
-from proscenium.core import Character
+from proscenium import Production
+from proscenium import Character
 from proscenium.admin import Admin
 
 log = logging.getLogger(__name__)
```
proscenium-0.0.13/src/proscenium/verbs/complete.py → proscenium-0.0.14/src/proscenium/patterns/tools.py
RENAMED

```diff
@@ -1,107 +1,22 @@
-"""
-This module uses the [`aisuite`](https://github.com/andrewyng/aisuite) library
-to interact with various LLM inference providers.
-
-It provides functions to complete a simple chat prompt, evaluate a tool call,
-and apply a list of tool calls to a chat prompt.
-
-Providers tested with Proscenium include:
-
-# AWS
-
-Environment: `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`
-
-Valid model ids:
-- `aws:meta.llama3-1-8b-instruct-v1:0`
-
-# Anthropic
-
-Environment: `ANTHROPIC_API_KEY`
-
-Valid model ids:
-- `anthropic:claude-3-5-sonnet-20240620`
-
-# OpenAI
-
-Environment: `OPENAI_API_KEY`
-
-Valid model ids:
-- `openai:gpt-4o`
-
-# Ollama
-
-Command line, eg `ollama run llama3.2 --keepalive 2h`
-
-Valid model ids:
-- `ollama:llama3.2`
-- `ollama:granite3.1-dense:2b`
-"""
-
 from typing import Optional
 from typing import Any
 import logging
 import json
 
-from rich.console import Console
 from rich.console import Group
-from rich.panel import Panel
 from rich.table import Table
 from rich.text import Text
+from rich.panel import Panel
+from rich.console import Console
 
 from aisuite import Client as AISuiteClient
 from aisuite.framework.message import ChatCompletionMessageToolCall
 
-from proscenium.verbs.display.tools import complete_with_tools_panel
-
-log = logging.getLogger(__name__)
+from gofannon.base import BaseTool
 
+from proscenium.history import messages_table
 
-def complete_simple(
-    chat_completion_client: AISuiteClient,
-    model_id: str,
-    system_prompt: str,
-    user_prompt: str,
-    **kwargs,
-) -> str:
-
-    console = kwargs.pop("console", None)
-
-    messages = [
-        {"role": "system", "content": system_prompt},
-        {"role": "user", "content": user_prompt},
-    ]
-
-    if console is not None:
-
-        kwargs_text = "\n".join([str(k) + ": " + str(v) for k, v in kwargs.items()])
-
-        params_text = Text(
-            f"""
-model_id: {model_id}
-{kwargs_text}
-"""
-        )
-
-        messages_table = Table(title="Messages", show_lines=True)
-        messages_table.add_column("Role", justify="left")
-        messages_table.add_column("Content", justify="left")  # style="green"
-        for message in messages:
-            messages_table.add_row(message["role"], message["content"])
-
-        call_panel = Panel(
-            Group(params_text, messages_table), title="complete_simple call"
-        )
-        console.print(call_panel)
-
-    response = chat_completion_client.chat.completions.create(
-        model=model_id, messages=messages, **kwargs
-    )
-    response = response.choices[0].message.content
-
-    if console is not None:
-        console.print(Panel(response, title="Response"))
-
-    return response
+log = logging.getLogger(__name__)
 
 
 def evaluate_tool_call(tool_map: dict, tool_call: ChatCompletionMessageToolCall) -> Any:
@@ -207,3 +122,125 @@ def complete_with_tool_results(
     )
 
     return response.choices[0].message.content
+
+
+def process_tools(tools: list[BaseTool]) -> tuple[dict, list]:
+    applied_tools = [F() for F in tools]
+    tool_map = {f.name: f.fn for f in applied_tools}
+    tool_desc_list = [f.definition for f in applied_tools]
+    return tool_map, tool_desc_list
+
+
+def parameters_table(parameters: list[dict]) -> Table:
+
+    table = Table(title="Parameters", show_lines=False, box=None)
+    table.add_column("name", justify="right")
+    table.add_column("type", justify="left")
+    table.add_column("description", justify="left")
+
+    for name, props in parameters["properties"].items():
+        table.add_row(name, props["type"], props["description"])
+
+    # TODO denote required params
+
+    return table
+
+
+def function_description_panel(fd: dict) -> Panel:
+
+    fn = fd["function"]
+
+    text = Text(f"{fd['type']} {fn['name']}: {fn['description']}\n")
+
+    pt = parameters_table(fn["parameters"])
+
+    panel = Panel(Group(text, pt))
+
+    return panel
+
+
+def function_descriptions_panel(function_descriptions: list[dict]) -> Panel:
+
+    sub_panels = [function_description_panel(fd) for fd in function_descriptions]
+
+    panel = Panel(Group(*sub_panels), title="Function Descriptions")
+
+    return panel
+
+
+def complete_with_tools_panel(
+    title: str, model_id: str, tool_desc_list: list, messages: list, temperature: float
+) -> Panel:
+
+    text = Text(
+        f"""
+model_id: {model_id}
+temperature: {temperature}
+"""
+    )
+
+    panel = Panel(
+        Group(
+            text, function_descriptions_panel(tool_desc_list), messages_table(messages)
+        ),
+        title=title,
+    )
+
+    return panel
+
+
+def apply_tools(
+    model_id: str,
+    system_message: str,
+    message: str,
+    tool_desc_list: list,
+    tool_map: dict,
+    temperature: float = 0.75,
+    console: Optional[Console] = None,
+) -> str:
+
+    messages = [
+        {"role": "system", "content": system_message},
+        {"role": "user", "content": message},
+    ]
+
+    response = complete_for_tool_applications(
+        model_id, messages, tool_desc_list, temperature, console
+    )
+
+    tool_call_message = response.choices[0].message
+
+    if tool_call_message.tool_calls is None or len(tool_call_message.tool_calls) == 0:
+
+        if console is not None:
+            console.print(
+                Panel(
+                    Text(str(tool_call_message.content)),
+                    title="Tool Application Response",
+                )
+            )
+
+        log.info("No tool applications detected")
+
+        return tool_call_message.content
+
+    else:
+
+        if console is not None:
+            console.print(
+                Panel(Text(str(tool_call_message)), title="Tool Application Response")
+            )
+
+        tool_evaluation_messages = evaluate_tool_calls(tool_call_message, tool_map)
+
+        result = complete_with_tool_results(
+            model_id,
+            messages,
+            tool_call_message,
+            tool_evaluation_messages,
+            tool_desc_list,
+            temperature,
+            console,
+        )
+
+        return result
```
{proscenium-0.0.13 → proscenium-0.0.14/src/proscenium.egg-info}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: proscenium
-Version: 0.0.13
+Version: 0.0.14
 Summary: Declare Collaborative, Asynchronous Human To Agent Interactions
 Author-email: Adam Pingel <oss@pingel.org>
 License-Expression: Apache-2.0
@@ -25,15 +25,17 @@ Dynamic: license-file
 
 # Proscenium
 
-[](https://pypi.org/project/proscenium/)
 [](https://pypi.org/project/proscenium/)
 [](https://github.com/The-AI-Alliance/proscenium/actions/workflows/pytest.yml)
-[](https://pypi.org/project/proscenium/)
 [](https://github.com/The-AI-Alliance/proscenium/tree/main?tab=Apache-2.0-1-ov-file#readme)
 [](https://github.com/The-AI-Alliance/proscenium/issues)
 [](https://github.com/The-AI-Alliance/proscenium/stargazers)
 
-Proscenium is a small, experimental library
+Proscenium is a small, experimental library for
+declaring collaborative, asynchronous human to agent interactions
+in enterprise AI applications.
+It was started in February 2025 and is still in early development.
 
 See the [website](https://the-ai-alliance.github.io/proscenium/) for quickstart info, goals, and other links.
 
```
{proscenium-0.0.13 → proscenium-0.0.14}/src/proscenium.egg-info/SOURCES.txt

```diff
@@ -2,6 +2,8 @@ LICENSE
 README.md
 pyproject.toml
 src/proscenium/__init__.py
+src/proscenium/complete.py
+src/proscenium/history.py
 src/proscenium.egg-info/PKG-INFO
 src/proscenium.egg-info/SOURCES.txt
 src/proscenium.egg-info/dependency_links.txt
@@ -11,7 +13,6 @@ src/proscenium.egg-info/top_level.txt
 src/proscenium/admin/__init__.py
 src/proscenium/bin/__init__.py
 src/proscenium/bin/bot.py
-src/proscenium/core/__init__.py
 src/proscenium/interfaces/__init__.py
 src/proscenium/interfaces/slack.py
 src/proscenium/patterns/__init__.py
@@ -19,14 +20,6 @@ src/proscenium/patterns/graph_rag.py
 src/proscenium/patterns/rag.py
 src/proscenium/patterns/tools.py
 src/proscenium/util/__init__.py
-src/proscenium/verbs/__init__.py
-src/proscenium/verbs/complete.py
-src/proscenium/verbs/display.py
-src/proscenium/verbs/invoke.py
-src/proscenium/verbs/remember.py
-src/proscenium/verbs/display/__init__.py
-src/proscenium/verbs/display/chat.py
-src/proscenium/verbs/display/tools.py
 tests/test_demo_typer_help.py
 tests/test_display.py
 tests/test_slack_echo.py
```
proscenium-0.0.13/src/proscenium/patterns/tools.py

```diff
@@ -1,71 +0,0 @@
-from typing import Optional
-import logging
-
-from rich.console import Console
-from rich.panel import Panel
-from rich.text import Text
-
-from proscenium.verbs.complete import (
-    complete_for_tool_applications,
-    evaluate_tool_calls,
-    complete_with_tool_results,
-)
-
-log = logging.getLogger(__name__)
-
-
-def apply_tools(
-    model_id: str,
-    system_message: str,
-    message: str,
-    tool_desc_list: list,
-    tool_map: dict,
-    temperature: float = 0.75,
-    console: Optional[Console] = None,
-) -> str:
-
-    messages = [
-        {"role": "system", "content": system_message},
-        {"role": "user", "content": message},
-    ]
-
-    response = complete_for_tool_applications(
-        model_id, messages, tool_desc_list, temperature, console
-    )
-
-    tool_call_message = response.choices[0].message
-
-    if tool_call_message.tool_calls is None or len(tool_call_message.tool_calls) == 0:
-
-        if console is not None:
-            console.print(
-                Panel(
-                    Text(str(tool_call_message.content)),
-                    title="Tool Application Response",
-                )
-            )
-
-        log.info("No tool applications detected")
-
-        return tool_call_message.content
-
-    else:
-
-        if console is not None:
-            console.print(
-                Panel(Text(str(tool_call_message)), title="Tool Application Response")
-            )
-
-        tool_evaluation_messages = evaluate_tool_calls(tool_call_message, tool_map)
-
-        result = complete_with_tool_results(
-            model_id,
-            messages,
-            tool_call_message,
-            tool_evaluation_messages,
-            tool_desc_list,
-            temperature,
-            console,
-        )
-
-        return result
```
proscenium-0.0.13/src/proscenium/verbs/display/tools.py

```diff
@@ -1,64 +0,0 @@
-from rich.console import Group
-from rich.table import Table
-from rich.text import Text
-from rich.panel import Panel
-
-from .chat import messages_table
-
-
-def parameters_table(parameters: list[dict]) -> Table:
-
-    table = Table(title="Parameters", show_lines=False, box=None)
-    table.add_column("name", justify="right")
-    table.add_column("type", justify="left")
-    table.add_column("description", justify="left")
-
-    for name, props in parameters["properties"].items():
-        table.add_row(name, props["type"], props["description"])
-
-    # TODO denote required params
-
-    return table
-
-
-def function_description_panel(fd: dict) -> Panel:
-
-    fn = fd["function"]
-
-    text = Text(f"{fd['type']} {fn['name']}: {fn['description']}\n")
-
-    pt = parameters_table(fn["parameters"])
-
-    panel = Panel(Group(text, pt))
-
-    return panel
-
-
-def function_descriptions_panel(function_descriptions: list[dict]) -> Panel:
-
-    sub_panels = [function_description_panel(fd) for fd in function_descriptions]
-
-    panel = Panel(Group(*sub_panels), title="Function Descriptions")
-
-    return panel
-
-
-def complete_with_tools_panel(
-    title: str, model_id: str, tool_desc_list: list, messages: list, temperature: float
-) -> Panel:
-
-    text = Text(
-        f"""
-model_id: {model_id}
-temperature: {temperature}
-"""
-    )
-
-    panel = Panel(
-        Group(
-            text, function_descriptions_panel(tool_desc_list), messages_table(messages)
-        ),
-        title=title,
-    )
-
-    return panel
```
proscenium-0.0.13/src/proscenium/verbs/display.py

```diff
@@ -1,13 +0,0 @@
-import logging
-from rich.text import Text
-
-log = logging.getLogger(__name__)
-
-
-def header() -> Text:
-    text = Text(
-        """[bold]Proscenium[/bold] :performing_arts:
-[bold]The AI Alliance[/bold]"""
-    )
-    # TODO version, timestamp, ...
-    return text
```
proscenium-0.0.13/src/proscenium/verbs/invoke.py

```diff
@@ -1,11 +0,0 @@
-import logging
-from gofannon.base import BaseTool
-
-log = logging.getLogger(__name__)
-
-
-def process_tools(tools: list[BaseTool]) -> tuple[dict, list]:
-    applied_tools = [F() for F in tools]
-    tool_map = {f.name: f.fn for f in applied_tools}
-    tool_desc_list = [f.definition for f in applied_tools]
-    return tool_map, tool_desc_list
```
proscenium-0.0.13/src/proscenium/verbs/remember.py

```diff
@@ -1,13 +0,0 @@
-import logging
-
-log = logging.getLogger(__name__)
-
-
-def format_chat_history(chat_history) -> str:
-    delimiter = "-" * 80 + "\n"
-    return delimiter.join(
-        [
-            f"{msg['sender']} to {msg['receiver']}:\n\n{msg['content']}\n\n"
-            for msg in chat_history
-        ]
-    )
```