proscenium 0.0.1__tar.gz → 0.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. {proscenium-0.0.1 → proscenium-0.0.2}/PKG-INFO +4 -8
  2. {proscenium-0.0.1 → proscenium-0.0.2}/README.md +3 -6
  3. proscenium-0.0.2/proscenium/scripts/chunk_space.py +39 -0
  4. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/scripts/document_enricher.py +13 -11
  5. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/scripts/entity_resolver.py +19 -13
  6. proscenium-0.0.2/proscenium/scripts/graph_rag.py +56 -0
  7. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/scripts/knowledge_graph.py +2 -2
  8. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/scripts/rag.py +8 -12
  9. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/scripts/tools.py +12 -46
  10. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/complete.py +22 -28
  11. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/display/__init__.py +1 -1
  12. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/extract.py +4 -2
  13. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/read.py +0 -2
  14. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/vector_database.py +7 -7
  15. {proscenium-0.0.1 → proscenium-0.0.2}/pyproject.toml +1 -2
  16. proscenium-0.0.1/proscenium/scripts/chunk_space.py +0 -33
  17. proscenium-0.0.1/proscenium/scripts/graph_rag.py +0 -43
  18. proscenium-0.0.1/proscenium/verbs/know.py +0 -9
  19. {proscenium-0.0.1 → proscenium-0.0.2}/LICENSE +0 -0
  20. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/__init__.py +0 -0
  21. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/scripts/__init__.py +0 -0
  22. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/__init__.py +0 -0
  23. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/chunk.py +0 -0
  24. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/display/chat.py +0 -0
  25. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/display/huggingface.py +0 -0
  26. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/display/milvus.py +0 -0
  27. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/display/neo4j.py +0 -0
  28. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/display/tools.py +0 -0
  29. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/display.py +0 -0
  30. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/invoke.py +0 -0
  31. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/remember.py +0 -0
  32. {proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/write.py +0 -0

{proscenium-0.0.1 → proscenium-0.0.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: proscenium
-Version: 0.0.1
+Version: 0.0.2
 Summary: Frame AI Agents
 License: ASFv2
 Author: Adam Pingel
@@ -26,7 +26,6 @@ Requires-Dist: pymilvus_model (>=0.3.1,<0.4.0)
 Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
 Requires-Dist: rich (>=13.9.4,<14.0.0)
 Requires-Dist: stringcase (>=1.2.0,<2.0.0)
-Requires-Dist: thespian (>=4.0.0,<5.0.0)
 Requires-Dist: tiktoken (>=0.9.0,<0.10.0)
 Requires-Dist: typer (>=0.15.2,<0.16.0)
 Description-Content-Type: text/markdown
@@ -34,17 +33,14 @@ Description-Content-Type: text/markdown
 # Proscenium
 
 [![CI](https://github.com/The-AI-Alliance/proscenium/actions/workflows/pytest.yml/badge.svg)](https://github.com/The-AI-Alliance/proscenium/actions/workflows/pytest.yml)
+[![PyPI](https://img.shields.io/pypi/v/proscenium)](https://pypi.org/project/proscenium/)
 [![License](https://img.shields.io/github/license/The-AI-Alliance/proscenium)](https://github.com/The-AI-Alliance/proscenium/tree/main?tab=Apache-2.0-1-ov-file#readme)
 [![Issues](https://img.shields.io/github/issues/The-AI-Alliance/proscenium)](https://github.com/The-AI-Alliance/proscenium/issues)
 [![GitHub stars](https://img.shields.io/github/stars/The-AI-Alliance/proscenium?style=social)](https://github.com/The-AI-Alliance/proscenium/stargazers)
 
-Proscenium is a small, experimental library of composable glue that allows for
-succinct construction of enterprise AI applications. It is in early development.
+Proscenium is a small, experimental library of composable glue that allows for succinct construction of enterprise AI applications. It was started in February 2025 and is still in early development.
 
-It is also set of simple demonstration applications that elucidate aspects of application and library design.
-
-Currently, proscenium development prioritizes support for domains where
-the creation and use of structured data is critical.
+Currently, proscenium development prioritizes support for domains where the creation and use of structured data is critical.
 
 See the [website](https://the-ai-alliance.github.io/proscenium/) for quickstart info, goals, and other links.
 

{proscenium-0.0.1 → proscenium-0.0.2}/README.md

@@ -1,17 +1,14 @@
 # Proscenium
 
 [![CI](https://github.com/The-AI-Alliance/proscenium/actions/workflows/pytest.yml/badge.svg)](https://github.com/The-AI-Alliance/proscenium/actions/workflows/pytest.yml)
+[![PyPI](https://img.shields.io/pypi/v/proscenium)](https://pypi.org/project/proscenium/)
 [![License](https://img.shields.io/github/license/The-AI-Alliance/proscenium)](https://github.com/The-AI-Alliance/proscenium/tree/main?tab=Apache-2.0-1-ov-file#readme)
 [![Issues](https://img.shields.io/github/issues/The-AI-Alliance/proscenium)](https://github.com/The-AI-Alliance/proscenium/issues)
 [![GitHub stars](https://img.shields.io/github/stars/The-AI-Alliance/proscenium?style=social)](https://github.com/The-AI-Alliance/proscenium/stargazers)
 
-Proscenium is a small, experimental library of composable glue that allows for
-succinct construction of enterprise AI applications. It is in early development.
+Proscenium is a small, experimental library of composable glue that allows for succinct construction of enterprise AI applications. It was started in February 2025 and is still in early development.
 
-It is also set of simple demonstration applications that elucidate aspects of application and library design.
-
-Currently, proscenium development prioritizes support for domains where
-the creation and use of structured data is critical.
+Currently, proscenium development prioritizes support for domains where the creation and use of structured data is critical.
 
 See the [website](https://the-ai-alliance.github.io/proscenium/) for quickstart info, goals, and other links.
 

proscenium-0.0.2/proscenium/scripts/chunk_space.py (new file)

@@ -0,0 +1,39 @@
+import logging
+
+from pymilvus import MilvusClient
+from pymilvus import model
+
+from proscenium.verbs.read import load_file
+from proscenium.verbs.chunk import documents_to_chunks_by_characters
+from proscenium.verbs.vector_database import create_collection
+from proscenium.verbs.vector_database import add_chunks_to_vector_db
+from proscenium.verbs.display.milvus import collection_panel
+
+
+def make_vector_db_builder(
+    data_files: list[str],
+    vector_db_client: MilvusClient,
+    embedding_fn: model.dense.SentenceTransformerEmbeddingFunction,
+    collection_name: str,
+):
+
+    def build():
+
+        create_collection(
+            vector_db_client, embedding_fn, collection_name, overwrite=True
+        )
+
+        for data_file in data_files:
+
+            documents = load_file(data_file)
+            chunks = documents_to_chunks_by_characters(documents)
+            logging.info("Data file %s has %s chunks", data_file, len(chunks))
+
+            info = add_chunks_to_vector_db(
+                vector_db_client, embedding_fn, chunks, collection_name
+            )
+            logging.info("%s chunks inserted", info["insert_count"])
+
+        logging.info(collection_panel(vector_db_client, collection_name))
+
+    return build
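
The new `make_vector_db_builder` replaces 0.0.1's eager `build_vector_db` (removed below) with a factory that returns a zero-argument `build` closure, so construction and execution can be separated. A minimal usage sketch, assuming the `vector_db` helper from `proscenium/verbs/vector_database.py` shown later in this diff; the file paths are illustrative:

```python
from pymilvus import model

from proscenium.verbs.vector_database import vector_db
from proscenium.scripts.chunk_space import make_vector_db_builder

# Illustrative setup: a local Milvus Lite file and pymilvus's default
# sentence-transformer embedding function.
client = vector_db("milvus.db", overwrite=True)
embedding_fn = model.dense.SentenceTransformerEmbeddingFunction()

# Construct the builder now; invoke it later (e.g. from a CLI subcommand).
build = make_vector_db_builder(["data/example.txt"], client, embedding_fn, "chunks")
build()
```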

{proscenium-0.0.1 → proscenium-0.0.2}/proscenium/scripts/document_enricher.py

@@ -1,12 +1,13 @@
 from typing import List
 from typing import Callable
-from typing import Any
+from typing import Optional
 
 import time
+import logging
 from pydantic import BaseModel
 
-from rich import print
 from rich.panel import Panel
+from rich.console import Console
 from rich.progress import Progress
 
 from langchain_core.documents.base import Document
@@ -22,11 +23,12 @@ def extract_from_document_chunks(
     chunk_extraction_template: str,
     chunk_extract_clazz: type[BaseModel],
     delay: float,
-    verbose: bool = False,
+    console: Optional[Console] = None,
 ) -> List[BaseModel]:
 
-    print(doc_as_rich(doc))
-    print()
+    if console is not None:
+        console.print(doc_as_rich(doc))
+        console.print()
 
     extract_models = []
 
@@ -40,9 +42,9 @@ def extract_from_document_chunks(
             chunk.page_content,
         )
 
-        if verbose:
-            print("Extract model in chunk", i + 1, "of", len(chunks))
-            print(Panel(str(ce)))
+        logging.info("Extract model in chunk %s of %s", i + 1, len(chunks))
+        if console is not None:
+            console.print(Panel(str(ce)))
 
         extract_models.append(ce)
         time.sleep(delay)
@@ -55,7 +57,7 @@ def enrich_documents(
     extract_from_doc_chunks: Callable[[Document], List[BaseModel]],
     doc_enrichments: Callable[[Document, list[BaseModel]], BaseModel],
     enrichments_jsonl_file: str,
-    verbose: bool = False,
+    console: Optional[Console] = None,
 ) -> None:
 
     docs = retrieve_documents()
@@ -70,11 +72,11 @@ def enrich_documents(
 
         for doc in docs:
 
-            chunk_extract_models = extract_from_doc_chunks(doc, verbose)
+            chunk_extract_models = extract_from_doc_chunks(doc)
             enrichments = doc_enrichments(doc, chunk_extract_models)
             enrichments_json = enrichments.model_dump_json()
             f.write(enrichments_json + "\n")
 
             progress.update(task_enrich, advance=1)
 
-    print("Wrote document enrichments to", enrichments_jsonl_file)
+    logging.info("Wrote document enrichments to %s", enrichments_jsonl_file)

{proscenium-0.0.1 → proscenium-0.0.2}/proscenium/scripts/entity_resolver.py

@@ -1,6 +1,7 @@
 from typing import Optional
-from rich import print
+import logging
 
+from rich.console import Console
 from langchain_core.documents.base import Document
 from neo4j import Driver
 
@@ -21,27 +22,27 @@ class Resolver:
         cypher: str,
         field_name: str,
         collection_name: str,
-        embedding_model_id: str,
     ):
         self.cypher = cypher
         self.field_name = field_name
         self.collection_name = collection_name
-        self.embedding_model_id = embedding_model_id
 
 
 def load_entity_resolver(
     driver: Driver,
     resolvers: list[Resolver],
+    embedding_model_id: str,
     milvus_uri: str,
+    console: Optional[Console] = None,
 ) -> None:
 
     vector_db_client = vector_db(milvus_uri, overwrite=True)
-    print("Vector db stored at", milvus_uri)
+    logging.info("Vector db stored at %s", milvus_uri)
 
     for resolver in resolvers:
 
-        embedding_fn = embedding_function(resolver.embedding_model_id)
-        print("Embedding model", resolver.embedding_model_id)
+        embedding_fn = embedding_function(embedding_model_id)
+        logging.info("Embedding model %s", embedding_model_id)
 
         values = []
         with driver.session() as session:
@@ -49,15 +50,18 @@ def load_entity_resolver(
             new_values = [Document(record[resolver.field_name]) for record in result]
             values.extend(new_values)
 
-        print("Loading entity resolver into vector db", resolver.collection_name)
+        logging.info(
+            "Loading entity resolver into vector db %s", resolver.collection_name
+        )
         create_collection(
            vector_db_client, embedding_fn, resolver.collection_name, overwrite=True
         )
         info = add_chunks_to_vector_db(
            vector_db_client, embedding_fn, values, resolver.collection_name
         )
-        print(info["insert_count"], "chunks inserted")
-        print(collection_panel(vector_db_client, resolver.collection_name))
+        logging.info("%s chunks inserted ", info["insert_count"])
+        if console is not None:
+            console.print(collection_panel(vector_db_client, resolver.collection_name))
 
     vector_db_client.close()
 
@@ -68,10 +72,12 @@ def find_matching_objects(
     resolver: Resolver,
 ) -> Optional[str]:
 
-    print("Loading collection", resolver.collection_name)
+    logging.info("Loading collection", resolver.collection_name)
     vector_db_client.load_collection(resolver.collection_name)
 
-    print("Finding entity matches for", approximate, "using", resolver.collection_name)
+    logging.info(
+        "Finding entity matches for", approximate, "using", resolver.collection_name
+    )
 
     hits = closest_chunks(
         vector_db_client,
@@ -82,8 +88,8 @@ def find_matching_objects(
     )
     # TODO apply distance threshold
     for match in [head["entity"]["text"] for head in hits[:1]]:
-        print("Closest match:", match)
+        logging.info("Closest match:", match)
         return match
 
-    print("No match found")
+    logging.info("No match found")
     return None
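
Since `embedding_model_id` moves off of `Resolver` and onto `load_entity_resolver`, all resolvers now share a single embedding model. A sketch of the updated call, with an illustrative Cypher query, model id, and connection details:

```python
from neo4j import GraphDatabase

from proscenium.scripts.entity_resolver import Resolver, load_entity_resolver

# Illustrative connection details.
driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))

resolvers = [
    Resolver(
        cypher="MATCH (e:Entity) RETURN e.name AS name",  # illustrative query
        field_name="name",
        collection_name="entities",
        # embedding_model_id is no longer set per resolver in 0.0.2
    )
]

load_entity_resolver(driver, resolvers, "all-MiniLM-L6-v2", "milvus.db")
```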

proscenium-0.0.2/proscenium/scripts/graph_rag.py (new file)

@@ -0,0 +1,56 @@
+from typing import Callable
+from typing import Optional
+
+import logging
+
+from rich.console import Console
+
+from pydantic import BaseModel
+from uuid import uuid4, UUID
+from neo4j import Driver
+
+
+def query_to_prompts(
+    query: str,
+    query_extraction_model_id: str,
+    milvus_uri: str,
+    driver: Driver,
+    query_extract: Callable[
+        [str, str], BaseModel
+    ],  # (query_str, query_extraction_model_id) -> QueryExtractions
+    query_extract_to_graph: Callable[
+        [str, UUID, BaseModel], None
+    ],  # query, query_id, extract
+    query_extract_to_context: Callable[
+        [BaseModel, str, Driver, str, Optional[Console]], BaseModel
+    ],  # (QueryExtractions, query_str, Driver, milvus_uri) -> Context
+    context_to_prompts: Callable[
+        [BaseModel], tuple[str, str]
+    ],  # Context -> (system_prompt, user_prompt)
+    console: Optional[Console] = None,
+) -> str:
+
+    query_id = uuid4()
+
+    logging.info("Extracting information from the question")
+
+    extract = query_extract(query, query_extraction_model_id)
+    if extract is None:
+        logging.info("Unable to extract information from that question")
+        return None
+
+    logging.info("Extract: %s", extract)
+
+    logging.info("Storing the extracted information in the graph")
+    query_extract_to_graph(query, query_id, extract, driver)
+
+    logging.info("Forming context from the extracted information")
+    context = query_extract_to_context(
+        extract, query, driver, milvus_uri, console=console
+    )
+
+    logging.info("Context: %s", context)
+
+    prompts = context_to_prompts(context)
+
+    return prompts
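
The rewritten `query_to_prompts` threads a fresh `query_id` through a new `query_extract_to_graph` stage before building context, and drops the old `verbose` plumbing. A wiring sketch; the four callables and the model id are application-specific placeholders. Note that the body above calls `query_extract_to_graph` with four arguments (query, query_id, extract, driver), so the sketch follows the call site rather than the three-argument annotation:

```python
from rich.console import Console

from proscenium.scripts.graph_rag import query_to_prompts

# All names below except query_to_prompts are hypothetical placeholders.
prompts = query_to_prompts(
    "Which cases cite this judge?",  # illustrative query
    "ollama:granite3.1-dense:2b",    # a model id format documented in complete.py
    "milvus.db",
    driver,                    # an open neo4j.Driver
    query_extract,             # (query, model_id) -> QueryExtractions
    query_extract_to_graph,    # called as (query, query_id, extract, driver)
    query_extract_to_context,  # (extract, query, driver, milvus_uri, console=...) -> Context
    context_to_prompts,        # (context) -> (system_prompt, user_prompt)
    console=Console(),
)
if prompts is not None:
    system_prompt, user_prompt = prompts
```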

{proscenium-0.0.1 → proscenium-0.0.2}/proscenium/scripts/knowledge_graph.py

@@ -1,10 +1,10 @@
 from typing import Callable
 from typing import Any
 
+import logging
 import json
 from pydantic import BaseModel
 
-from rich import print
 from rich.progress import Progress
 
 from neo4j import Driver
@@ -17,7 +17,7 @@ def load_knowledge_graph(
     doc_enrichments_to_graph: Callable[[Any, BaseModel], None],
 ) -> None:
 
-    print("Parsing enrichments from", enrichments_jsonl_file)
+    logging.info("Parsing enrichments from %s", enrichments_jsonl_file)
 
     enrichmentss = []
     with open(enrichments_jsonl_file, "r") as f:

{proscenium-0.0.1 → proscenium-0.0.2}/proscenium/scripts/rag.py

@@ -1,7 +1,7 @@
-from typing import List, Dict
+from typing import List, Dict, Optional
+import logging
 
-from rich import print
-from rich.panel import Panel
+from rich.console import Console
 
 from pymilvus import MilvusClient
 from pymilvus import model
@@ -44,20 +44,16 @@ def answer_question(
     vector_db_client: MilvusClient,
     embedding_fn: model.dense.SentenceTransformerEmbeddingFunction,
     collection_name: str,
-    verbose: bool = False,
+    console: Optional[Console] = None,
 ) -> str:
 
-    print(Panel(query, title="User"))
-
     chunks = closest_chunks(vector_db_client, embedding_fn, query, collection_name)
-    if verbose:
-        print("Found", len(chunks), "closest chunks")
-        print(chunk_hits_table(chunks))
+    logging.info("Found %s closest chunks", len(chunks))
+    logging.info(chunk_hits_table(chunks))
 
     prompt = rag_prompt(chunks, query)
-    if verbose:
-        print("RAG prompt created. Calling inference at", model_id, "\n\n")
+    logging.info("RAG prompt created. Calling inference at %s", model_id)
 
-    answer = complete_simple(model_id, rag_system_prompt, prompt, rich_output=verbose)
+    answer = complete_simple(model_id, rag_system_prompt, prompt, console=console)
 
     return answer
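
With the `verbose` flag gone, `answer_question` no longer echoes the user query in a panel; retrieval details go to `logging`, and rich output follows the optional console passed down to `complete_simple`. A calling sketch; the leading `query` and `model_id` parameters are inferred from the function body, and the client and embedding function come from setup like that shown for `chunk_space.py` above:

```python
import logging
from rich.console import Console

from proscenium.scripts.rag import answer_question

logging.basicConfig(level=logging.INFO)

answer = answer_question(
    "What does the contract say about termination?",  # illustrative query
    "ollama:granite3.1-dense:2b",                     # illustrative model id
    client,        # MilvusClient, e.g. from vector_db("milvus.db")
    embedding_fn,  # SentenceTransformerEmbeddingFunction
    "chunks",
    console=Console(),
)
```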

{proscenium-0.0.1 → proscenium-0.0.2}/proscenium/scripts/tools.py

@@ -1,47 +1,15 @@
-from typing import List
+from typing import Optional
+import logging
 
-from rich import print
+from rich.console import Console
 from rich.panel import Panel
 from rich.text import Text
-from thespian.actors import Actor
-
-from gofannon.base import BaseTool
 
 from proscenium.verbs.complete import (
     complete_for_tool_applications,
     evaluate_tool_calls,
     complete_with_tool_results,
 )
-from proscenium.verbs.invoke import process_tools
-
-
-def tool_applier_actor_class(
-    tools: List[BaseTool],
-    system_message: str,
-    model_id: str,
-    temperature: float = 0.75,
-    rich_output: bool = False,
-):
-
-    tool_map, tool_desc_list = process_tools(tools)
-
-    class ToolApplier(Actor):
-
-        def receiveMessage(self, message, sender):
-
-            response = apply_tools(
-                model_id=model_id,
-                system_message=system_message,
-                message=message,
-                tool_desc_list=tool_desc_list,
-                tool_map=tool_map,
-                temperature=temperature,
-                rich_output=rich_output,
-            )
-
-            self.send(sender, response)
-
-    return ToolApplier
 
 
 def apply_tools(
@@ -51,7 +19,7 @@ def apply_tools(
     tool_desc_list: list,
     tool_map: dict,
     temperature: float = 0.75,
-    rich_output: bool = False,
+    console: Optional[Console] = None,
 ) -> str:
 
     messages = [
@@ -60,35 +28,33 @@ def apply_tools(
     ]
 
     response = complete_for_tool_applications(
-        model_id, messages, tool_desc_list, temperature, rich_output
+        model_id, messages, tool_desc_list, temperature, console
     )
 
     tool_call_message = response.choices[0].message
 
     if tool_call_message.tool_calls is None or len(tool_call_message.tool_calls) == 0:
 
-        if rich_output:
-            print(
+        if console is not None:
+            console.print(
                 Panel(
                     Text(str(tool_call_message.content)),
                     title="Tool Application Response",
                 )
             )
 
-        print("No tool applications detected")
+        logging.info("No tool applications detected")
 
         return tool_call_message.content
 
     else:
 
-        if rich_output:
-            print(
+        if console is not None:
+            console.print(
                 Panel(Text(str(tool_call_message)), title="Tool Application Response")
             )
 
-        tool_evaluation_messages = evaluate_tool_calls(
-            tool_call_message, tool_map, rich_output
-        )
+        tool_evaluation_messages = evaluate_tool_calls(tool_call_message, tool_map)
 
         result = complete_with_tool_results(
             model_id,
@@ -97,7 +63,7 @@ def apply_tools(
             tool_evaluation_messages,
             tool_desc_list,
             temperature,
-            rich_output,
+            console,
         )
 
         return result
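
Removing `tool_applier_actor_class` (and the `thespian` dependency with it) leaves `apply_tools` as the direct entry point; the tool bookkeeping the actor factory did is still available via `process_tools`, which remains in the unchanged `proscenium/verbs/invoke.py`. A sketch of the direct call, assuming a list of gofannon `BaseTool` instances as in the removed code:

```python
from rich.console import Console

from proscenium.verbs.invoke import process_tools
from proscenium.scripts.tools import apply_tools

tools = [my_tool]  # hypothetical: gofannon BaseTool instances, as in 0.0.1's actor factory
tool_map, tool_desc_list = process_tools(tools)

result = apply_tools(
    model_id="ollama:granite3.1-dense:2b",  # illustrative model id
    system_message="You are a helpful agent.",
    message="What is the weather in Paris?",
    tool_desc_list=tool_desc_list,
    tool_map=tool_map,
    temperature=0.75,
    console=Console(),  # or None for logging-only output
)
```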

{proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/complete.py

@@ -37,10 +37,12 @@ Valid model ids:
 - `ollama:granite3.1-dense:2b`
 """
 
+from typing import Optional
 from typing import Any
-
+import logging
 import json
-from rich import print
+
+from rich.console import Console
 from rich.console import Group
 from rich.panel import Panel
 from rich.table import Table
@@ -63,14 +65,14 @@ def complete_simple(
     model_id: str, system_prompt: str, user_prompt: str, **kwargs
 ) -> str:
 
-    rich_output = kwargs.pop("rich_output", False)
+    console = kwargs.pop("console", None)
 
     messages = [
         {"role": "system", "content": system_prompt},
         {"role": "user", "content": user_prompt},
     ]
 
-    if rich_output:
+    if console is not None:
 
         kwargs_text = "\n".join([str(k) + ": " + str(v) for k, v in kwargs.items()])
 
@@ -90,34 +92,30 @@ model_id: {model_id}
         call_panel = Panel(
             Group(params_text, messages_table), title="complete_simple call"
         )
-        print(call_panel)
+        console.print(call_panel)
 
     response = client.chat.completions.create(
         model=model_id, messages=messages, **kwargs
     )
     response = response.choices[0].message.content
 
-    if rich_output:
-        print(Panel(response, title="Response"))
+    if console is not None:
+        console.print(Panel(response, title="Response"))
 
     return response
 
 
-def evaluate_tool_call(
-    tool_map: dict, tool_call: ChatCompletionMessageToolCall, rich_output: bool = False
-) -> Any:
+def evaluate_tool_call(tool_map: dict, tool_call: ChatCompletionMessageToolCall) -> Any:
 
     function_name = tool_call.function.name
     # TODO validate the arguments?
     function_args = json.loads(tool_call.function.arguments)
 
-    if rich_output:
-        print(f"Evaluating tool call: {function_name} with args {function_args}")
+    logging.info(f"Evaluating tool call: {function_name} with args {function_args}")
 
     function_response = tool_map[function_name](**function_args)
 
-    if rich_output:
-        print(f" Response: {function_response}")
+    logging.info(f" Response: {function_response}")
 
     return function_response
 
@@ -134,23 +132,19 @@ def tool_response_message(
     }
 
 
-def evaluate_tool_calls(
-    tool_call_message, tool_map: dict, rich_output: bool = False
-) -> list[dict]:
+def evaluate_tool_calls(tool_call_message, tool_map: dict) -> list[dict]:
 
     tool_call: ChatCompletionMessageToolCall
 
-    if rich_output:
-        print("Evaluating tool calls")
+    logging.info("Evaluating tool calls")
 
     new_messages: list[dict] = []
 
     for tool_call in tool_call_message.tool_calls:
-        function_response = evaluate_tool_call(tool_map, tool_call, rich_output)
+        function_response = evaluate_tool_call(tool_map, tool_call)
         new_messages.append(tool_response_message(tool_call, function_response))
 
-    if rich_output:
-        print("Tool calls evaluated")
+    logging.info("Tool calls evaluated")
 
     return new_messages
 
@@ -160,10 +154,10 @@ def complete_for_tool_applications(
     messages: list,
     tool_desc_list: list,
     temperature: float,
-    rich_output: bool = False,
+    console: Optional[Console] = None,
 ):
 
-    if rich_output:
+    if console is not None:
         panel = complete_with_tools_panel(
             "complete for tool applications",
             model_id,
@@ -171,7 +165,7 @@ def complete_for_tool_applications(
             messages,
             temperature,
         )
-        print(panel)
+        console.print(panel)
 
     response = client.chat.completions.create(
         model=model_id,
@@ -190,13 +184,13 @@ def complete_with_tool_results(
     tool_evaluation_messages: list[dict],
     tool_desc_list: list,
     temperature: float,
-    rich_output: bool = False,
+    console: Optional[Console] = None,
 ):
 
     messages.append(tool_call_message)
     messages.extend(tool_evaluation_messages)
 
-    if rich_output:
+    if console is not None:
         panel = complete_with_tools_panel(
             "complete call with tool results",
             model_id,
@@ -204,7 +198,7 @@ def complete_with_tool_results(
             messages,
             temperature,
         )
-        print(panel)
+        console.print(panel)
 
     response = client.chat.completions.create(
         model=model_id,
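
`complete_simple` now pops an optional `console` from `**kwargs` instead of a `rich_output` bool, so callers swap a flag for a `Console` object and any remaining kwargs still pass through to the completion client. A minimal sketch with an illustrative model id:

```python
from rich.console import Console

from proscenium.verbs.complete import complete_simple

answer = complete_simple(
    "ollama:granite3.1-dense:2b",  # illustrative; see the module docstring's valid id formats
    "You are a terse assistant.",
    "Define 'proscenium' in one sentence.",
    console=Console(),  # omit for quiet operation
)
```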

{proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/display/__init__.py

@@ -4,6 +4,6 @@ from rich.text import Text
 def header() -> Text:
     text = Text()
     text.append("Proscenium 🎭\n", style="bold")
-    text.append("The AI Alliance\n", style="bold")
+    text.append("https://the-ai-alliance.github.io/proscenium/\n")
     # TODO version, timestamp, ...
     return text

{proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/extract.py

@@ -1,4 +1,6 @@
+from typing import Optional
 import logging
+from rich.console import Console
 from string import Formatter
 
 import json
@@ -36,7 +38,7 @@ def extract_to_pydantic_model(
     extraction_template: str,
     clazz: type[BaseModel],
     text: str,
-    verbose: bool = False,
+    console: Optional[Console] = None,
 ) -> BaseModel:
 
     extract_str = complete_simple(
@@ -47,7 +49,7 @@ def extract_to_pydantic_model(
             "type": "json_object",
            "schema": clazz.model_json_schema(),
        },
-        rich_output=verbose,
+        console=console,
    )
 
     logging.info("complete_to_pydantic_model: extract_str = <<<%s>>>", extract_str)

{proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/read.py

@@ -42,12 +42,10 @@ from pathlib import Path
 async def url_to_file(url: HttpUrl, data_file: Path, overwrite: bool = False):
 
     if data_file.exists() and not overwrite:
-        # print(f"File {data_file} exists. Use overwrite=True to replace.")
         return
 
     async with httpx.AsyncClient() as client:
 
-        # print(f"Downloading {url} to {data_file}...")
         response = await client.get(url)
         response.raise_for_status()
 

{proscenium-0.0.1 → proscenium-0.0.2}/proscenium/verbs/vector_database.py

@@ -1,5 +1,6 @@
-from typing import Dict, List, Optional
+from typing import Dict, List
 
+import logging
 from pathlib import Path
 from langchain_core.documents.base import Document
 from pymilvus import MilvusClient
@@ -54,15 +55,14 @@ def vector_db(
     if file_path.exists():
         if overwrite:
             file_path.unlink()
-            print("Deleted existing vector db file", file_path)
+            logging.info("Deleted existing vector db file %s", file_path)
         else:
-            print(
-                "Using existing",
+            logging.info(
+                "Using existing %s file. Use overwrite=True to replace.",
                 uri_fields[2],
-                "file. Use overwrite=True to replace.",
             )
     else:
-        print("Creating new vector db file", file_path)
+        logging.info("Creating new vector db file %s", file_path)
 
     client = MilvusClient(uri=str(file_path))
 
@@ -100,7 +100,7 @@ def create_collection(
     client.create_index(
         collection_name=collection_name, index_params=index_params, sync=True
     )
-    print("Created collection", collection_name)
+    logging.info("Created collection %s", collection_name)
 
 
 def add_chunks_to_vector_db(

{proscenium-0.0.1 → proscenium-0.0.2}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "proscenium"
-version = "0.0.1"
+version = "0.0.2"
 description = "Frame AI Agents"
 authors = ["Adam Pingel <oss@pingel.org>"]
 license = "ASFv2"
@@ -15,7 +15,6 @@ typer = "^0.15.2"
 python-dotenv = "^1.0.1"
 pydantic = "^2.10.6"
 stringcase = "^1.2.0"
-thespian = "^4.0.0"
 docstring_parser = "^0.16"
 pymilvus = {version = "^2.5.4"}
 pymilvus_model = {version = "^0.3.1"}

proscenium-0.0.1/proscenium/scripts/chunk_space.py (deleted)

@@ -1,33 +0,0 @@
-from rich import print
-
-from pymilvus import MilvusClient
-from pymilvus import model
-
-from proscenium.verbs.read import load_file
-from proscenium.verbs.chunk import documents_to_chunks_by_characters
-from proscenium.verbs.vector_database import create_collection
-from proscenium.verbs.vector_database import add_chunks_to_vector_db
-from proscenium.verbs.display.milvus import collection_panel
-
-
-def build_vector_db(
-    data_files: list[str],
-    vector_db_client: MilvusClient,
-    embedding_fn: model.dense.SentenceTransformerEmbeddingFunction,
-    collection_name: str,
-):
-
-    create_collection(vector_db_client, embedding_fn, collection_name, overwrite=True)
-
-    for data_file in data_files:
-
-        documents = load_file(data_file)
-        chunks = documents_to_chunks_by_characters(documents)
-        print("Data file", data_file, "has", len(chunks), "chunks")
-
-        info = add_chunks_to_vector_db(
-            vector_db_client, embedding_fn, chunks, collection_name
-        )
-        print(info["insert_count"], "chunks inserted")
-
-    print(collection_panel(vector_db_client, collection_name))
@@ -1,43 +0,0 @@
1
- from typing import Callable
2
-
3
- from pydantic import BaseModel
4
-
5
- from rich import print
6
- from rich.panel import Panel
7
-
8
- from neo4j import Driver
9
-
10
- from proscenium.verbs.complete import complete_simple
11
-
12
-
13
- def query_to_prompts(
14
- question: str,
15
- query_extraction_model_id: str,
16
- milvus_uri: str,
17
- driver: Driver,
18
- query_extract: Callable[
19
- [str, str, bool], BaseModel
20
- ], # (query_str, query_extraction_model_id) -> QueryExtractions
21
- extract_to_context: Callable[
22
- [BaseModel, str, Driver, str, bool], BaseModel
23
- ], # (QueryExtractions, query_str, Driver, milvus_uri) -> Context
24
- context_to_prompts: Callable[
25
- [BaseModel, bool], tuple[str, str]
26
- ], # Context -> (system_prompt, user_prompt)
27
- verbose: bool = False,
28
- ) -> str:
29
-
30
- print("Extracting information from the question")
31
- extract = query_extract(question, query_extraction_model_id, verbose)
32
- if extract is None:
33
- print("Unable to extract information from that question")
34
- return None
35
- print("Extract:", extract)
36
-
37
- print("Forming context from the extracted information")
38
- context = extract_to_context(extract, question, driver, milvus_uri, verbose)
39
- print("Context:", context)
40
-
41
- prompts = context_to_prompts(context, verbose)
42
-
43
- return prompts
@@ -1,9 +0,0 @@
1
- from neo4j import GraphDatabase
2
- from neo4j import Driver
3
-
4
-
5
- def knowledge_graph_client(uri: str, username: str, password: str) -> Driver:
6
-
7
- driver = GraphDatabase.driver(uri, auth=(username, password))
8
-
9
- return driver