llm-to-toon 0.0.47__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,36 @@
1
+ Metadata-Version: 2.4
2
+ Name: llm-to-toon
3
+ Version: 0.0.47
4
+ Summary: Tiny wrapper exposing Prompture helpers to convert LLM output into TOON.
5
+ Author-email: Juan Denis <juan@vene.co>
6
+ License: MIT
7
+ Keywords: llm,toon,prompt,structured-output
8
+ Requires-Python: >=3.10
9
+ Description-Content-Type: text/markdown
10
+ Requires-Dist: prompture>=0.0.47
11
+
12
+ # llm-to-toon
13
+
14
+ Tiny wrapper around `prompture` that returns [TOON](https://github.com/jmorganca/python-toon)
15
+ (Token-Oriented Object Notation) instead of JSON. Under the hood it uses
16
+ `prompture.extract_and_jsonify(..., output_format="toon")` and converts the result
17
+ into the ultra-compact TOON text automatically.
18
+
19
+ Install:
20
+ ```bash
21
+ pip install llm-to-toon
22
+ ```
23
+
24
+ Usage:
25
+
26
+ ```python
27
+ from llm_to_toon import from_llm_text
28
+
29
+ schema = {"name": "string", "age": "int"}
30
+ toon_text = from_llm_text("Name: Juan Age: 30", schema)
31
+ print(toon_text)
32
+ ```
33
+
34
+ By default the helper spins up the local Ollama driver (`gemma:latest`). Pass your
35
+ own Prompture driver if you want to call OpenAI, Azure, Groq, etc. For the full
36
+ Prompture feature-set see the main project: https://github.com/jhd3197/prompture
@@ -0,0 +1,25 @@
1
+ # llm-to-toon
2
+
3
+ Tiny wrapper around `prompture` that returns [TOON](https://github.com/jmorganca/python-toon)
4
+ (Token-Oriented Object Notation) instead of JSON. Under the hood it uses
5
+ `prompture.extract_and_jsonify(..., output_format="toon")` and converts the result
6
+ into the ultra-compact TOON text automatically.
7
+
8
+ Install:
9
+ ```bash
10
+ pip install llm-to-toon
11
+ ```
12
+
13
+ Usage:
14
+
15
+ ```python
16
+ from llm_to_toon import from_llm_text
17
+
18
+ schema = {"name": "string", "age": "int"}
19
+ toon_text = from_llm_text("Name: Juan Age: 30", schema)
20
+ print(toon_text)
21
+ ```
22
+
23
+ By default the helper spins up the local Ollama driver (`gemma:latest`). Pass your
24
+ own Prompture driver if you want to call OpenAI, Azure, Groq, etc. For the full
25
+ Prompture feature-set see the main project: https://github.com/jhd3197/prompture
@@ -0,0 +1,24 @@
1
+ # packages/llm_to_toon/llm_to_toon/__init__.py
2
+ try:
3
+ from prompture import extract_and_jsonify as _extract_and_jsonify
4
+ from prompture.drivers import OllamaDriver
5
+ except Exception:
6
+ from prompture.core import extract_and_jsonify as _extract_and_jsonify
7
+ from prompture.drivers import OllamaDriver
8
+
9
+
10
def from_llm_text(text: str, schema: dict, driver: "OllamaDriver | None" = None):
    """Extract structured data from free-form text and return it as TOON.

    Delegates to Prompture's ``extract_and_jsonify`` with
    ``output_format="toon"`` and returns the encoded TOON string from the
    result mapping.

    Args:
        text: Free-form text to extract structured data from.
        schema: JSON-schema-style dict describing the expected structure.
        driver: Optional Prompture LLM driver instance (not a dict — the
            previous ``dict | None`` annotation was incorrect). Any Prompture
            driver (OpenAI, Azure, Groq, ...) is accepted; when omitted, a
            local Ollama driver (``gemma:latest`` at ``localhost:11434``) is
            created.

    Returns:
        str: The TOON-encoded representation of the extracted data
        (the ``"toon_string"`` entry of the Prompture result).
    """
    if driver is None:
        # Default to a local Ollama instance; callers wanting a hosted
        # provider pass their own configured Prompture driver.
        driver = OllamaDriver(endpoint="http://localhost:11434", model="gemma:latest")
    result = _extract_and_jsonify(driver, text, schema, output_format="toon")
    return result["toon_string"]
@@ -0,0 +1,36 @@
1
+ Metadata-Version: 2.4
2
+ Name: llm-to-toon
3
+ Version: 0.0.47
4
+ Summary: Tiny wrapper exposing Prompture helpers to convert LLM output into TOON.
5
+ Author-email: Juan Denis <juan@vene.co>
6
+ License: MIT
7
+ Keywords: llm,toon,prompt,structured-output
8
+ Requires-Python: >=3.10
9
+ Description-Content-Type: text/markdown
10
+ Requires-Dist: prompture>=0.0.47
11
+
12
+ # llm-to-toon
13
+
14
+ Tiny wrapper around `prompture` that returns [TOON](https://github.com/jmorganca/python-toon)
15
+ (Token-Oriented Object Notation) instead of JSON. Under the hood it uses
16
+ `prompture.extract_and_jsonify(..., output_format="toon")` and converts the result
17
+ into the ultra-compact TOON text automatically.
18
+
19
+ Install:
20
+ ```bash
21
+ pip install llm-to-toon
22
+ ```
23
+
24
+ Usage:
25
+
26
+ ```python
27
+ from llm_to_toon import from_llm_text
28
+
29
+ schema = {"name": "string", "age": "int"}
30
+ toon_text = from_llm_text("Name: Juan Age: 30", schema)
31
+ print(toon_text)
32
+ ```
33
+
34
+ By default the helper spins up the local Ollama driver (`gemma:latest`). Pass your
35
+ own Prompture driver if you want to call OpenAI, Azure, Groq, etc. For the full
36
+ Prompture feature-set see the main project: https://github.com/jhd3197/prompture
@@ -0,0 +1,8 @@
1
+ README.md
2
+ pyproject.toml
3
+ llm_to_toon/__init__.py
4
+ llm_to_toon.egg-info/PKG-INFO
5
+ llm_to_toon.egg-info/SOURCES.txt
6
+ llm_to_toon.egg-info/dependency_links.txt
7
+ llm_to_toon.egg-info/requires.txt
8
+ llm_to_toon.egg-info/top_level.txt
@@ -0,0 +1 @@
1
+ prompture>=0.0.47
@@ -0,0 +1 @@
1
+ llm_to_toon
@@ -0,0 +1,22 @@
1
+ [build-system]
2
+ requires = [ "setuptools>=61.0", "wheel",]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "llm-to-toon"
7
+ version = "0.0.47"
8
+ description = "Tiny wrapper exposing Prompture helpers to convert LLM output into TOON."
9
+ readme = "README.md"
10
+ requires-python = ">=3.10"
11
+ keywords = [ "llm", "toon", "prompt", "structured-output",]
12
+ dependencies = [ "prompture>=0.0.47",]
13
+ [[project.authors]]
14
+ name = "Juan Denis"
15
+ email = "juan@vene.co"
16
+
17
+ [project.license]
18
+ text = "MIT"
19
+
20
+ [tool.setuptools.packages.find]
21
+ where = [ ".",]
22
+ include = [ "llm_to_toon*",]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+