llm-ir 0.0.22 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llm_ir-0.0.22/PKG-INFO ADDED
@@ -0,0 +1,7 @@
+ Metadata-Version: 2.4
+ Name: llm-ir
+ Version: 0.0.22
+ Summary: Core message IR for LLM pipelines
+ Author: Mathis Siebert
+ Requires-Python: >=3.11
+ Requires-Dist: pydantic>=2.0
llm_ir-0.0.22/README.md ADDED
@@ -0,0 +1,14 @@
+ # Intermediate Representation for LLM Histories
+ This repository provides the core types for a standardized way to represent messages for LLMs. Additional chunk types will be added in the future.
+
+ ## Installation via PyPI
+
+ ```bash
+ pip install llm-ir
+ ```
+
+ ## Imports
+
+ ```python
+ from llm_ir import AIMessage, AIRoles, AIChunk, AIChunkText, AIChunkFile
+ ```
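For orientation, a minimal usage sketch assembled from the types the README imports (assumed usage based on the models shown later in this diff; the message text is invented):

```python
# Minimal sketch (assumed usage): build one user message and serialize it.
from llm_ir import AIMessage, AIRoles, AIChunkText

msg = AIMessage(
    role=AIRoles.USER,
    chunks=[AIChunkText(text="Summarize this repository in one sentence.")],
)

# Pydantic v2 JSON serialization; the role enum serializes to its string value.
print(msg.model_dump_json())
# {"role":"user","chunks":[{"type":"text","text":"Summarize this repository in one sentence."}]}
```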
llm_ir-0.0.22/llm_ir/__init__.py ADDED
@@ -0,0 +1,12 @@
+ from .chunks import AIChunk, AIChunkText, AIChunkFile, AIChunkImageURL
+ from .roles import AIRoles
+ from .messages import AIMessage
+
+ __all__ = [
+     "AIChunk",
+     "AIChunkText",
+     "AIChunkFile",
+     "AIRoles",
+     "AIMessage",
+     "AIChunkImageURL",
+ ]
llm_ir-0.0.22/llm_ir/adapter/__init__.py ADDED
@@ -0,0 +1,9 @@
+ from .openai import to_openai, OpenAIMessage, OpenAIContent, OpenAITextContent, OpenAIImageURLContent
+
+ __all__ = [
+     "to_openai",
+     "OpenAIMessage",
+     "OpenAIContent",
+     "OpenAITextContent",
+     "OpenAIImageURLContent",
+ ]
llm_ir-0.0.22/llm_ir/adapter/openai.py ADDED
@@ -0,0 +1,61 @@
+ import base64
+ from typing import TypedDict, Literal, Union
+
+ from ..messages import AIMessage
+ from ..chunks import AIChunk, AIChunkText, AIChunkImageURL, AIChunkFile
+
+
+ class OpenAITextContent(TypedDict):
+     type: Literal["text"]
+     text: str
+
+
+ class OpenAIImageURLContent(TypedDict):
+     type: Literal["image_url"]
+     image_url: dict[str, str]
+
+
+ OpenAIContent = Union[OpenAITextContent, OpenAIImageURLContent]
+
+
+ class OpenAIMessage(TypedDict):
+     role: str
+     content: list[OpenAIContent]
+
+
+ def to_openai(messages: list[AIMessage]) -> list[OpenAIMessage]:
+     """Convert IR messages into the OpenAI chat message format."""
+     result: list[OpenAIMessage] = []
+     for message in messages:
+         role = message.role.value
+         result.append(OpenAIMessage(
+             role=role,
+             content=[chunk_to_openai(chunk) for chunk in message.chunks],
+         ))
+     return result
+
+
+ def chunk_to_openai(chunk: AIChunk) -> OpenAIContent:
+     """Convert a single IR chunk into an OpenAI content part."""
+     match chunk:
+         case AIChunkText():
+             return OpenAITextContent(
+                 type="text",
+                 text=chunk.text,
+             )
+         case AIChunkImageURL():
+             return OpenAIImageURLContent(
+                 type="image_url",
+                 image_url={"url": chunk.url},
+             )
+         case AIChunkFile():
+             # Image files are inlined as base64 data URLs; other file types are rejected.
+             if chunk.mimetype.startswith("image/"):
+                 base64_data = base64.b64encode(chunk.bytes).decode("utf-8")
+                 return OpenAIImageURLContent(
+                     type="image_url",
+                     image_url={"url": f"data:{chunk.mimetype};base64,{base64_data}"},
+                 )
+             raise ValueError(f"Unsupported file type for OpenAI: {chunk.mimetype}")
+         case _:
+             raise ValueError(f"Unsupported chunk type: {type(chunk)}")
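To make the adapter's output shape concrete, a hypothetical round trip through `to_openai` (the file bytes and URLs below are placeholders):

```python
# Hypothetical conversion sketch: IR messages -> OpenAI-style content parts.
from llm_ir import AIMessage, AIRoles, AIChunkText, AIChunkFile
from llm_ir.adapter import to_openai

history = [
    AIMessage(role=AIRoles.SYSTEM, chunks=[AIChunkText(text="You are concise.")]),
    AIMessage(
        role=AIRoles.USER,
        chunks=[
            AIChunkText(text="What is in this picture?"),
            # Placeholder PNG bytes; image files are inlined as base64 data URLs.
            AIChunkFile(name="shot.png", mimetype="image/png", bytes=b"\x89PNG\r\n\x1a\n"),
        ],
    ),
]

# Each AIMessage becomes {"role": ..., "content": [<text/image_url parts>]}.
openai_messages = to_openai(history)
print(openai_messages[1]["content"][1]["image_url"]["url"][:30])
# data:image/png;base64,iVBORw0K
```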
llm_ir-0.0.22/llm_ir/chunks.py ADDED
@@ -0,0 +1,18 @@
+ from typing import Union, Literal
+ from pydantic import BaseModel
+
+ class AIChunkText(BaseModel):
+     type: Literal["text"] = "text"
+     text: str
+
+ class AIChunkFile(BaseModel):
+     type: Literal["file"] = "file"
+     name: str
+     mimetype: str
+     bytes: bytes
+
+ class AIChunkImageURL(BaseModel):
+     type: Literal["image"] = "image"
+     url: str
+
+ AIChunk = Union[AIChunkText, AIChunkFile, AIChunkImageURL]
llm_ir-0.0.22/llm_ir/messages.py ADDED
@@ -0,0 +1,9 @@
+ from pydantic import BaseModel, Field
+
+ from .chunks import AIChunk
+ from .roles import AIRoles
+
+
+ class AIMessage(BaseModel):
+     role: AIRoles
+     chunks: list[AIChunk] = Field(default_factory=list)
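Since each chunk model carries a `Literal` type marker, pydantic can resolve plain dicts back into the right chunk class when validating an `AIMessage` (default union matching; no explicit discriminator is declared). A hypothetical round trip with invented payload values:

```python
# Hypothetical validation sketch: each chunk dict resolves to the chunk model
# whose "type" Literal it matches.
from llm_ir import AIMessage

raw = {
    "role": "user",
    "chunks": [
        {"type": "text", "text": "Here is a screenshot."},
        {"type": "image", "url": "https://example.com/shot.png"},
    ],
}

msg = AIMessage.model_validate(raw)
print([type(chunk).__name__ for chunk in msg.chunks])
# ['AIChunkText', 'AIChunkImageURL']
```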
llm_ir-0.0.22/llm_ir/py.typed ADDED
File without changes
llm_ir-0.0.22/llm_ir/roles.py ADDED
@@ -0,0 +1,9 @@
+ from enum import StrEnum
+
+
+ class AIRoles(StrEnum):
+     USER = "user"
+     # "assistant" is the provider-facing name for the model role.
+     MODEL = "assistant"
+     SYSTEM = "system"
+     TOOL = "tool"
llm_ir-0.0.22/llm_ir.egg-info/PKG-INFO ADDED
@@ -0,0 +1,7 @@
+ Metadata-Version: 2.4
+ Name: llm-ir
+ Version: 0.0.22
+ Summary: Core message IR for LLM pipelines
+ Author: Mathis Siebert
+ Requires-Python: >=3.11
+ Requires-Dist: pydantic>=2.0
llm_ir-0.0.22/llm_ir.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,14 @@
+ README.md
+ pyproject.toml
+ llm_ir/__init__.py
+ llm_ir/chunks.py
+ llm_ir/messages.py
+ llm_ir/py.typed
+ llm_ir/roles.py
+ llm_ir.egg-info/PKG-INFO
+ llm_ir.egg-info/SOURCES.txt
+ llm_ir.egg-info/dependency_links.txt
+ llm_ir.egg-info/requires.txt
+ llm_ir.egg-info/top_level.txt
+ llm_ir/adapter/__init__.py
+ llm_ir/adapter/openai.py
llm_ir-0.0.22/llm_ir.egg-info/requires.txt ADDED
@@ -0,0 +1 @@
+ pydantic>=2.0
llm_ir-0.0.22/llm_ir.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ llm_ir
llm_ir-0.0.22/pyproject.toml ADDED
@@ -0,0 +1,13 @@
+ [project]
+ name = "llm-ir"
+ version = "0.0.22"
+ description = "Core message IR for LLM pipelines"
+ authors = [{ name = "Mathis Siebert" }]
+ requires-python = ">=3.11"
+ dependencies = [
+     "pydantic>=2.0"
+ ]
+
+ [build-system]
+ requires = ["setuptools", "wheel"]
+ build-backend = "setuptools.build_meta"
llm_ir-0.0.22/setup.cfg ADDED
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+