llmir 0.0.4.tar.gz → 0.0.6.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: llmir
- Version: 0.0.4
+ Version: 0.0.6
  Summary: Core message and tool IR for LLM pipelines
  Author: Mathis Siebert
  Requires-Python: >=3.11
@@ -1,10 +1,10 @@
- from .chunks import AIChunk, AIChunkText, AIChunkFile, AIChunkImageURL, AIChunkToolCall
+ from .chunks import AIChunks, AIChunkText, AIChunkFile, AIChunkImageURL, AIChunkToolCall
  from .roles import AIRoles
  from .messages import AIMessages, AIMessage, AIMessageToolResponse
  from .tools import Tool

  __all__ = [
-     "AIChunk",
+     "AIChunks",
      "AIChunkText",
      "AIChunkFile",
      "AIChunkImageURL",
@@ -0,0 +1,13 @@
+ from .openai import to_openai, OpenAIMessage, OpenAIMessageToolResponse, OpenAIContents, OpenAITextContent, OpenAIImageURLContent, OpenAIImageURLURL, OpenAIToolCallContent, OpenAIToolCallFunction
+
+ __all__ = [
+     "to_openai",
+     "OpenAIMessage",
+     "OpenAIMessageToolResponse",
+     "OpenAIContents",
+     "OpenAITextContent",
+     "OpenAIImageURLContent",
+     "OpenAIImageURLURL",
+     "OpenAIToolCallContent",
+     "OpenAIToolCallFunction",
+ ]
@@ -0,0 +1,136 @@
+ from typing import TypedDict, Literal
+
+ from ..messages import AIMessages, AIMessageToolResponse
+ from ..chunks import AIChunks, AIChunkText, AIChunkImageURL, AIChunkFile, AIChunkToolCall
+ import base64
+ import json
+
+
+ class OpenAITextContent(TypedDict):
+     type: Literal["text"]
+     text: str
+
+ class OpenAIImageURLURL(TypedDict):
+     url: str
+
+ class OpenAIImageURLContent(TypedDict):
+     type: Literal["image_url"]
+     image_url: OpenAIImageURLURL
+
+
+ class OpenAIToolCallFunction(TypedDict):
+     name: str
+     arguments: str
+
+ class OpenAIToolCallContent(TypedDict):
+     id: str
+     type: Literal["function"]
+     function: OpenAIToolCallFunction
+
+
+
+ OpenAIContents = OpenAITextContent | OpenAIImageURLContent | OpenAIToolCallContent
+
+
+ class OpenAIMessage(TypedDict):
+     role: str
+     content: list[OpenAIContents]
+
+ class OpenAIMessageToolResponse(TypedDict):
+     role: Literal["tool"]
+     tool_call_id: str
+     name: str
+     content: str
+
+
+ OpenAIMessages = OpenAIMessage | OpenAIMessageToolResponse
+
+
+
+ def to_openai(messages: list[AIMessages]) -> list[OpenAIMessages]:
+
+
+     result: list[OpenAIMessages] = []
+     for message in messages:
+         role = message.role.value
+         if isinstance(message, AIMessageToolResponse):
+             assert(role == "tool")
+             text: str = ""
+             media_chunks: list[AIChunkFile | AIChunkImageURL] = []
+             for chunk in message.chunks:
+                 if isinstance(chunk, AIChunkText):
+                     text += chunk.text
+                 elif isinstance(chunk, AIChunkImageURL) or isinstance(chunk, AIChunkFile):
+                     media_chunks.append(chunk)
+                 else:
+                     raise Exception(f"Invalid chunk type for Tool Response: {chunk.type}")
+             result.append(
+                 OpenAIMessageToolResponse(
+                     role=role,
+                     tool_call_id=message.id,
+                     name=message.name,
+                     content=text,
+                 )
+             )
+             if media_chunks:
+                 result.append(
+                     OpenAIMessage(
+                         role="user", # Hacky, but what else to circumvent API limitations in a broadly compatible way?
+                         content=[
+                             chunk_to_openai(chunk) for chunk in media_chunks
+                         ]
+                     )
+                 )
+         else:
+             result.append(OpenAIMessage(
+                 role= role,
+                 content= [
+                     chunk_to_openai(chunk) for chunk in message.chunks
+                 ]
+             ))
+     return result
+
+
+ def chunk_to_openai(chunk: AIChunks) -> OpenAIContents:
+
+     match chunk:
+         case AIChunkText():
+             return OpenAITextContent(
+                 type="text",
+                 text=chunk.text,
+             )
+         case AIChunkImageURL():
+             return OpenAIImageURLContent(
+                 type="image_url",
+                 image_url={
+                     "url": chunk.url,
+                 }
+             )
+         case AIChunkFile():
+             if chunk.mimetype.startswith("image/"):
+                 base64_data = base64.b64encode(chunk.bytes).decode('utf-8')
+                 return OpenAIImageURLContent(
+                     type= "image_url",
+                     image_url= {
+                         "url": f"data:{chunk.mimetype};base64,{base64_data}",
+                     }
+                 )
+             elif chunk.mimetype == "text/plain":
+                 text = chunk.bytes.decode(encoding="utf-8")
+                 return OpenAITextContent(
+                     type="text",
+                     text=text
+                 )
+             else:
+                 raise ValueError(f"Unsupported file type for OpenAI: {chunk.mimetype}")
+         case AIChunkToolCall():
+             return OpenAIToolCallContent(
+                 id=chunk.id,
+                 type="function",
+                 function=OpenAIToolCallFunction(
+                     name=chunk.name,
+                     arguments=json.dumps(chunk.arguments)
+                 )
+             )
+         case _:
+             raise ValueError(f"Unsupported chunk type: {type(chunk)}")
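For orientation, here is a minimal usage sketch of the new adapter. It is not part of the package diff: the import path of to_openai is not visible in these hunks (the sub-module path below is an assumption), and it assumes the chunk models accept keyword construction with the field names shown above and that their type discriminators have defaults.

from llmir import AIMessage, AIChunkText, AIChunkImageURL, AIRoles
from llmir.openai import to_openai  # assumed sub-module path; not shown in this diff

message = AIMessage(
    role=AIRoles.USER,
    chunks=[
        AIChunkText(text="Describe this image."),
        AIChunkImageURL(url="https://example.com/cat.png"),
    ],
)

# to_openai returns plain TypedDict payloads shaped for the Chat Completions API, e.g.
# [{"role": "user", "content": [{"type": "text", ...}, {"type": "image_url", ...}]}]
payload = to_openai([message])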
@@ -23,4 +23,4 @@ class AIChunkToolCall(RichReprMixin, BaseModel):
      arguments: dict[str, object]


- AIChunk = Union[AIChunkText, AIChunkFile, AIChunkImageURL, AIChunkToolCall]
+ AIChunks = Union[AIChunkText, AIChunkFile, AIChunkImageURL, AIChunkToolCall]
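The rename above is a breaking change for downstream code that imported the union alias directly. A minimal migration sketch follows; collect_text is a hypothetical helper, and it assumes the alias stays re-exported from the package root as the __init__.py hunk earlier in this diff shows.

# 0.0.4:
#   from llmir import AIChunk
# 0.0.6: the union alias is now plural
from llmir import AIChunks, AIChunkText

def collect_text(chunks: list[AIChunks]) -> str:
    # Keep only the text chunks; the other union members are ignored here.
    return "".join(c.text for c in chunks if isinstance(c, AIChunkText))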
@@ -1,17 +1,17 @@
  from pydantic import BaseModel, Field
  from typing import Literal
- from .chunks import AIChunk
+ from .chunks import AIChunks
  from .roles import AIRoles

  class AIMessage(BaseModel):

      role: Literal[AIRoles.USER, AIRoles.MODEL, AIRoles.SYSTEM]
-     chunks: list[AIChunk] = Field(default_factory=list[AIChunk])
+     chunks: list[AIChunks] = Field(default_factory=list[AIChunks])

  class AIMessageToolResponse(BaseModel):

      role: Literal[AIRoles.TOOL] = AIRoles.TOOL
-     chunks: list[AIChunk] = Field(default_factory=list[AIChunk])
+     chunks: list[AIChunks] = Field(default_factory=list[AIChunks])
      id: str
      name: str

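Both message models now type their chunks as list[AIChunks], so a tool response can carry media. As the adapter hunk above shows, to_openai flattens the text chunks of an AIMessageToolResponse into a single content string and, when media chunks are present, appends a follow-up "user" message that carries them. A rough sketch of that behavior, using the field names visible in this diff; the AIChunkFile constructor keywords and the adapter import path are assumptions.

from llmir import AIMessageToolResponse, AIChunkText, AIChunkFile
from llmir.openai import to_openai  # assumed sub-module path, as above

tool_msg = AIMessageToolResponse(
    id="call_123",
    name="render_chart",
    chunks=[
        AIChunkText(text="Chart rendered."),
        AIChunkFile(mimetype="image/png", bytes=b"\x89PNG..."),  # field names per the adapter code
    ],
)

converted = to_openai([tool_msg])
# converted[0]: {"role": "tool", "tool_call_id": "call_123", "name": "render_chart", "content": "Chart rendered."}
# converted[1]: {"role": "user", "content": [{"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}}]}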
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: llmir
- Version: 0.0.4
+ Version: 0.0.6
  Summary: Core message and tool IR for LLM pipelines
  Author: Mathis Siebert
  Requires-Python: >=3.11
@@ -1,6 +1,6 @@
  [project]
  name = "llmir"
- version = "0.0.4"
+ version = "0.0.6"
  description = "Core message and tool IR for LLM pipelines"
  authors = [{ name="Mathis Siebert" }]
  requires-python = ">=3.11"
@@ -1,10 +0,0 @@
- from .openai import to_openai, OpenAIMessage, OpenAIMessageToolResponse, OpenAIContent, OpenAITextContent, OpenAIImageURLContent
-
- __all__ = [
-     "to_openai",
-     "OpenAIMessage",
-     "OpenAIMessageToolResponse",
-     "OpenAIContent",
-     "OpenAITextContent",
-     "OpenAIImageURLContent",
- ]
@@ -1,89 +0,0 @@
- from typing import TypedDict, Literal, Union
-
- from ..messages import AIMessages, AIMessageToolResponse
- from ..chunks import AIChunk, AIChunkText, AIChunkImageURL, AIChunkFile
- import base64
-
-
- class OpenAITextContent(TypedDict):
-     type: Literal["text"]
-     text: str
-
-
- class OpenAIImageURLContent(TypedDict):
-     type: Literal["image_url"]
-     image_url: dict[str, str]
-
- OpenAIContent = Union[OpenAITextContent, OpenAIImageURLContent]
-
-
- class OpenAIMessage(TypedDict):
-     role: str
-     content: list[OpenAIContent]
-
- class OpenAIMessageToolResponse(OpenAIMessage):
-     tool_call_id: str
-     name: str
-
-
- def to_openai(messages: list[AIMessages]) -> list[OpenAIMessage]:
-
-
-     result: list[OpenAIMessage] = []
-     for message in messages:
-         role = message.role.value
-         if isinstance(message, AIMessageToolResponse):
-             result.append(
-                 OpenAIMessageToolResponse(
-                     role=role,
-                     tool_call_id=message.id,
-                     name=message.name,
-                     content=[
-                         chunk_to_openai(chunk) for chunk in message.chunks
-                     ]
-                 )
-             )
-         else:
-             result.append(OpenAIMessage(
-                 role= role,
-                 content= [
-                     chunk_to_openai(chunk) for chunk in message.chunks
-                 ]
-             ))
-     return result
-
-
- def chunk_to_openai(chunk: AIChunk) -> OpenAIContent:
-
-     match chunk:
-         case AIChunkText():
-             return OpenAITextContent(
-                 type="text",
-                 text=chunk.text,
-             )
-         case AIChunkImageURL():
-             return OpenAIImageURLContent(
-                 type="image_url",
-                 image_url={
-                     "url": chunk.url,
-                 }
-             )
-         case AIChunkFile():
-             if chunk.mimetype.startswith("image/"):
-                 base64_data = base64.b64encode(chunk.bytes).decode('utf-8')
-                 return OpenAIImageURLContent(
-                     type= "image_url",
-                     image_url= {
-                         "url": f"data:{chunk.mimetype};base64,{base64_data}",
-                     }
-                 )
-             elif chunk.mimetype == ("text/plain"):
-                 text = chunk.bytes.decode(encoding="utf-8")
-                 return OpenAITextContent(
-                     type="text",
-                     text=text
-                 )
-             else:
-                 raise ValueError(f"Unsupported file type for OpenAI: {chunk.mimetype}")
-         case _:
-             raise ValueError(f"Unsupported chunk type: {type(chunk)}")
8 files without changes