llmir 0.0.12.tar.gz → 0.0.14.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- {llmir-0.0.12 → llmir-0.0.14}/PKG-INFO +1 -1
- {llmir-0.0.12 → llmir-0.0.14}/llmir/adapter/openai.py +15 -10
- {llmir-0.0.12 → llmir-0.0.14}/llmir.egg-info/PKG-INFO +1 -1
- {llmir-0.0.12 → llmir-0.0.14}/pyproject.toml +1 -1
- {llmir-0.0.12 → llmir-0.0.14}/README.md +0 -0
- {llmir-0.0.12 → llmir-0.0.14}/llmir/__init__.py +0 -0
- {llmir-0.0.12 → llmir-0.0.14}/llmir/adapter/__init__.py +0 -0
- {llmir-0.0.12 → llmir-0.0.14}/llmir/chunks.py +0 -0
- {llmir-0.0.12 → llmir-0.0.14}/llmir/messages.py +0 -0
- {llmir-0.0.12 → llmir-0.0.14}/llmir/py.typed +0 -0
- {llmir-0.0.12 → llmir-0.0.14}/llmir/rich_repr.py +0 -0
- {llmir-0.0.12 → llmir-0.0.14}/llmir/roles.py +0 -0
- {llmir-0.0.12 → llmir-0.0.14}/llmir/tools.py +0 -0
- {llmir-0.0.12 → llmir-0.0.14}/llmir.egg-info/SOURCES.txt +0 -0
- {llmir-0.0.12 → llmir-0.0.14}/llmir.egg-info/dependency_links.txt +0 -0
- {llmir-0.0.12 → llmir-0.0.14}/llmir.egg-info/requires.txt +0 -0
- {llmir-0.0.12 → llmir-0.0.14}/llmir.egg-info/top_level.txt +0 -0
- {llmir-0.0.12 → llmir-0.0.14}/setup.cfg +0 -0
@@ -100,6 +100,16 @@ def to_openai(messages: list[AIMessages]) -> list[OpenAIMessages]:
                 content_chunks.append(chunk)


+
+        if media_chunks:
+            result.append(OpenAIMessage(
+                role="user",  # Hacky too, but what else to circumvent API limitations in a broadly compatible way?
+                content=[
+                    content_chunk_to_openai(c) for c in media_chunks
+                ]
+            )
+            )
+
         if content_chunks or tool_call_chunks:
             formatted = OpenAIMessage(
                 role=role
@@ -110,15 +120,6 @@ def to_openai(messages: list[AIMessages]) -> list[OpenAIMessages]:
             formatted["tool_calls"] = [tool_call_chunk_to_openai(c) for c in tool_call_chunks]

             result.append(formatted)
-
-        if media_chunks:
-            result.append(OpenAIMessage(
-                role="user",  # Hacky too, but what else to circumvent API limitations in a broadly compatible way?
-                content=[
-                    content_chunk_to_openai(c) for c in media_chunks
-                ]
-            )
-            )

     return result

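Taken together, the two hunks above move, rather than rewrite, the `media_chunks` handling: the extra `user`-role message that carries media content is now appended to `result` before the message built from text and tool-call chunks, instead of after it. The sketch below only illustrates that ordering change and is not llmir's actual code: plain dicts stand in for `OpenAIMessage`, and `split_chunks` is a hypothetical helper.

```python
# Illustrative sketch only. Plain dicts stand in for llmir's OpenAIMessage,
# and split_chunks() is a made-up helper; neither is part of the package's API.

def split_chunks(chunks):
    """Separate inline-able chunks (text, tool calls) from media chunks."""
    content, tool_calls, media = [], [], []
    for c in chunks:
        if c["kind"] == "text":
            content.append(c)
        elif c["kind"] == "tool_call":
            tool_calls.append(c)
        else:  # files/images that have to travel in their own user message
            media.append(c)
    return content, tool_calls, media


def to_openai_sketch(message):
    """Mirror the 0.0.14 ordering: media-only message first, main message second."""
    result = []
    content, tool_calls, media = split_chunks(message["chunks"])

    # As of 0.0.14 this block runs *before* the main message is appended
    # (in 0.0.12 it ran after it, as the second hunk's removals show).
    if media:
        result.append({"role": "user", "content": media})

    if content or tool_calls:
        formatted = {"role": message["role"], "content": content}
        if tool_calls:
            formatted["tool_calls"] = tool_calls
        result.append(formatted)

    return result


if __name__ == "__main__":
    msg = {"role": "user", "chunks": [
        {"kind": "text", "text": "Summarise the attachment."},
        {"kind": "file", "name": "report.zip"},
    ]}
    for m in to_openai_sketch(msg):
        print(m["role"], [c["kind"] for c in m["content"]])
    # 0.0.14 order: the media-only user message prints first, then the text message.
```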
@@ -154,7 +155,11 @@ def content_chunk_to_openai(chunk: AIChunkText | AIChunkFile | AIChunkImageURL)
                     text=text
                 )
             else:
-                raise ValueError(f"Unsupported file type for OpenAI: {chunk.mimetype}")
+                return OpenAIText(  # Fallback: represent as text
+                    type="text",
+                    text=f"[Unsupported file type: {chunk.mimetype}, size: {len(chunk.bytes)} bytes, name: {chunk.name}]"
+                )
+                # raise ValueError(f"Unsupported file type for OpenAI: {chunk.mimetype}")
         case _:
             raise ValueError(f"Unsupported chunk type: {type(chunk)}")

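The last hunk changes how `content_chunk_to_openai` handles file chunks whose MIME type it cannot map to an OpenAI content part: instead of raising `ValueError`, 0.0.14 falls back to a text part that describes the file. The snippet below is a standalone illustration of that fallback shape; `FileChunk` is a made-up stand-in for llmir's `AIChunkFile`, limited to the attributes the diff touches (`mimetype`, `bytes`, `name`), and the "supported" branch is only assumed for contrast.

```python
from dataclasses import dataclass


@dataclass
class FileChunk:
    """Stand-in for llmir's AIChunkFile; fields limited to what the diff uses."""
    mimetype: str
    bytes: bytes
    name: str


# Assumption for the sketch: treat PDFs as the "supported" branch.
SUPPORTED_MIMETYPES = {"application/pdf"}


def file_chunk_to_text_fallback(chunk: FileChunk) -> dict:
    if chunk.mimetype in SUPPORTED_MIMETYPES:
        # The real adapter builds a proper OpenAI content part here.
        return {"type": "file", "file": {"filename": chunk.name}}
    # 0.0.14 behaviour: describe the unsupported file as text instead of raising.
    return {
        "type": "text",
        "text": f"[Unsupported file type: {chunk.mimetype}, "
                f"size: {len(chunk.bytes)} bytes, name: {chunk.name}]",
    }


if __name__ == "__main__":
    chunk = FileChunk(mimetype="application/zip", bytes=b"\x00" * 42, name="data.zip")
    print(file_chunk_to_text_fallback(chunk)["text"])
    # -> [Unsupported file type: application/zip, size: 42 bytes, name: data.zip]
```

Since the old `raise` is kept in the diff as a comment, the strict behaviour can be restored by uncommenting it if a silent text downgrade is not wanted.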