byllm-0.4.8-py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- byllm/__init__.py +24 -0
- byllm/lib.py +10 -0
- byllm/llm.jac +361 -0
- byllm/mtir.jac +200 -0
- byllm/plugin.py +55 -0
- byllm/schema.jac +283 -0
- byllm/types.jac +471 -0
- byllm-0.4.8.dist-info/METADATA +188 -0
- byllm-0.4.8.dist-info/RECORD +11 -0
- byllm-0.4.8.dist-info/WHEEL +4 -0
- byllm-0.4.8.dist-info/entry_points.txt +3 -0
byllm/types.jac
ADDED
@@ -0,0 +1,471 @@
"""Type definitions for LLM interactions.

This module defines the types used in the LLM interactions, including messages,
tools, and tool calls. It provides a structured way to represent messages,
tool calls, and tools that can be used in LLM requests and responses.
"""
import base64;
import mimetypes;
import os;
import from contextlib { suppress }
import from enum { StrEnum }
import from io { BytesIO }
import from typing { Callable, TypeAlias, get_type_hints }

import from PIL.Image { Image as PILImageCls }
import from PIL.Image { open as open_image }

import from litellm.types.utils { Message as LiteLLMMessage }
import from pydantic { TypeAdapter }
import from .schema { tool_to_schema }

# The message can be a jaclang-defined message or whatever object the llm
# returned that was fed back to the llm as it was given (dict).
glob MessageType:
    TypeAlias = 'Message | LiteLLMMessage';

"""Enum for message roles in LLM interactions."""
enum MessageRole(StrEnum) {
    SYSTEM = "system",
    USER = "user",
    ASSISTANT = "assistant",
    TOOL = "tool"
}

"""Message class for LLM interactions."""
obj Message {
    has role: MessageRole;
    has content: str | list[Media];

    """Convert the message to a dictionary."""
    def to_dict() -> dict[str, object] {
        if isinstance(self.content, str) {
            return {"role": self.role.value, "content": self.content};
        }
        media_contents = [];
        for media in self.content {
            media_contents.extend(media.to_dict());
        }
        return {"role": self.role.value, "content": media_contents};
    }
}

"""Result of a tool call in LLM interactions."""
obj ToolCallResultMsg(Message) {
    has tool_call_id: str;
    has name: str;  # Function name.

    """Post-initialization to set the role of the message."""
    def postinit() -> None {
        self.role = MessageRole.TOOL;  # Maybe this should be an assertion?
    }

    """Convert the tool call result message to a dictionary."""
    def to_dict() -> dict[str, object] {
        return {
            "role": self.role.value,
            "content": self.content,
            "tool_call_id": self.tool_call_id,
            "name": self.name,
        };
    }
}

"""Tool class for LLM interactions."""
obj Tool {
    has func: Callable;
    has description: str = "";
    has params_desc: dict[str, str] = None;  # type: ignore

    """Post-initialization to validate the function."""
    def postinit() -> None {
        annotations = get_type_hints(self.func);
        with suppress(Exception) {
            self.func.__annotations__ = annotations;
        }

        self.description = Tool.get_func_description(self.func);

        if hasattr(self.func, "_jac_semstr_inner") {
            self.params_desc = self.func._jac_semstr_inner;  # type: ignore
        } else {
            self.params_desc = {
                name: str(type) for (name, type) in annotations.items()
            };
        }
    }

    """Call the tool function with the provided arguments."""
    def __call__(*args: list, **kwargs: dict) -> object {
        # If there is an error with the finish tool, we throw the exception,
        # since it's the user's responsibility to handle it.
        if self.is_finish_tool() {
            return self.func(*args, **kwargs);
        }
        try {
            # TODO: Should I json-serialize, or is this fine?
            return self.func(*args, **kwargs);
        } except Exception as e {
            # If the tool failed, the LLM will see the error message
            # and make a decision based on that.
            return str(e);
        }
    }

    """Return the name of the tool function."""
    def get_name() -> str {
        return self.func.__name__;
    }

    """Get the description of the function."""
    static def get_func_description(func: Callable) -> str {
        if hasattr(func, "_jac_semstr") {
            return func._jac_semstr;  # type: ignore
        }
        return func.__doc__ or func.__name__;
    }

    """Create a finish tool that returns the final output."""
    static def make_finish_tool(resp_type: type) -> Tool {
        def finish_tool(final_output: object) -> object {
            return TypeAdapter(resp_type).validate_python(final_output);
        }

        finish_tool.__annotations__["return"] = resp_type;
        finish_tool.__annotations__["final_output"] = resp_type;
        return Tool(
            func=finish_tool,
            description="This tool is used to finish the tool calls and return the final output.",
            params_desc={"final_output": "The final output of the tool calls."},
        );
    }

    """Check if the tool is a finish tool."""
    def is_finish_tool() -> bool {
        return self.get_name() == "finish_tool";
    }

    """Return the JSON schema for the tool function."""
    def get_json_schema() -> dict[str, object] {
        return tool_to_schema(self.func, self.description, self.params_desc);
    }

    """Parse the arguments from JSON to the function's expected format."""
    def parse_arguments(args_json: dict) -> dict {
        args = {};

        annotations: dict = {};
        try {
            annotations = self.func.__annotations__;
        } except AttributeError {
            annotations = get_type_hints(self.func);
        }
        for (arg_name, arg_json) in args_json.items() {
            if arg_type := annotations.get(arg_name) {
                args[arg_name] = TypeAdapter(arg_type).validate_python(arg_json);
            }
        }
        return args;
    }
}

"""Tool call class for LLM interactions."""
obj ToolCall {
    has call_id: str;
    has tool: Tool;
    has args: dict;

    """Call the tool with the provided arguments."""
    def __call__() -> ToolCallResultMsg {
        if self.args is not None {
            result = self.tool(**self.args);
        } else {
            raise ValueError("args is None, Expected a dictionary");
        }
        return ToolCallResultMsg(
            role=MessageRole.TOOL,
            content=str(result),
            tool_call_id=self.call_id,
            name=self.tool.get_name(),
        );
    }

    """Return the string representation of the tool call."""
    def __str__() -> str {
        params = ", ".join(f"{k}={v}" for (k, v) in self.args.items());
        return f"{self.tool.get_name()}({params})";
    }

    """Check if the tool is a finish tool."""
    def is_finish_call() -> bool {
        return self.tool.is_finish_tool();
    }

    """Get the output from the finish tool call."""
    def get_output() -> object {
        assert (self.is_finish_call()), "This method should only be called for finish tools.";
        return self.tool(**self.args);
    }
}

"""Mock tool call for testing purposes."""
obj MockToolCall {
    has tool: Callable;
    has args: dict;

    """Convert the mock tool call to a ToolCall."""
    def to_tool_call() -> ToolCall {
        args_parsed = Tool(self.tool).parse_arguments(self.args);
        return ToolCall(
            call_id="",  # Call ID is not used in mock calls.
            tool=Tool(self.tool),
            args=args_parsed,
        );
    }
}

"""Result of the completion from the LLM."""
obj CompletionResult {
    has output: object;
    has tool_calls: list[ToolCall];
}

# -----------------------------------------------------------------------------
# Media content types
# -----------------------------------------------------------------------------
"""Base class for message content."""
obj Media {
    """Convert the content to a dictionary."""
    def to_dict() -> list[dict] {
        raise NotImplementedError("Subclasses must implement this method.");
    }
}

"""Class representing text content in a message."""
obj Text(Media) {
    has text: str;

    """Convert the text content to a dictionary."""
    def to_dict() -> list[dict] {
        return [{"type": "text", "text": self.text}];
    }
}

"""Class representing an image."""
obj Image(Media) {
    has url: (
        "str | bytes | bytearray | memoryview | BytesIO | IO[bytes] | "
        "os.PathLike[str] | os.PathLike[bytes] | PILImageCls"
    );  # type: ignore[name-defined]

    has mime_type: str | None = None;

    """Normalize input into a data URL or leave remote/data URLs as-is.

    Supported inputs:
    - HTTP(S)/GS URLs (left as-is)
    - Data URLs (data:...)
    - Local file paths (opened and encoded to data URL)
    - Bytes / bytearray / memoryview
    - File-like objects (BytesIO or any IO[bytes])
    - os.PathLike
    - PIL.Image.Image instances
    """
    def postinit() -> None {
        value = self.url;

        # Handle path-like inputs by converting to string
        if isinstance(value, os.PathLike) {
            value = os.fspath(value);
        }

        # Remote or data URLs: keep as-is (trim whitespace)
        if isinstance(value, str) {
            s = value.strip();
            if s.startswith(("http://", "https://", "gs://", "data:")) {
                self.url = s;
                return;
            }
            # Treat as local file path
            if not os.path.exists(s) {
                raise ValueError(f"Image file does not exist: {s}");
            }
            image = open_image(s);
            fmt = image.format or "PNG";
            # Determine MIME type with WEBP special-case for py<3.13
            self.mime_type = self._format_to_mime(fmt);
            with BytesIO() as buffer {
                image.save(buffer, format=fmt);
                data = buffer.getvalue();
                self.url = self._data_url_from_bytes(data, fmt);
                return;
            }
        }

        # PIL Image instance
        if isinstance(value, PILImageCls) {
            fmt = value.format or "PNG";
            with BytesIO() as buffer {
                value.save(buffer, format=fmt);
                data = buffer.getvalue();
                self.url = self._data_url_from_bytes(data, fmt);
                return;
            }
        }

        # Bytes-like object
        if isinstance(value, (bytes, bytearray, memoryview)) {
            raw = bytes(value);
            # Probe format via PIL to set correct MIME
            img = open_image(BytesIO(raw));
            fmt = img.format or "PNG";
            # Use bytes as-is (avoid re-encode) if PIL detects same format as content.
            # Otherwise, re-encode to the detected format to be safe.
            try {
                self.url = self._data_url_from_bytes(raw, fmt);
            } except Exception {
                with BytesIO() as buffer {
                    img.save(buffer, format=fmt);
                    self.url = self._data_url_from_bytes(buffer.getvalue(), fmt);
                }
            }
            return;
        }

        # File-like object (e.g., BytesIO, IO[bytes])
        if hasattr(value, "read") and callable(value.read) {
            # Safely read without permanently moving the cursor
            stream: IO[bytes] = value;  # type: ignore[assignment]
            pos = None;
            try {
                pos = stream.tell();  # type: ignore[attr-defined]
            } except Exception {
                pos = None;
            }
            try {
                # Prefer getvalue if available (e.g., BytesIO)
                if hasattr(stream, "getvalue") and callable(stream.getvalue) {
                    raw = stream.getvalue();  # type: ignore[call-arg]
                } else {
                    if hasattr(stream, "seek") {
                        with suppress(Exception) {
                            stream.seek(0);
                        }
                    }
                    raw = stream.read();
                }
                img = open_image(BytesIO(raw));
                fmt = img.format or "PNG";
                self.url = self._data_url_from_bytes(raw, fmt);
            } finally {
                if pos is not None and hasattr(stream, "seek") {
                    with suppress(Exception) {
                        stream.seek(pos);
                    }
                }
            }
            return;
        }

        # If we reach here, the input type isn't supported
        raise TypeError(
            "Unsupported Image input type. Provide a URL/path string, data URL, bytes, "
            "BytesIO, file-like object, os.PathLike, or PIL.Image.Image."
        );
    }

    """Map a PIL format name to a MIME type with sensible fallbacks."""
    def _format_to_mime(fmt: str | None) -> str {
        fmt = (fmt or "PNG").upper();
        if fmt == "WEBP" {
            return "image/webp";
        }
        if fmt == "JPEG" or fmt == "JPG" {
            return "image/jpeg";
        }
        if fmt == "PNG" {
            return "image/png";
        }
        # Try mimetypes (uses extension mapping)
        mime = mimetypes.types_map.get("." + fmt.lower());
        return mime or "image/png";
    }

    def _data_url_from_bytes(data: bytes, fmt: str | None) -> str {
        mime = self.mime_type or self._format_to_mime(fmt);
        # Ensure mime_type is set on the instance for downstream usage
        self.mime_type = mime;
        b64 = base64.b64encode(data).decode("utf-8");
        return f"data:{mime};base64,{b64}";
    }

    """Convert the image to a dictionary."""
    def to_dict() -> list[dict] {
        image_url = {"url": self.url};
        if self.mime_type {
            image_url["format"] = self.mime_type;
        }
        return [{"type": "image_url", "image_url": image_url}];
    }
}

# Ref: https://cookbook.openai.com/examples/gpt_with_vision_for_video_understanding
"""Class representing a video."""
obj Video(Media) {
    has path: str;
    has fps: int = 1;
    has _base64frames: list[str] | None = None;

    """Post-initialization to validate that the video file exists."""
    def postinit() -> None {
        if not os.path.exists(self.path) {
            raise ValueError(f"Video file does not exist: {self.path}");
        }
    }

    """Load video frames as base64-encoded images."""
    def load_frames() -> None {
        try {
            import cv2;
        } except ImportError {
            raise ImportError(
                "OpenCV is required to process video files. "
                "Install `pip install byllm[video]` for video capabilities."
            );
        }

        self._base64frames = [];
        video = cv2.VideoCapture(self.path);
        total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT));

        target_fps = self.fps;
        source_fps = video.get(cv2.CAP_PROP_FPS);
        frames_to_skip = (
            int(source_fps / target_fps) - 1 if target_fps < source_fps else 1
        );

        curr_frame = 0;
        while curr_frame < total_frames - 1 {
            video.set(cv2.CAP_PROP_POS_FRAMES, curr_frame);
            (success, frame) = video.read();
            if not success {
                raise ValueError("Failed to read video frame.");
            }
            (_, buffer) = cv2.imencode(".jpg", frame);
            self._base64frames.append(base64.b64encode(buffer).decode("utf-8"));
            curr_frame += frames_to_skip;
        }
    }

    """Convert the video to a dictionary."""
    def to_dict() -> list[dict] {
        if self._base64frames is None {
            self.load_frames();
        }
        assert (self._base64frames is not None), "Frames must be loaded before conversion.";
        return [
            {"type": "image_url", "image_url": f"data:image/jpeg;base64,{frame}"}
            for frame in self._base64frames
        ];
    }
}
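The finish-tool and argument-parsing logic above leans on pydantic's `TypeAdapter` to coerce the JSON arguments produced by the model into the tool function's annotated Python types. The snippet below is not part of the package; it is a minimal Python sketch of that pattern, assuming pydantic v2 and a hypothetical `add_item` tool function.

```python
# Illustrative sketch (not shipped with byllm): validate JSON-decoded tool
# arguments against a function's type hints, as Tool.parse_arguments does.
from typing import get_type_hints
from pydantic import TypeAdapter

def add_item(name: str, quantity: int) -> str:  # hypothetical tool function
    return f"added {quantity} x {name}"

raw_args = {"name": "apple", "quantity": "3"}  # e.g. decoded from the LLM's JSON

hints = get_type_hints(add_item)
parsed = {
    key: TypeAdapter(hints[key]).validate_python(value)  # "3" is coerced to int 3
    for key, value in raw_args.items()
    if key in hints
}
print(add_item(**parsed))  # -> added 3 x apple
```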
byllm-0.4.8.dist-info/METADATA
ADDED
@@ -0,0 +1,188 @@
Metadata-Version: 2.4
Name: byllm
Version: 0.4.8
Summary: byLLM Provides Easy to use APIs for different LLM Providers to be used with Jaseci's Jaclang Programming Language.
License: MIT
Keywords: llm,jaclang,jaseci,byLLM
Author: Jason Mars
Author-email: jason@mars.ninja
Maintainer: Jason Mars
Maintainer-email: jason@mars.ninja
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Programming Language :: Python :: 3.14
Provides-Extra: tools
Provides-Extra: video
Requires-Dist: jaclang (>=0.9.3)
Requires-Dist: litellm (>=1.75.5.post1,<1.80.0)
Requires-Dist: loguru (>=0.7.2,<0.8.0)
Requires-Dist: pillow (>=10.4.0,<10.5.0)
Description-Content-Type: text/markdown

<div align="center">
  <img src="../docs/docs/assets/byLLM_name_logo.png" height="150">

[About byLLM] | [Get started] | [Usage docs] | [Research Paper]
</div>

[About byLLM]: https://www.jac-lang.org/learn/jac-byllm/with_llm/
[Get started]: https://www.jac-lang.org/learn/jac-byllm/quickstart/
[Usage docs]: https://www.jac-lang.org/learn/jac-byllm/usage/
[Research Paper]: https://arxiv.org/abs/2405.08965

# byLLM : Prompt Less, Smile More!

[](https://pypi.org/project/byllm/) [](https://github.com/jaseci-labs/jaseci/actions/workflows/test-jaseci.yml) [](https://discord.gg/6j3QNdtcN6)

byLLM is an innovative AI integration framework built for the Jaseci ecosystem, implementing the cutting-edge Meaning Typed Programming (MTP) paradigm. MTP revolutionizes AI integration by embedding prompt engineering directly into code semantics, making AI interactions more natural and maintainable. While primarily designed to complement the Jac programming language, byLLM also provides a powerful Python library interface.

Installation is simple via PyPI:

```bash
pip install byllm
```

## Basic Example

Consider building an application that translates English into other languages using an LLM. This can be built simply as follows:

```python
import from byllm.lib { Model }

glob llm = Model(model_name="gpt-4o");

def translate_to(language: str, phrase: str) -> str by llm();

with entry {
    output = translate_to(language="Welsh", phrase="Hello world");
    print(output);
}
```

This simple piece of code replaces traditional prompt engineering without introducing additional complexity.

## Power of Types with LLMs

Consider a program that detects the personality type of a historical figure from their name. It can be built so that the LLM picks from an enum, and the output strictly adheres to that type.

```python
import from byllm.lib { Model }
glob llm = Model(model_name="gemini/gemini-2.0-flash");

enum Personality {
    INTROVERT, EXTROVERT, AMBIVERT
}

def get_personality(name: str) -> Personality by llm();

with entry {
    name = "Albert Einstein";
    result = get_personality(name);
    print(f"{result} personality detected for {name}");
}
```

> Similarly, custom types can be used as output types, which forces the LLM to adhere to the specified type and produce a valid result.

## Control! Control! Control!

Even though prompt engineering is eliminated entirely, byLLM still offers specific ways to enrich code semantics through **docstrings** and **semstrings**.

```python
"""Represents the personal record of a person"""
obj Person {
    has name: str;
    has dob: str;
    has ssn: str;
}

sem Person.name = "Full name of the person";
sem Person.dob = "Date of Birth";
sem Person.ssn = "Last four digits of the Social Security Number of a person";

"""Calculate eligibility for various services based on person's data."""
def check_eligibility(person: Person, service_type: str) -> bool by llm();
```

Docstrings naturally enhance the semantics of their associated code constructs, while the `sem` keyword provides an elegant way to enrich the meaning of class attributes and function arguments. Our research shows these concise semantic strings are more effective than traditional multi-line prompts.
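For readers coming from the Python side: byllm's `Tool` wrapper (see `byllm/types.jac` above) resolves a callable's description from an attached sem string when present, and otherwise falls back to its docstring or name, with type hints standing in for parameter descriptions. The snippet below is not part of the packaged README; it is a rough Python sketch of that fallback, reusing the `_jac_semstr` / `_jac_semstr_inner` attribute names seen in `types.jac`.

```python
# Illustrative sketch (not byLLM's public API): how a tool's description and
# parameter descriptions can fall back when no sem strings are attached.
from typing import Callable, get_type_hints

def describe_tool(func: Callable) -> dict:
    description = getattr(func, "_jac_semstr", None) or func.__doc__ or func.__name__
    params = getattr(func, "_jac_semstr_inner", None) or {
        name: str(hint) for name, hint in get_type_hints(func).items()
    }
    return {"description": description, "parameters": params}

def check_eligibility(person: dict, service_type: str) -> bool:
    """Calculate eligibility for various services based on person's data."""
    return True

print(describe_tool(check_eligibility))
```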

## How well does byLLM work?

byLLM is built on the underlying principles of Meaning Typed Programming. We evaluated it against two comparable AI integration frameworks for Python, DSPy and LMQL: byLLM shows a significant performance gain over LMQL and on-par or better performance than DSPy, with lower cost and faster runtime.

<div align="center">
  <img src="../docs/docs/assets/correctness_comparison.png" alt="Correctness Comparison" width="600" style="max-width: 100%;">
  <br>
  <em>Figure: Correctness comparison of byLLM with DSPy and LMQL on benchmark tasks.</em>
</div>

**Full Documentation**: [Jac byLLM Documentation](https://www.jac-lang.org/learn/jac-byllm/with_llm/)

**Complete Examples**:
- [Fantasy Trading Game](https://www.jac-lang.org/learn/examples/mtp_examples/fantasy_trading_game/) - Interactive RPG with AI-generated characters
- [RPG Level Generator](https://www.jac-lang.org/learn/examples/mtp_examples/rpg_game/) - AI-powered game level creation
- [RAG Chatbot Tutorial](https://www.jac-lang.org/learn/examples/rag_chatbot/Overview/) - Building chatbots with document retrieval

**Research**: The MTP research paper is available on [arXiv](https://arxiv.org/abs/2405.08965) and has been accepted at OOPSLA 2025.

## Quick Links

- [Getting Started Guide](https://www.jac-lang.org/learn/jac-byllm/quickstart/)
- [Jac Language Documentation](https://www.jac-lang.org/)
- [GitHub Repository](https://github.com/jaseci-labs/jaseci)

## Contributing

We welcome contributions to byLLM! Whether you're fixing bugs, improving documentation, or adding new features, your help is appreciated.

Areas where we actively seek contributions:
- Bug fixes and improvements
- Documentation enhancements
- New examples and tutorials
- Test cases and benchmarks

Please see our [Contributing Guide](https://www.jac-lang.org/internals/contrib/) for detailed instructions.

If you find a bug or have a feature request, please [open an issue](https://github.com/jaseci-labs/jaseci/issues/new/choose).

## Community

Join our vibrant community:
- [Discord Server](https://discord.gg/6j3QNdtcN6) - Chat with the team and community

## License

This project is licensed under the MIT License.

### Third-Party Dependencies

byLLM integrates with various LLM providers (OpenAI, Anthropic, Google, etc.) through LiteLLM.

## Cite our research

> Jayanaka L. Dantanarayana, Yiping Kang, Kugesan Sivasothynathan, Christopher Clarke, Baichuan Li, Savini Kashmira, Krisztian Flautner, Lingjia Tang, and Jason Mars. 2025. MTP: A Meaning-Typed Language Abstraction for AI-Integrated Programming. Proc. ACM Program. Lang. 9, OOPSLA2, Article 314 (October 2025), 29 pages. https://doi.org/10.1145/3763092

## Jaseci Contributors

<a href="https://github.com/jaseci-labs/jaseci/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=jaseci-labs/jaseci" />
</a>
byllm-0.4.8.dist-info/RECORD
ADDED
@@ -0,0 +1,11 @@
byllm/__init__.py,sha256=cHwW8H7h-wNwtk_e6CVMquikcP9izlW3SLvcG3VeOVk,709
byllm/lib.py,sha256=WjFCRhM-R4O3w9joAHM8Lm70uKoDWGmCItPTzt69iWs,274
byllm/llm.jac,sha256=1Tu0TCClkW08O_vJeeMYWG3B7auUA9ac1_HugGxz5-I,13135
byllm/mtir.jac,sha256=8Kp3A6qjUsZ2tx0t3bebzYQ-9-Y9ocz-Cwqu9XEdr-s,6720
byllm/plugin.py,sha256=9t_nhrUwm35fUB2yEicATQ6kPQivex0Hmy3PAPrmIx8,1608
byllm/schema.jac,sha256=L6jnI-Td03HwNa7-7m6MkNSgH5obcp9DjE6qtptUgOI,9035
byllm/types.jac,sha256=7LVzaTM9WLKLCHoKa5VehpLkqBgMgljs7lWFGqvLdR8,15625
byllm-0.4.8.dist-info/METADATA,sha256=8g6qrroI8HNwbncUmlyznsmLk9sZJuxc8ZXN7HKd_JM,7743
byllm-0.4.8.dist-info/WHEEL,sha256=MICUlqIgkuEnKh9OWy254Ca7q2MHOW-q0u36TZR60nU,92
byllm-0.4.8.dist-info/entry_points.txt,sha256=j4qdGR-j4plBBZcCeIWW_tLjchtDWWsADNCrfo7qUOM,37
byllm-0.4.8.dist-info/RECORD,,