byllm 0.4.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


byllm/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """byLLM Package."""
+
+ from byllm.llm import Model
+ from byllm.mtir import MTIR
+ from byllm.plugin import by
+ from byllm.types import Image, MockToolCall, Video
+
+ __all__ = ["by", "Image", "MockToolCall", "Model", "MTIR", "Video"]
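A minimal sketch of the public API this module re-exports (the model name is a placeholder; any LiteLLM-supported identifier should work):

```python
from byllm import Model, by

llm = Model(model_name="gpt-4o")  # placeholder model name

@by(llm)
def summarize(text: str) -> str:
    """Summarize the given text in one sentence."""
    ...  # the body never runs; the docstring and type hints drive the LLM call
```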
byllm/llm.py ADDED
@@ -0,0 +1,101 @@
+ """LLM abstraction module.
+
+ This module provides a Model class that abstracts LiteLLM and offers
+ an enhanced interface for language model operations.
+ """
+
+ # flake8: noqa: E402
+
+ import os
+ from typing import Generator
+
+ from byllm.mtir import MTIR
+
+ # This prevents LiteLLM from fetching pricing information from the
+ # below URL every time we import litellm, using a cached local json
+ # file instead. Maybe we should conditionally enable this.
+ # https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json
+ os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
+
+ from .llm_connector import LLMConnector
+ from .types import CompletionResult
+
+ SYSTEM_PERSONA = """\
+ This is a task you must complete by returning only the output.
+ Do not include explanations, code, or extra text—only the result.
+ """  # noqa E501
+
+ INSTRUCTION_TOOL = """
+ Use the tools provided to reach the goal. Call one tool at a time with \
+ proper args—no explanations, no narration. Think step by step, invoking tools \
+ as needed. When done, always call finish_tool(output) to return the final \
+ output. Only use tools.
+ """  # noqa E501
+
+
+ class Model:
+     """A wrapper class that abstracts LiteLLM functionality.
+
+     This class provides a simplified and enhanced interface for interacting
+     with various language models through LiteLLM.
+     """
+
+     def __init__(self, model_name: str, **kwargs: object) -> None:
+         """Initialize the Model instance.
+
+         Args:
+             model_name: The model name to use (e.g., "gpt-3.5-turbo", "claude-3-sonnet-20240229")
+             **kwargs: Additional configuration options (e.g., api_key, base_url)
+         """
+         self.llm_connector = LLMConnector.for_model(model_name, **kwargs)
+
+     def __call__(self, **kwargs: object) -> "Model":
+         """Construct the call parameters and return self (factory pattern).
+
+         Example:
+         ```jaclang
+         llm = Model(model_name="gpt-3.5-turbo", api_key="your_api_key")
+
+         # The call below constructs the call parameters and returns self.
+         def answer_user_query(query: str) -> str by
+             llm(
+                 temperature=0.7,
+                 max_tokens=100,
+             );
+         ```
+         """
+         self.llm_connector.call_params = kwargs
+         return self
+
+     @property
+     def call_params(self) -> dict[str, object]:
+         """Get the call parameters for the LLM."""
+         return self.llm_connector.call_params
+
+     def invoke(self, mtir: MTIR) -> object:
+         """Invoke the LLM with the given caller and arguments."""
+         if mtir.stream:
+             return self._completion_streaming(mtir)
+
+         # Invoke the LLM and handle tool calls.
+         while True:
+             resp = self._completion_no_streaming(mtir)
+             if resp.tool_calls:
+                 for tool_call in resp.tool_calls:
+                     if tool_call.is_finish_call():
+                         return tool_call.get_output()
+                     else:
+                         mtir.add_message(tool_call())
+             else:
+                 break
+
+         return resp.output
+
+     def _completion_no_streaming(self, mtir: MTIR) -> CompletionResult:
+         """Perform a completion request with the LLM."""
+         return self.llm_connector.dispatch_no_streaming(mtir)
+
+     def _completion_streaming(self, mtir: MTIR) -> Generator[str, None, None]:
+         """Perform a streaming completion request with the LLM."""
+         return self.llm_connector.dispatch_streaming(mtir)
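The `__call__` factory pattern shown in the docstring above also works from Python library mode; a sketch with illustrative sampling parameters:

```python
from byllm import Model, by

llm = Model(model_name="gpt-4o")  # illustrative model name

# llm(...) stores the parameters for the next dispatch and returns the same
# Model instance, so the decorator receives a configured model.
@by(llm(temperature=0.7, max_tokens=100))
def answer_user_query(query: str) -> str:
    """Answer the user's query concisely."""
    ...
```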
byllm/llm_connector.py ADDED
@@ -0,0 +1,228 @@
+ """LLM Connector for LiteLLM, MockLLM, proxy server, etc.
+
+ This module provides an abstract base class for LLM connectors and concrete implementations
+ for different LLM services. It includes methods for dispatching requests and handling responses.
+ """
+
+ # flake8: noqa: E402
+
+ import json
+ import logging
+ import os
+ import random
+ import time
+ from abc import ABC, abstractmethod
+ from typing import Generator, override
+
+ # This prevents LiteLLM from fetching pricing information from the
+ # below URL every time we import litellm, using a cached local json
+ # file instead. Maybe we should conditionally enable this.
+ # https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json
+ os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
+
+ import litellm
+ from litellm._logging import _disable_debugging
+
+ from openai import OpenAI
+
+ from .mtir import MTIR
+
+ from .types import (
+     CompletionResult,
+     LiteLLMMessage,
+     MockToolCall,
+     ToolCall,
+ )
+
+ DEFAULT_BASE_URL = "http://localhost:4000"
+ MODEL_MOCK = "mockllm"
+
+
+ class LLMConnector(ABC):
+     """Abstract base class for LLM connectors."""
+
+     def __init__(self, model_name: str, **kwargs: object) -> None:
+         """Initialize the LLM connector with a model."""
+         self.model_name = model_name
+         self.config = kwargs
+         # The parameters for the LLM call like temperature, top_k, max_tokens, etc.
+         # These only apply to the next call, passed from `by llm(**kwargs)`.
+         self.call_params: dict[str, object] = {}
+
+     @staticmethod
+     def for_model(model_name: str, **kwargs: object) -> "LLMConnector":
+         """Construct the appropriate LLM connector based on the model name."""
+         if model_name.lower().strip() == MODEL_MOCK:
+             return MockLLMConnector(model_name, **kwargs)
+         if kwargs.get("proxy_url"):
+             kwargs["base_url"] = kwargs.pop("proxy_url")
+             return LiteLLMConnector(True, model_name, **kwargs)
+         return LiteLLMConnector(False, model_name, **kwargs)
+
+     def make_model_params(self, mtir: MTIR) -> dict:
+         """Prepare the parameters for the LLM call."""
+         params = {
+             "model": self.model_name,
+             "api_base": (
+                 self.config.get("base_url")
+                 or self.config.get("host")
+                 or self.config.get("api_base")
+             ),
+             "api_key": self.config.get("api_key"),
+             "messages": mtir.get_msg_list(),
+             "tools": mtir.get_tool_list() or None,
+             "response_format": mtir.get_output_schema(),
+             "temperature": self.call_params.get("temperature", 0.7),
+             "max_tokens": self.call_params.get("max_tokens"),
+             # "top_k": self.call_params.get("top_k", 50),
+             # "top_p": self.call_params.get("top_p", 0.9),
+         }
+         return params
+
+     def log_info(self, message: str) -> None:
+         """Log a message to the console."""
+         # FIXME: logger.info does not always emit output, so for now we print
+         # to stdout; remove this and log properly.
+         if bool(self.config.get("verbose", False)):
+             print(message)
+
+     @abstractmethod
+     def dispatch_no_streaming(self, mtir: MTIR) -> CompletionResult:
+         """Dispatch the LLM call without streaming."""
+         raise NotImplementedError()
+
+     @abstractmethod
+     def dispatch_streaming(self, mtir: MTIR) -> Generator[str, None, None]:
+         """Dispatch the LLM call with streaming."""
+         raise NotImplementedError()
+
+
+ # -----------------------------------------------------------------------------
+ # Mock LLM Connector
+ # -----------------------------------------------------------------------------
+
+
+ class MockLLMConnector(LLMConnector):
+     """LLM Connector for a mock LLM service that simulates responses."""
+
+     @override
+     def dispatch_no_streaming(self, mtir: MTIR) -> CompletionResult:
+         """Dispatch the mock LLM call with the given request."""
+         output = self.config["outputs"].pop(0)  # type: ignore
+
+         if isinstance(output, MockToolCall):
+             self.log_info(
+                 f"Mock LLM call completed with tool call:\n{output.to_tool_call()}"
+             )
+             return CompletionResult(
+                 output=None,
+                 tool_calls=[output.to_tool_call()],
+             )
+
+         self.log_info(f"Mock LLM call completed with response:\n{output}")
+
+         return CompletionResult(
+             output=output,
+             tool_calls=[],
+         )
+
+     @override
+     def dispatch_streaming(self, mtir: MTIR) -> Generator[str, None, None]:
+         """Dispatch the mock LLM call with the given request."""
+         output = self.config["outputs"].pop(0)  # type: ignore
+         if mtir.stream:
+             while output:
+                 chunk_len = random.randint(3, 10)
+                 yield output[:chunk_len]  # Simulate token chunk
+                 time.sleep(random.uniform(0.01, 0.05))  # Simulate network delay
+                 output = output[chunk_len:]
+
+
+ # -----------------------------------------------------------------------------
+ # LiteLLM Connector
+ # -----------------------------------------------------------------------------
+
+
+ class LiteLLMConnector(LLMConnector):
+     """LLM Connector for LiteLLM, a lightweight wrapper around the OpenAI API."""
+
+     def __init__(self, proxy: bool, model_name: str, **kwargs: object) -> None:
+         """Initialize the LiteLLM connector."""
+         super().__init__(model_name, **kwargs)
+         self.proxy = proxy
+
+         # Every LiteLLM call is logged to the tty, which pollutes the output
+         # whenever there is a `by llm()` call in jaclang.
+         logging.getLogger("httpx").setLevel(logging.WARNING)
+         _disable_debugging()
+         litellm.drop_params = True
+
+     @override
+     def dispatch_no_streaming(self, mtir: MTIR) -> CompletionResult:
+         """Dispatch the LLM call without streaming."""
+         # Construct the parameters for the LLM call
+         params = self.make_model_params(mtir)
+
+         # Call the LiteLLM API
+         self.log_info(f"Calling LLM: {self.model_name} with params:\n{params}")
+         if self.proxy:
+             client = OpenAI(
+                 # Fall back to the default proxy URL when none was configured.
+                 base_url=params.pop("api_base", None) or DEFAULT_BASE_URL,
+                 api_key=params.pop("api_key"),
+             )
+             response = client.chat.completions.create(**params)
+         else:
+             response = litellm.completion(**params)
+
+         # Output format:
+         # https://docs.litellm.ai/docs/#response-format-openai-format
+         #
+         # TODO: Handle stream output (type ignoring stream response)
+         message: LiteLLMMessage = response.choices[0].message  # type: ignore
+         mtir.add_message(message)
+
+         output_content: str = message.content  # type: ignore
+         self.log_info(f"LLM call completed with response:\n{output_content}")
+         output_value = mtir.parse_response(output_content)
+
+         tool_calls: list[ToolCall] = []
+         for tool_call in message.tool_calls or []:  # type: ignore
+             if tool := mtir.get_tool(tool_call["function"]["name"]):
+                 args_json = json.loads(tool_call["function"]["arguments"])
+                 args = tool.parse_arguments(args_json)
+                 tool_calls.append(
+                     ToolCall(call_id=tool_call["id"], tool=tool, args=args)
+                 )
+             else:
+                 raise RuntimeError(
+                     f"Attempted to call tool: '{tool_call['function']['name']}' which was not present."
+                 )
+
+         return CompletionResult(
+             output=output_value,
+             tool_calls=tool_calls,
+         )
+
+     @override
+     def dispatch_streaming(self, mtir: MTIR) -> Generator[str, None, None]:
+         """Dispatch the LLM call with streaming."""
+         # Construct the parameters for the LLM call
+         params = self.make_model_params(mtir)
+
+         # Call the LiteLLM API
+         self.log_info(f"Calling LLM: {self.model_name} with params:\n{params}")
+         if self.proxy:
+             client = OpenAI(
+                 base_url=params.pop("api_base", None) or DEFAULT_BASE_URL,
+                 api_key=params.pop("api_key"),
+             )
+             response = client.chat.completions.create(**params, stream=True)
+         else:
+             response = litellm.completion(**params, stream=True)  # type: ignore
+
+         for chunk in response:
+             if chunk.choices and chunk.choices[0].delta:
+                 delta = chunk.choices[0].delta
+                 yield delta.content or ""
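A testing sketch against the mock connector (the `outputs` keyword mirrors how the dispatch methods above pop `self.config["outputs"]`):

```python
from byllm import MockToolCall, Model, by

def get_weather(city: str) -> str:
    """Return the weather for a city."""
    return "Sunny"

# "mockllm" selects MockLLMConnector; each dispatch pops the next output, so
# the first call simulates a tool call and the second the final answer.
llm = Model(model_name="mockllm", outputs=[
    MockToolCall(tool=get_weather, args={"city": "New York"}),
    "The weather in New York is sunny.",
])

@by(llm)
def weather_report(city: str) -> str:
    """Report the current weather in the given city."""
    ...

print(weather_report(city="New York"))
```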
byllm/mtir.py ADDED
@@ -0,0 +1,194 @@
+ """MTIR (Meaning Typed Intermediate Representation) module for the JacLang runtime library."""
+
+ import inspect
+ import json
+ from dataclasses import dataclass
+ from types import MethodType
+ from typing import Callable, get_type_hints
+
+ from byllm.schema import json_to_instance, type_to_schema
+ from byllm.types import (
+     LiteLLMMessage,
+     Media,
+     Message,
+     MessageRole,
+     MessageType,
+     Text,
+     Tool,
+ )
+
+
+ SYSTEM_PERSONA = """\
+ This is a task you must complete by returning only the output.
+ Do not include explanations, code, or extra text—only the result.
+ """  # noqa E501
+
+ INSTRUCTION_TOOL = """
+ Use the tools provided to reach the goal. Call one tool at a time with \
+ proper args—no explanations, no narration. Think step by step, invoking tools \
+ as needed. When done, always call finish_tool(output) to return the final \
+ output. Only use tools.
+ """  # noqa E501
+
+
+ @dataclass
+ class MTIR:
+     """A class representing the MTIR for JacLang."""
+
+     # All the context required to dispatch an LLM invocation.
+     messages: list[MessageType]
+     resp_type: type | None
+     stream: bool
+     tools: list[Tool]
+     call_params: dict[str, object]
+
+     # FIXME: Come up with a better name.
+     @staticmethod
+     def factory(
+         caller: Callable, args: dict[int | str, object], call_params: dict[str, object]
+     ) -> "MTIR":
+         """Create an MTIR instance."""
+         # Prepare the tools for the LLM call.
+         tools = [Tool(func) for func in call_params.get("tools", [])]  # type: ignore
+
+         # Construct the input information from the arguments.
+         param_names = list(inspect.signature(caller).parameters.keys())
+         inputs_detail: list[str] = []
+         media_inputs: list[Media] = []
+
+         for key, value in args.items():
+             if isinstance(value, Media):
+                 media_inputs.append(value)
+                 continue
+
+             if isinstance(key, str):
+                 inputs_detail.append(f"{key} = {value}")
+             else:
+                 # TODO: Handle *args, **kwargs properly.
+                 if key < len(param_names):
+                     inputs_detail.append(f"{param_names[key]} = {value}")
+                 else:
+                     inputs_detail.append(f"arg = {value}")
+         incl_info = call_params.get("incl_info")
+         if incl_info and isinstance(incl_info, dict):
+             for key, value in incl_info.items():
+                 if isinstance(value, Media):
+                     media_inputs.append(value)
+                 else:
+                     inputs_detail.append(f"{key} = {value}")
+
+         if isinstance(caller, MethodType):
+             inputs_detail.insert(0, f"self = {caller.__self__}")
+
+         # Prepare the messages for the LLM call.
+         messages: list[MessageType] = [
+             Message(
+                 role=MessageRole.SYSTEM,
+                 content=SYSTEM_PERSONA + (INSTRUCTION_TOOL if tools else ""),
+             ),
+             Message(
+                 role=MessageRole.USER,
+                 content=[
+                     Text(
+                         Tool.get_func_description(caller)
+                         + "\n\n"
+                         + "\n".join(inputs_detail)
+                     ),
+                     *media_inputs,
+                 ],
+             ),
+         ]
+
+         # Prepare the return type.
+         return_type = get_type_hints(caller).get("return")
+         is_streaming = bool(call_params.get("stream", False))
+
+         # TODO: Support mockllm for mock testing. A streaming request results
+         # in a generator that the caller should consume with .__next__() or by
+         # iterating: `for tok in resp: ...`.
+         if is_streaming:
+             if return_type is not str:
+                 raise RuntimeError(
+                     "Streaming responses are only supported for str return types."
+                 )
+             if tools:
+                 raise RuntimeError(
+                     "Streaming responses are not supported with tool calls yet."
+                 )
+
+         if len(tools) > 0:
+             finish_tool = Tool.make_finish_tool(return_type or str)
+             tools.append(finish_tool)
+
+         return MTIR(
+             messages=messages,
+             tools=tools,
+             resp_type=return_type,
+             stream=is_streaming,
+             call_params=call_params,
+         )
+
+     def dispatch_params(self) -> dict[str, object]:
+         """Dispatch the parameters for the MTIR."""
+         params = {
+             "messages": self.get_msg_list(),
+             "tools": self.get_tool_list() or None,
+             "response_format": self.get_output_schema(),
+             "temperature": self.call_params.get("temperature", 0.7),
+             # "max_tokens": self.call_params.get("max_tokens", 100),
+             # "top_k": self.call_params.get("top_k", 50),
+             # "top_p": self.call_params.get("top_p", 0.9),
+         }
+         return params
+
+     def add_message(self, message: MessageType) -> None:
+         """Add a message to the request."""
+         self.messages.append(message)
+
+     def get_msg_list(self) -> list[dict[str, object] | LiteLLMMessage]:
+         """Return the messages in a format suitable for the LLM API."""
+         return [
+             msg.to_dict() if isinstance(msg, Message) else msg for msg in self.messages
+         ]
+
+     def parse_response(self, response: str) -> object:
+         """Parse the response from the LLM."""
+         # To use validate_json the string would need to contain quotes,
+         # e.g. '"The weather in New York is sunny."', but the response from
+         # the LLM will not have quotes, so for str (and empty) responses we
+         # return early.
+         if self.resp_type is None or self.resp_type is str or response.strip() == "":
+             return response
+         if self.resp_type:
+             json_dict = json.loads(response)
+             return json_to_instance(json_dict, self.resp_type)
+         return response
+
+     def get_tool(self, tool_name: str) -> Tool | None:
+         """Get a tool by its name."""
+         for tool in self.tools:
+             if tool.func.__name__ == tool_name:
+                 return tool
+         return None
+
+     def get_tool_list(self) -> list[dict]:
+         """Return the tools in a format suitable for the LLM API."""
+         return [tool.get_json_schema() for tool in self.tools]
+
+     def get_output_schema(self) -> dict | None:
+         """Return the JSON schema for the response type."""
+         assert (
+             len(self.tools) == 0 or self.get_tool("finish_tool") is not None
+         ), "Finish tool should be present in the tools list."
+         if len(self.tools) == 0 and self.resp_type:
+             if self.resp_type is str:
+                 return None  # Strings are the default and do not use a schema.
+             return type_to_schema(self.resp_type)
+         # If there are tools, the final output is returned through finish_tool,
+         # so there is no output schema.
+         return None
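A sketch of how the plugin layer (next file) drives `MTIR.factory`, with positional arguments keyed by index:

```python
from byllm.mtir import MTIR

def translate_to(language: str, phrase: str) -> str:
    """Translate the phrase into the given language."""
    ...

# Positional arguments are keyed by index and matched to parameter names.
mtir = MTIR.factory(
    caller=translate_to,
    args={0: "Welsh", 1: "Hello world"},
    call_params={},  # e.g. {"stream": False, "tools": [...]}
)
assert mtir.resp_type is str and not mtir.stream
```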
byllm/plugin.py ADDED
@@ -0,0 +1,40 @@
+ """Plugin for Jac's with_llm feature."""
+
+ from typing import Callable
+
+ from byllm.llm import Model
+ from byllm.mtir import MTIR
+
+ from jaclang.runtimelib.machine import hookimpl
+
+
+ class JacMachine:
+     """Jac's with_llm feature."""
+
+     @staticmethod
+     @hookimpl
+     def call_llm(model: Model, mtir: MTIR) -> object:
+         """Call JacLLM and return the result."""
+         return model.invoke(mtir=mtir)
+
+
+ def by(model: Model) -> Callable:
+     """Python library mode decorator for Jac's by llm() syntax."""
+
+     def _decorator(caller: Callable) -> Callable:
+         def _wrapped_caller(*args: object, **kwargs: object) -> object:
+             invoke_args: dict[int | str, object] = {}
+             for i, arg in enumerate(args):
+                 invoke_args[i] = arg
+             for key, value in kwargs.items():
+                 invoke_args[key] = value
+             mtir = MTIR.factory(
+                 caller=caller,
+                 args=invoke_args,
+                 call_params=model.llm_connector.call_params,
+             )
+             return JacMachine.call_llm(model, mtir)
+
+         return _wrapped_caller
+
+     return _decorator
byllm/schema.py ADDED
@@ -0,0 +1,265 @@
+ """Schema generation for OpenAI compatible APIs.
+
+ This module provides functionality to generate JSON schemas for classes and types
+ and to validate instances against these schemas.
+ """
+
+ from dataclasses import is_dataclass
+ from enum import Enum
+ from types import FunctionType, UnionType
+ from typing import Callable, Union, get_args, get_origin, get_type_hints
+
+ from pydantic import TypeAdapter
+
+
+ _SCHEMA_OBJECT_WRAPPER = "schema_object_wrapper"
+ _SCHEMA_DICT_WRAPPER = "schema_dict_wrapper"
+
+
+ def _type_to_schema(ty: type, title: str = "", desc: str = "") -> dict:
+     """Convert a Python type to a JSON schema fragment."""
+     title = title.replace("_", " ").title()
+     context = ({"title": title} if title else {}) | (
+         {"description": desc} if desc else {}
+     )
+
+     semstr: str = ty._jac_semstr if hasattr(ty, "_jac_semstr") else ""
+     semstr = semstr or (ty.__doc__ if hasattr(ty, "__doc__") else "") or ""  # type: ignore
+
+     semstr_inner: dict[str, str] = (
+         ty._jac_semstr_inner if hasattr(ty, "_jac_semstr_inner") else {}
+     )
+
+     # Raise on unsupported types
+     if ty in (list, dict, set, tuple):
+         raise ValueError(
+             f"Untyped {ty.__name__} is not supported for schema generation. "
+             f"Use {ty.__name__}[T, ...] instead."
+         )
+
+     # Handle primitive types
+     if ty is type(None):
+         return {"type": "null"} | context
+     if ty is bool:
+         return {"type": "boolean"} | context
+     if ty is int:
+         return {"type": "integer"} | context
+     if ty is float:
+         return {"type": "number"} | context
+     if ty is str:
+         return {"type": "string"} | context
+
+     # Handle Union
+     if get_origin(ty) in (Union, UnionType):
+         args = get_args(ty)
+         return {
+             "anyOf": [_type_to_schema(arg) for arg in args],
+             "title": title,
+         } | context
+
+     # Handle annotated list
+     if get_origin(ty) is list:
+         item_type: type = get_args(ty)[0]
+         return {
+             "type": "array",
+             "items": _type_to_schema(item_type),
+         } | context
+
+     # Handle annotated tuple/set
+     if get_origin(ty) in (tuple, set):
+         origin = get_origin(ty).__name__  # type: ignore
+         args = get_args(ty)
+         if len(args) == 2 and args[1] is Ellipsis:
+             item_type = args[0]
+             return {
+                 "type": "array",
+                 "items": _type_to_schema(item_type),
+             } | context
+         raise ValueError(
+             f"Unsupported {origin} type for schema generation: {ty}. "
+             f"Only {origin}s of the form {origin}[T, ...] are supported."
+         )
+
+     # Handle annotated dictionaries
+     if get_origin(ty) is dict:
+         return _convert_dict_to_schema(ty) | context
+
+     # Handle dataclasses
+     if is_dataclass(ty):
+         fields: dict[str, type] = {
+             name: type
+             for name, type in get_type_hints(ty).items()
+             if not name.startswith("_")
+         }
+         properties = {
+             name: _type_to_schema(type, name, semstr_inner.get(name, ""))  # type: ignore
+             for name, type in fields.items()
+         }
+         return {
+             "title": title or ty.__name__,
+             "description": semstr,
+             "type": "object",
+             "properties": properties,
+             "required": list(fields.keys()),
+             "additionalProperties": False,
+         }
+
+     # Handle enums
+     if isinstance(ty, type) and issubclass(ty, Enum):
+         enum_type = None
+         enum_values = []
+         for member in ty.__members__.values():
+             enum_values.append(member.value)
+             if enum_type is None:
+                 enum_type = type(member.value)
+             elif type(member.value) is not enum_type:
+                 raise ValueError(
+                     f"Enum {ty.__name__} has mixed types. Not supported for schema generation."
+                 )
+         enum_type = enum_type or int
+         enum_desc = f"\nThe value *should* be one in this list: {enum_values}"
+         if enum_type not in (int, str):
+             raise ValueError(
+                 f"Enum {ty.__name__} has unsupported type {enum_type}. "
+                 "Only int and str enums are supported for schema generation."
+             )
+         return {
+             "description": semstr + enum_desc,
+             "type": "integer" if enum_type is int else "string",
+         }
+
+     # Handle functions
+     if isinstance(ty, FunctionType):
+         hints = get_type_hints(ty)
+         hints.pop("return", None)
+         params = {
+             name: _type_to_schema(type, name, semstr_inner.get(name, ""))
+             for name, type in hints.items()
+         }
+         return {
+             "title": title or ty.__name__,
+             "type": "function",
+             "description": semstr,
+             "properties": params,
+             "required": list(params.keys()),
+             "additionalProperties": False,
+         }
+
+     raise ValueError(
+         f"Unsupported type for schema generation: {ty}. "
+         "Only primitive types, dataclasses, and Union types are supported."
+     )
+
+
+ def _name_of_type(ty: type) -> str:
+     """Return a readable name for a type, used as the schema name."""
+     if get_origin(ty) in (Union, UnionType):
+         names = [_name_of_type(arg) for arg in get_args(ty)]
+         return "_or_".join(names)
+     if hasattr(ty, "__name__"):
+         return ty.__name__
+     return "type"
+
+
+ def _convert_dict_to_schema(ty_dict: type) -> dict:
+     """Convert a dictionary type to a schema."""
+     if get_origin(ty_dict) is not dict:
+         raise ValueError(f"Expected a dictionary type, got {ty_dict}.")
+     key_type, value_type = get_args(ty_dict)
+     return {
+         "type": "object",
+         "title": _SCHEMA_DICT_WRAPPER,
+         "properties": {
+             _SCHEMA_DICT_WRAPPER: {
+                 "type": "array",
+                 "items": {
+                     "type": "object",
+                     "properties": {
+                         "key": _type_to_schema(key_type),
+                         "value": _type_to_schema(value_type),
+                     },
+                     "required": ["key", "value"],
+                     "additionalProperties": False,
+                 },
+             }
+         },
+         "additionalProperties": False,
+         "required": [_SCHEMA_DICT_WRAPPER],
+     }
+
+
+ def _decode_dict(json_obj: dict) -> dict:
+     """Decode a JSON dictionary to a Python dictionary."""
+     if not isinstance(json_obj, dict):
+         return json_obj
+     if _SCHEMA_DICT_WRAPPER in json_obj:
+         items = json_obj[_SCHEMA_DICT_WRAPPER]
+         return {item["key"]: _decode_dict(item["value"]) for item in items}
+     return {key: _decode_dict(value) for key, value in json_obj.items()}
+
+
+ def _wrap_to_object(schema: dict[str, object]) -> dict[str, object]:
+     """Wrap the schema in an object with a type."""
+     if "type" in schema and schema["type"] == "object":
+         return schema
+     return {
+         "type": "object",
+         "title": _SCHEMA_OBJECT_WRAPPER,
+         "properties": {
+             _SCHEMA_OBJECT_WRAPPER: schema,
+         },
+         "required": [_SCHEMA_OBJECT_WRAPPER],
+         "additionalProperties": False,
+     }
+
+
+ def _unwrap_from_object(json_obj: dict) -> dict:
+     """Unwrap the schema from an object with a type."""
+     if _SCHEMA_OBJECT_WRAPPER in json_obj:
+         return json_obj[_SCHEMA_OBJECT_WRAPPER]
+     return json_obj
+
+
+ def type_to_schema(resp_type: type) -> dict[str, object]:
+     """Return the JSON schema for the response type."""
+     type_name = _name_of_type(resp_type)
+     schema = _type_to_schema(resp_type, type_name)
+     schema = _wrap_to_object(schema)
+     return {
+         "type": "json_schema",
+         "json_schema": {
+             "name": type_name,
+             "schema": schema,
+             "strict": True,
+         },
+     }
+
+
+ def tool_to_schema(
+     func: Callable, description: str, params_desc: dict[str, str]
+ ) -> dict[str, object]:
+     """Return the JSON schema for the tool type."""
+     schema = _type_to_schema(func)  # type: ignore
+     properties: dict[str, object] = schema.get("properties", {})  # type: ignore
+     required: list[str] = schema.get("required", [])  # type: ignore
+     for param_name, param_info in properties.items():
+         param_info["description"] = params_desc.get(param_name, "")  # type: ignore
+     return {
+         "type": "function",
+         "function": {
+             "name": func.__name__,
+             "description": description,
+             "parameters": {
+                 "type": "object",
+                 "properties": properties,
+                 "required": required,
+                 "additionalProperties": False,
+             },
+         },
+     }
+
+
+ def json_to_instance(json_obj: dict, ty: type) -> object:
+     """Convert a JSON dictionary to an instance of the given type."""
+     json_obj = _unwrap_from_object(json_obj)
+     json_obj = _decode_dict(json_obj)
+     return TypeAdapter(ty).validate_python(json_obj)
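A sketch of the round trip these helpers implement, using an illustrative dataclass: `type_to_schema` wraps non-object types so the top level is always an object, and `json_to_instance` undoes the wrapping when parsing:

```python
from dataclasses import dataclass

from byllm.schema import json_to_instance, type_to_schema

@dataclass
class Task:
    """A unit of work."""

    description: str
    priority: int

# Produces {"type": "json_schema", "json_schema": {"name": "Task", ...,
# "strict": True}}, suitable as an OpenAI-style response_format.
response_format = type_to_schema(Task)

# Validates a model response back into a Task via pydantic's TypeAdapter.
task = json_to_instance({"description": "Write docs", "priority": 1}, Task)
assert isinstance(task, Task)
```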
byllm/types.py ADDED
@@ -0,0 +1,346 @@
+ """Type definitions for LLM interactions.
+
+ This module defines the types used in LLM interactions, including messages,
+ tools, and tool calls. It provides a structured way to represent messages,
+ tool calls, and tools that can be used in LLM requests and responses.
+ """
+
+ import base64
+ import mimetypes
+ import os
+ from dataclasses import dataclass
+ from enum import StrEnum
+ from io import BytesIO
+ from typing import Callable, TypeAlias, get_type_hints
+
+ from PIL.Image import open as open_image
+
+ from litellm.types.utils import Message as LiteLLMMessage
+
+ from pydantic import TypeAdapter
+
+ from .schema import tool_to_schema
+
+ # The message can be a jaclang-defined message or whatever object the LLM
+ # returned, fed back to the LLM as it was given (a dict).
+ MessageType: TypeAlias = "Message | LiteLLMMessage"
+
+
+ class MessageRole(StrEnum):
+     """Enum for message roles in LLM interactions."""
+
+     SYSTEM = "system"
+     USER = "user"
+     ASSISTANT = "assistant"
+     TOOL = "tool"
+
+
+ @dataclass
+ class Message:
+     """Message class for LLM interactions."""
+
+     role: MessageRole
+     content: "str | list[Media]"
+
+     def to_dict(self) -> dict[str, object]:
+         """Convert the message to a dictionary."""
+         if isinstance(self.content, str):
+             return {
+                 "role": self.role.value,
+                 "content": self.content,
+             }
+
+         media_contents = []
+         for media in self.content:
+             media_contents.extend(media.to_dict())
+         return {
+             "role": self.role.value,
+             "content": media_contents,
+         }
+
+
+ @dataclass
+ class ToolCallResultMsg(Message):
+     """Result of a tool call in LLM interactions."""
+
+     tool_call_id: str
+     name: str  # Function name.
+
+     def __post_init__(self) -> None:
+         """Post-initialization to set the role of the message."""
+         self.role = MessageRole.TOOL  # Maybe this should be an assertion?
+
+     def to_dict(self) -> dict[str, object]:
+         """Convert the tool call result message to a dictionary."""
+         return {
+             "role": self.role.value,
+             "content": self.content,
+             "tool_call_id": self.tool_call_id,
+             "name": self.name,
+         }
+
+
+ @dataclass
+ class Tool:
+     """Tool class for LLM interactions."""
+
+     func: Callable
+     description: str = ""
+     params_desc: dict[str, str] = None  # type: ignore
+
+     def __post_init__(self) -> None:
+         """Post-initialization to validate the function."""
+         self.func.__annotations__ = get_type_hints(self.func)
+         self.description = Tool.get_func_description(self.func)
+         if hasattr(self.func, "_jac_semstr_inner"):
+             self.params_desc = self.func._jac_semstr_inner  # type: ignore
+         else:
+             self.params_desc = {
+                 name: str(type) for name, type in self.func.__annotations__.items()
+             }
+
+     def __call__(self, *args: list, **kwargs: dict) -> object:
+         """Call the tool function with the provided arguments."""
+         # If the finish tool raises, we propagate the exception, since it's
+         # the user's responsibility to handle it.
+         if self.is_finish_tool():
+             return self.func(*args, **kwargs)
+         try:
+             # TODO: Should we JSON-serialize the result, or is this fine?
+             return self.func(*args, **kwargs)
+         except Exception as e:
+             # If a tool fails, the LLM sees the error message and can make
+             # a decision based on that.
+             return str(e)
+
+     def get_name(self) -> str:
+         """Return the name of the tool function."""
+         return self.func.__name__
+
+     @staticmethod
+     def get_func_description(func: Callable) -> str:
+         """Get the description of the function."""
+         if hasattr(func, "_jac_semstr"):
+             return func._jac_semstr  # type: ignore
+         return func.__doc__ or func.__name__
+
+     @staticmethod
+     def make_finish_tool(resp_type: type) -> "Tool":
+         """Create a finish tool that returns the final output."""
+
+         def finish_tool(final_output: object) -> object:
+             return TypeAdapter(resp_type).validate_python(final_output)
+
+         finish_tool.__annotations__["return"] = resp_type
+         finish_tool.__annotations__["final_output"] = resp_type
+         return Tool(
+             func=finish_tool,
+             description="This tool is used to finish the tool calls and return the final output.",
+             params_desc={
+                 "final_output": "The final output of the tool calls.",
+             },
+         )
+
+     def is_finish_tool(self) -> bool:
+         """Check if the tool is a finish tool."""
+         return self.get_name() == "finish_tool"
+
+     def get_json_schema(self) -> dict[str, object]:
+         """Return the JSON schema for the tool function."""
+         return tool_to_schema(self.func, self.description, self.params_desc)
+
+     def parse_arguments(self, args_json: dict) -> dict:
+         """Parse the arguments from JSON to the function's expected format."""
+         args = {}
+         annotations = self.func.__annotations__
+         for arg_name, arg_json in args_json.items():
+             if arg_type := annotations.get(arg_name):
+                 args[arg_name] = TypeAdapter(arg_type).validate_python(arg_json)
+         return args
+
+
+ @dataclass
+ class ToolCall:
+     """Tool call class for LLM interactions."""
+
+     call_id: str
+     tool: Tool
+     args: dict
+
+     def __call__(self) -> ToolCallResultMsg:
+         """Call the tool with the provided arguments."""
+         result = self.tool(**self.args)
+         return ToolCallResultMsg(
+             role=MessageRole.TOOL,
+             content=str(result),
+             tool_call_id=self.call_id,
+             name=self.tool.get_name(),
+         )
+
+     def __str__(self) -> str:
+         """Return the string representation of the tool call."""
+         params = ", ".join(f"{k}={v}" for k, v in self.args.items())
+         return f"{self.tool.get_name()}({params})"
+
+     def is_finish_call(self) -> bool:
+         """Check if the tool is a finish tool."""
+         return self.tool.is_finish_tool()
+
+     def get_output(self) -> object:
+         """Get the output from the finish tool call."""
+         assert (
+             self.is_finish_call()
+         ), "This method should only be called for finish tools."
+         return self.tool(**self.args)
+
+
+ @dataclass
+ class MockToolCall:
+     """Mock tool call for testing purposes."""
+
+     tool: Callable
+     args: dict
+
+     def to_tool_call(self) -> ToolCall:
+         """Convert the mock tool call to a ToolCall."""
+         return ToolCall(
+             call_id="",  # Call ID is not used in mock calls.
+             tool=Tool(self.tool),
+             args=self.args,
+         )
+
+
+ @dataclass
+ class CompletionResult:
+     """Result of the completion from the LLM."""
+
+     output: object
+     tool_calls: list[ToolCall]
+
+
+ # -----------------------------------------------------------------------------
+ # Media content types
+ # -----------------------------------------------------------------------------
+
+
+ @dataclass
+ class Media:
+     """Base class for message content."""
+
+     def to_dict(self) -> list[dict]:
+         """Convert the content to a dictionary."""
+         raise NotImplementedError("Subclasses must implement this method.")
+
+
+ @dataclass
+ class Text(Media):
+     """Class representing text content in a message."""
+
+     text: str
+
+     def to_dict(self) -> list[dict]:
+         """Convert the text content to a dictionary."""
+         return [{"type": "text", "text": self.text}]
+
+
+ @dataclass
+ class Image(Media):
+     """Class representing an image."""
+
+     url: str
+     mime_type: str | None = None
+
+     def __post_init__(self) -> None:
+         """Post-initialization to ensure the URL is a string."""
+         if self.url.startswith(("http://", "https://", "gs://")):
+             self.url = self.url.strip()
+         else:
+             if not os.path.exists(self.url):
+                 raise ValueError(f"Image file does not exist: {self.url}")
+             image = open_image(self.url)
+
+             # python<3.13 mimetypes doesn't support the `webp` format as it wasn't an IANA standard
+             # until November 2024 (RFC-9649: https://www.rfc-editor.org/rfc/rfc9649.html).
+             if image.format and image.format.lower() == "webp":
+                 self.mime_type = "image/webp"
+             else:
+                 self.mime_type = mimetypes.types_map.get(
+                     "." + (image.format or "png").lower()
+                 )
+             with BytesIO() as buffer:
+                 image.save(buffer, format=image.format, quality=100)
+                 base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")
+             self.url = f"data:{self.mime_type};base64,{base64_image}"
+
+     def to_dict(self) -> list[dict]:
+         """Convert the image to a dictionary."""
+         image_url = {"url": self.url}
+         if self.mime_type:
+             image_url["format"] = self.mime_type
+         return [
+             {
+                 "type": "image_url",
+                 "image_url": image_url,
+             }
+         ]
+
+
+ # Ref: https://cookbook.openai.com/examples/gpt_with_vision_for_video_understanding
+ @dataclass
+ class Video(Media):
+     """Class representing a video."""
+
+     path: str
+     fps: int = 1
+     _base64frames: list[str] | None = None
+
+     def __post_init__(self) -> None:
+         """Post-initialization to ensure the path is a string."""
+         if not os.path.exists(self.path):
+             raise ValueError(f"Video file does not exist: {self.path}")
+
+     def load_frames(self) -> None:
+         """Load video frames as base64-encoded images."""
+         try:
+             import cv2
+         except ImportError:
+             raise ImportError(
+                 "OpenCV is required to process video files. "
+                 "Install `pip install byllm[video]` for video capabilities."
+             )
+
+         self._base64frames = []
+         video = cv2.VideoCapture(self.path)
+         total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
+
+         target_fps = self.fps
+         source_fps = video.get(cv2.CAP_PROP_FPS)
+         frames_to_skip = (
+             int(source_fps / target_fps) - 1 if target_fps < source_fps else 1
+         )
+
+         curr_frame = 0
+         while curr_frame < total_frames - 1:
+             video.set(cv2.CAP_PROP_POS_FRAMES, curr_frame)
+             success, frame = video.read()
+             if not success:
+                 raise ValueError("Failed to read video frame.")
+             _, buffer = cv2.imencode(".jpg", frame)
+             self._base64frames.append(base64.b64encode(buffer).decode("utf-8"))
+             curr_frame += frames_to_skip
+
+     def to_dict(self) -> list[dict]:
+         """Convert the video to a dictionary."""
+         if self._base64frames is None:
+             self.load_frames()
+         assert (
+             self._base64frames is not None
+         ), "Frames must be loaded before conversion."
+
+         return [
+             {
+                 "type": "image_url",
+                 "image_url": f"data:image/jpeg;base64,{frame}",
+             }
+             for frame in self._base64frames
+         ]
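A sketch of how media content serializes into OpenAI-style message parts (the URL is a placeholder): remote http(s)/gs URLs pass through untouched, while a local path is validated and inlined as a base64 data URL by `Image.__post_init__`:

```python
from byllm.types import Image, Text

img = Image(url="https://example.com/cat.png")  # placeholder URL
parts = Text("Describe the photo.").to_dict() + img.to_dict()
# -> [{"type": "text", "text": ...},
#     {"type": "image_url", "image_url": {"url": "https://..."}}]
```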
byllm-0.4.1.dist-info/METADATA ADDED
@@ -0,0 +1,102 @@
+ Metadata-Version: 2.3
+ Name: byllm
+ Version: 0.4.1
+ Summary: byLLM Provides Easy to use APIs for different LLM Providers to be used with Jaseci's Jaclang Programming Language.
+ License: MIT
+ Keywords: llm,jaclang,jaseci,byLLM
+ Author: Jason Mars
+ Author-email: jason@jaseci.org
+ Maintainer: Jason Mars
+ Maintainer-email: jason@jaseci.org
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 2
+ Classifier: Programming Language :: Python :: 2.7
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.4
+ Classifier: Programming Language :: Python :: 3.5
+ Classifier: Programming Language :: Python :: 3.6
+ Classifier: Programming Language :: Python :: 3.7
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Provides-Extra: tools
+ Provides-Extra: video
+ Requires-Dist: jaclang (==0.8.5)
+ Requires-Dist: litellm (>=1.75.5.post1)
+ Requires-Dist: loguru (>=0.7.2,<0.8.0)
+ Requires-Dist: pillow (>=10.4.0,<10.5.0)
+ Description-Content-Type: text/markdown
+
+ # byLLM - AI Integration Framework for Jac-lang
+
+ [![PyPI version](https://img.shields.io/pypi/v/mtllm.svg)](https://pypi.org/project/mtllm/) [![tests](https://github.com/jaseci-labs/jaseci/actions/workflows/test-jaseci.yml/badge.svg?branch=main)](https://github.com/jaseci-labs/jaseci/actions/workflows/test-jaseci.yml)
+
+ Meaning Typed Programming (MTP) is a programming paradigm for AI integration in which prompt engineering is hidden behind code semantics. byLLM is the plugin built to explore this hypothesis. It is a plugin to the Jaseci ecosystem and can be installed as a PyPI package.
+
+ ```bash
+ pip install byllm
+ ```
+
+ ## Basic Example
+
+ A basic use case of MTP can be demonstrated as follows:
+
+ ```jac
+ import from byllm {Model}
+
+ glob llm = Model(model_name="openai/gpt-4o");
+
+ def translate_to(language: str, phrase: str) -> str by llm();
+
+ with entry {
+     output = translate_to(language="Welsh", phrase="Hello world");
+     print(output);
+ }
+ ```
+
+ ## AI-Powered Object Generation
+
+ ```jac
+ import from byllm {Model}
+
+ glob llm = Model(model_name="gpt-4o");
+
+ obj Task {
+     has description: str,
+         priority: int,
+         estimated_time: int;
+ }
+
+ sem Task.priority = "priority between 0 (highest priority) and 10 (lowest priority)";
+
+ def create_task(description: str, previous_tasks: list[Task]) -> Task by llm();
+
+ with entry {
+     tasks = [];
+     new_task = create_task("Write documentation for the API", tasks);
+     print(f"Task: {new_task.description}, Priority: {new_task.priority}, Time: {new_task.estimated_time}min");
+ }
+ ```
+
+ The `by` abstraction automates prompt construction from existing code semantics, eliminating manual prompt engineering while leveraging type annotations for structured AI responses.
+
+ ## Documentation and Examples
+
+ **📚 Full Documentation**: [Jac byLLM Documentation](https://www.jac-lang.org/learn/jac-byllm/with_llm/)
+
+ **🎮 Complete Examples**:
+ - [Fantasy Trading Game](https://www.jac-lang.org/learn/examples/mtp_examples/fantasy_trading_game/) - Interactive RPG with AI-generated characters
+ - [RPG Level Generator](https://www.jac-lang.org/learn/examples/mtp_examples/rpg_game/) - AI-powered game level creation
+ - [RAG Chatbot Tutorial](https://www.jac-lang.org/learn/examples/rag_chatbot/Overview/) - Building chatbots with document retrieval
+
+ **🔬 Research**: The research journey of MTP is available on [Arxiv](https://arxiv.org/abs/2405.08965).
+
+ ## Quick Links
+
+ - [Getting Started Guide](https://www.jac-lang.org/learn/jac-byllm/with_llm/)
+ - [Model Configuration](https://www.jac-lang.org/learn/jac-byllm/model_declaration/)
+ - [Jac Language Documentation](https://www.jac-lang.org/)
+ - [GitHub Repository](https://github.com/jaseci-labs/jaseci)
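Following up on the `by` abstraction described in the README above: the package also exposes a Python library mode decorator (see byllm/plugin.py); a sketch of the same structured-output flow in plain Python, with a placeholder model name:

```python
from dataclasses import dataclass

from byllm import Model, by

llm = Model(model_name="gpt-4o")  # placeholder model name

@dataclass
class Task:
    description: str
    priority: int  # 0 (highest priority) to 10 (lowest priority)
    estimated_time: int

@by(llm)
def create_task(description: str, previous_tasks: list[Task]) -> Task:
    """Create a new task given a description and the existing tasks."""
    ...

task = create_task("Write documentation for the API", [])
```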
byllm-0.4.1.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
+ byllm/__init__.py,sha256=Iqi_KAnRkr5vC9OYMWa9e5vZE3PiR2ror_Th1wa8F9Q,226
+ byllm/llm.py,sha256=fjraqQaT1uiwxTjLW7Wm8TDnnl7N5xDztA2KhyUfIQE,3560
+ byllm/llm_connector.py,sha256=XK8ftsafTYWq7aAjDxk4R4OeyJ1P1ou2qrZAXOU9xmU,8546
+ byllm/mtir.py,sha256=V4fpc0-j_7pb8rVMV8yskc9XczUCtHczrj5OZGXKA8g,7083
+ byllm/plugin.py,sha256=T_uSAkiyuPiufo5SEUNcgMOU0Ns5Zbg7BPfMo_ihC9s,1132
+ byllm/schema.py,sha256=PGaEiNlbm9CjIJu1lE2kv8QrfudfD2I3LvHfkMqxQkI,8812
+ byllm/types.py,sha256=Uh6j0AcdHZ0zZ_KESu-ZBBpI9yoA7kw4yU550N4E-Tk,11117
+ byllm-0.4.1.dist-info/METADATA,sha256=RQYnecvetizswVO-4TmSJ6r3G4aFrgkK99rjnK_isw0,3990
+ byllm-0.4.1.dist-info/WHEEL,sha256=5druYqcII7zHXzX7qmN0TH895Jg3yuTtfjMgJIVBrKE,92
+ byllm-0.4.1.dist-info/entry_points.txt,sha256=hUzQdaP8qTKkAqHfBpcqxQK02wixeyyzx3y-s6KyrQg,37
+ byllm-0.4.1.dist-info/RECORD,,
byllm-0.4.1.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: poetry-core 2.1.3
+ Root-Is-Purelib: true
+ Tag: py2.py3-none-any
byllm-0.4.1.dist-info/entry_points.txt ADDED
@@ -0,0 +1,3 @@
+ [jac]
+ byllm=byllm.plugin:JacMachine
+