jaclang 0.7.5__py3-none-any.whl → 0.7.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of jaclang might be problematic.
- jaclang/compiler/absyntree.py +167 -26
- jaclang/compiler/constant.py +98 -2
- jaclang/compiler/jac.lark +2 -0
- jaclang/compiler/parser.py +4 -0
- jaclang/compiler/passes/main/access_modifier_pass.py +5 -3
- jaclang/compiler/passes/main/def_impl_match_pass.py +3 -1
- jaclang/compiler/passes/main/def_use_pass.py +27 -39
- jaclang/compiler/passes/main/fuse_typeinfo_pass.py +34 -12
- jaclang/compiler/passes/main/sub_node_tab_pass.py +0 -5
- jaclang/compiler/passes/main/sym_tab_build_pass.py +31 -181
- jaclang/compiler/passes/tool/tests/fixtures/genai/essay_review.jac +1 -1
- jaclang/compiler/passes/tool/tests/fixtures/genai/expert_answer.jac +1 -1
- jaclang/compiler/passes/tool/tests/fixtures/genai/joke_gen.jac +1 -1
- jaclang/compiler/passes/tool/tests/fixtures/genai/odd_word_out.jac +1 -1
- jaclang/compiler/passes/tool/tests/fixtures/genai/personality_finder.jac +1 -1
- jaclang/compiler/passes/tool/tests/fixtures/genai/text_to_type.jac +1 -1
- jaclang/compiler/passes/tool/tests/fixtures/genai/translator.jac +1 -1
- jaclang/compiler/passes/tool/tests/fixtures/genai/wikipedia.jac +1 -1
- jaclang/compiler/symtable.py +118 -65
- jaclang/core/aott.py +7 -3
- jaclang/core/importer.py +1 -1
- jaclang/langserve/engine.py +100 -36
- jaclang/langserve/server.py +34 -61
- jaclang/langserve/tests/fixtures/base_module_structure.jac +28 -0
- jaclang/langserve/tests/fixtures/circle_pure.test.jac +15 -0
- jaclang/langserve/tests/fixtures/import_include_statements.jac +6 -0
- jaclang/langserve/tests/fixtures/py_import.py +26 -0
- jaclang/langserve/tests/test_server.py +90 -6
- jaclang/langserve/utils.py +114 -4
- jaclang/plugin/default.py +2 -2
- jaclang/plugin/feature.py +1 -1
- jaclang/plugin/spec.py +1 -1
- jaclang/tests/fixtures/aott_raise.jac +1 -1
- jaclang/tests/fixtures/edgetypeissue.jac +10 -0
- jaclang/tests/fixtures/hello.jac +1 -1
- jaclang/tests/fixtures/with_llm_function.jac +1 -1
- jaclang/tests/fixtures/with_llm_lower.jac +1 -1
- jaclang/tests/fixtures/with_llm_method.jac +1 -1
- jaclang/tests/fixtures/with_llm_type.jac +1 -1
- jaclang/tests/fixtures/with_llm_vision.jac +1 -1
- jaclang/tests/test_language.py +106 -96
- {jaclang-0.7.5.dist-info → jaclang-0.7.6.dist-info}/METADATA +1 -1
- {jaclang-0.7.5.dist-info → jaclang-0.7.6.dist-info}/RECORD +45 -50
- jaclang/core/llms/__init__.py +0 -20
- jaclang/core/llms/anthropic.py +0 -90
- jaclang/core/llms/base.py +0 -206
- jaclang/core/llms/groq.py +0 -70
- jaclang/core/llms/huggingface.py +0 -76
- jaclang/core/llms/ollama.py +0 -81
- jaclang/core/llms/openai.py +0 -65
- jaclang/core/llms/togetherai.py +0 -63
- jaclang/core/llms/utils.py +0 -9
- jaclang/tests/fixtures/edgetypetest.jac +0 -16
- {jaclang-0.7.5.dist-info → jaclang-0.7.6.dist-info}/WHEEL +0 -0
- {jaclang-0.7.5.dist-info → jaclang-0.7.6.dist-info}/entry_points.txt +0 -0
jaclang/core/llms/base.py
DELETED
@@ -1,206 +0,0 @@
-"""Base Large Language Model (LLM) class."""
-
-import logging
-import re
-
-from .utils import logger
-
-
-httpx_logger = logging.getLogger("httpx")
-httpx_logger.setLevel(logging.WARNING)
-
-SYSTEM_PROMPT = """
-[System Prompt]
-This is an operation you must perform and return the output values. Neither, the methodology, extra sentences nor the code are not needed.
-Input/Type formatting: Explanation of the Input (variable_name) (type) = value
-""" # noqa E501
-
-
-PROMPT_TEMPLATE = """
-[Information]
-{information}
-
-[Context]
-{context}
-
-[Inputs Information]
-{inputs_information}
-
-[Output Information]
-{output_information}
-
-[Type Explanations]
-{type_explanations}
-
-[Action]
-{action}
-""" # noqa E501
-
-NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
-
-[Output] <result>
-""" # noqa E501
-
-REASON_SUFFIX = """
-Reason and return the output result(s) only, adhering to the provided Type in the following format
-
-[Reasoning] <Reason>
-[Output] <Result>
-"""
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-""" # noqa E501
-
-REACT_SUFFIX = """
-""" # noqa E501
-
-MTLLM_OUTPUT_FIX_PROMPT = """
-[Output]
-{model_output}
-
-[Previous Result You Provided]
-{previous_output}
-
-[Desired Output Type]
-{output_info}
-
-[Type Explanations]
-{output_type_info}
-
-Above output is not in the desired Output Format/Type. Please provide the output in the desired type. Do not repeat the previously provided output.
-Important: Do not provide the code or the methodology. Only provide
-the output in the desired format.
-""" # noqa E501
-
-OUTPUT_CHECK_PROMPT = """
-[Output]
-{model_output}
-
-[Desired Output Type]
-{output_type}
-
-[Type Explanations]
-{output_type_info}
-
-Check if the output is exactly in the desired Output Type. Important: Just say 'Yes' or 'No'.
-""" # noqa E501
-
-
-class BaseLLM:
-    """Base Large Language Model (LLM) class."""
-
-    MTLLM_SYSTEM_PROMPT: str = SYSTEM_PROMPT
-    MTLLM_PROMPT: str = PROMPT_TEMPLATE
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-    OUTPUT_FIX_PROMPT: str = MTLLM_OUTPUT_FIX_PROMPT
-    OUTPUT_CHECK_PROMPT: str = OUTPUT_CHECK_PROMPT
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Large Language Model (LLM) client."""
-        self.verbose = verbose
-        self.max_tries = max_tries
-        raise NotImplementedError
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        raise NotImplementedError
-
-    def __call__(self, input_text: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input text."""
-        if self.verbose:
-            logger.info(f"Meaning In\n{input_text}")
-        return self.__infer__(input_text, **kwargs)
-
-    def resolve_output(
-        self,
-        meaning_out: str,
-        output_semstr: str,
-        output_type: str,
-        output_type_info: str,
-    ) -> str:
-        """Resolve the output string to return the reasoning and output."""
-        if self.verbose:
-            logger.info(f"Meaning Out\n{meaning_out}")
-        output_match = re.search(r"\[Output\](.*)", meaning_out)
-        output = output_match.group(1).strip() if output_match else None
-        if not output_match:
-            output = self._extract_output(
-                meaning_out,
-                output_semstr,
-                output_type,
-                output_type_info,
-                self.max_tries,
-            )
-        return str(output)
-
-    def _check_output(
-        self, output: str, output_type: str, output_type_info: str
-    ) -> bool:
-        """Check if the output is in the desired format."""
-        output_check_prompt = self.OUTPUT_CHECK_PROMPT.format(
-            model_output=output,
-            output_type=output_type,
-            output_type_info=output_type_info,
-        )
-        llm_output = self.__infer__(output_check_prompt)
-        return "yes" in llm_output.lower()
-
-    def _extract_output(
-        self,
-        meaning_out: str,
-        output_semstr: str,
-        output_type: str,
-        output_type_info: str,
-        max_tries: int,
-        previous_output: str = "None",
-    ) -> str:
-        """Extract the output from the meaning out string."""
-        if max_tries == 0:
-            logger.error("Failed to extract output. Max tries reached.")
-            raise ValueError(
-                "Failed to extract output. Try Changing the Semstrings, provide examples or change the method."
-            )
-
-        if self.verbose:
-            if max_tries < self.max_tries:
-                logger.info(
-                    f"Failed to extract output. Trying to extract output again. Max tries left: {max_tries}"
-                )
-            else:
-                logger.info("Extracting output from the meaning out string.")
-
-        output_fix_prompt = self.OUTPUT_FIX_PROMPT.format(
-            model_output=meaning_out,
-            previous_output=previous_output,
-            output_info=f"{output_semstr} ({output_type})",
-            output_type_info=output_type_info,
-        )
-        llm_output = self.__infer__(output_fix_prompt)
-        is_in_desired_format = self._check_output(
-            llm_output, output_type, output_type_info
-        )
-        if self.verbose:
-            logger.info(
-                f"Extracted Output: {llm_output}. Is in Desired Format: {is_in_desired_format}"
-            )
-        if is_in_desired_format:
-            return llm_output
-        return self._extract_output(
-            meaning_out,
-            output_semstr,
-            output_type,
-            output_type_info,
-            max_tries - 1,
-            llm_output,
-        )
jaclang/core/llms/groq.py
DELETED
@@ -1,70 +0,0 @@
-"""Groq API client for MTLLM."""
-
-from .base import BaseLLM
-
-
-REASON_SUFFIX = """
-Reason and return the output result(s) only, adhering to the provided Type in the following format
-
-[Reasoning] <Reason>
-[Output] <Result>
-"""
-
-NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
-
-[Output] <result>
-""" # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-""" # noqa E501
-
-REACT_SUFFIX = """
-""" # noqa E501
-
-
-class Groq(BaseLLM):
-    """Groq API client for MTLLM."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Groq API client."""
-        import groq  # type: ignore
-
-        self.client = groq.Groq()
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.model_name = kwargs.get("model_name", "mixtral-8x7b-32768")
-        self.temperature = kwargs.get("temperature", 0.7)
-        self.max_tokens = kwargs.get("max_tokens", 1024)
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        assert isinstance(
-            meaning_in, str
-        ), "Currently Multimodal models are not supported. Please provide a string input."
-        messages = [{"role": "user", "content": meaning_in}]
-        model_params = {
-            k: v
-            for k, v in kwargs.items()
-            if k not in ["model_name", "temperature", "max_tokens"]
-        }
-        output = self.client.chat.completions.create(
-            model=kwargs.get("model_name", self.model_name),
-            temperature=kwargs.get("temperature", self.temperature),
-            max_tokens=kwargs.get("max_tokens", self.max_tokens),
-            messages=messages,
-            **model_params,
-        )
-        return output.choices[0].message.content
jaclang/core/llms/huggingface.py
DELETED
@@ -1,76 +0,0 @@
-"""Huggingface client for MTLLM."""
-
-from .base import BaseLLM
-
-
-REASON_SUFFIX = """
-Reason and return the output results(s) only such that <Output> should be eval(<Output>) Compatible and reflects the
-expected output type, Follow the format below to provide the reasoning for the output result(s).
-
-[Reasoning] <Reasoning>
-[Output] <Output>
-"""
-
-NORMAL_SUFFIX = """Return the output result(s) only such that <Output> should be eval(<Output>) Compatible and
-reflects the expected output type, Follow the format below to provide the output result(s).
-
-[Output] <Output>
-""" # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-""" # noqa E501
-
-REACT_SUFFIX = """
-""" # noqa E501
-
-
-class Huggingface(BaseLLM):
-    """Huggingface API client for Large Language Models (LLMs)."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Huggingface API client."""
-        import torch  # type: ignore
-        from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline  # type: ignore
-
-        torch.random.manual_seed(0)
-        model = AutoModelForCausalLM.from_pretrained(
-            kwargs.get("model_name", "microsoft/Phi-3-mini-128k-instruct"),
-            device_map=kwargs.get("device_map", "cuda"),
-            torch_dtype="auto",
-            trust_remote_code=True,
-        )
-        tokenizer = AutoTokenizer.from_pretrained(
-            kwargs.get("model_name", "microsoft/Phi-3-mini-128k-instruct")
-        )
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
-        self.temperature = kwargs.get("temperature", 0.7)
-        self.max_tokens = kwargs.get("max_new_tokens", 1024)
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        assert isinstance(
-            meaning_in, str
-        ), "Currently Multimodal models are not supported. Please provide a string input."
-        messages = [{"role": "user", "content": meaning_in}]
-        output = self.pipe(
-            messages,
-            temperature=kwargs.get("temperature", self.temperature),
-            max_length=kwargs.get("max_new_tokens", self.max_tokens),
-            **kwargs,
-        )
-        return output[0]["generated_text"][-1]["content"]
jaclang/core/llms/ollama.py
DELETED
@@ -1,81 +0,0 @@
-"""Ollama client for MTLLM."""
-
-from .base import BaseLLM
-
-REASON_SUFFIX = """
-Reason and return the output results(s) only such that <Output> should be eval(<Output>) Compatible and reflects the
-expected output type, Follow the format below to provide the reasoning for the output result(s).
-
-[Reasoning] <Reasoning>
-[Output] <Output>
-"""
-
-NORMAL_SUFFIX = """Return the output result(s) only such that <Output> should be eval(<Output>) Compatible and
-reflects the expected output type, Follow the format below to provide the output result(s).
-
-[Output] <Output>
-""" # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-""" # noqa E501
-
-REACT_SUFFIX = """
-""" # noqa E501
-
-
-class Ollama(BaseLLM):
-    """Ollama API client for Large Language Models (LLMs)."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Ollama API client."""
-        import ollama  # type: ignore
-
-        self.client = ollama.Client(host=kwargs.get("host", "http://localhost:11434"))
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.model_name = kwargs.get("model_name", "phi3")
-        self.default_model_params = {
-            k: v for k, v in kwargs.items() if k not in ["model_name", "host"]
-        }
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        assert isinstance(
-            meaning_in, str
-        ), "Currently Multimodal models are not supported. Please provide a string input."
-        model = str(kwargs.get("model_name", self.model_name))
-        if not self.check_model(model):
-            self.download_model(model)
-        model_params = {k: v for k, v in kwargs.items() if k not in ["model_name"]}
-        messages = [{"role": "user", "content": meaning_in}]
-        output = self.client.chat(
-            model=model,
-            messages=messages,
-            options={**self.default_model_params, **model_params},
-        )
-        return output["message"]["content"]
-
-    def check_model(self, model_name: str) -> bool:
-        """Check if the model is available."""
-        try:
-            self.client.show(model_name)
-            return True
-        except Exception:
-            return False
-
-    def download_model(self, model_name: str) -> None:
-        """Download the model."""
-        self.client.pull(model_name)
jaclang/core/llms/openai.py
DELETED
@@ -1,65 +0,0 @@
-"""Anthropic API client for MTLLM."""
-
-from .base import BaseLLM
-
-
-REASON_SUFFIX = """
-Reason and return the output result(s) only, adhering to the provided Type in the following format
-
-[Reasoning] <Reason>
-[Output] <Result>
-"""
-
-NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
-
-[Output] <result>
-""" # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-""" # noqa E501
-
-REACT_SUFFIX = """
-""" # noqa E501
-
-
-class OpenAI(BaseLLM):
-    """Anthropic API client for MTLLM."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Anthropic API client."""
-        import openai  # type: ignore
-
-        self.client = openai.OpenAI()
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.model_name = str(kwargs.get("model_name", "gpt-3.5-turbo"))
-        self.temperature = kwargs.get("temperature", 0.7)
-        self.max_tokens = kwargs.get("max_tokens", 1024)
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        if not isinstance(meaning_in, str):
-            assert self.model_name.startswith(
-                ("gpt-4o", "gpt-4-turbo")
-            ), f"Model {self.model_name} is not multimodal, use a multimodal model instead."
-        messages = [{"role": "user", "content": meaning_in}]
-        output = self.client.chat.completions.create(
-            model=kwargs.get("model_name", self.model_name),
-            temperature=kwargs.get("temperature", self.temperature),
-            max_tokens=kwargs.get("max_tokens", self.max_tokens),
-            messages=messages,
-        )
-        return output.choices[0].message.content
jaclang/core/llms/togetherai.py
DELETED
@@ -1,63 +0,0 @@
-"""Anthropic API client for MTLLM."""
-
-from .base import BaseLLM
-
-REASON_SUFFIX = """
-Reason and return the output result(s) only, adhering to the provided Type in the following format
-
-[Reasoning] <Reason>
-[Output] <Result>
-"""
-
-NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
-
-[Output] <result>
-""" # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-""" # noqa E501
-
-REACT_SUFFIX = """
-""" # noqa E501
-
-
-class TogetherAI(BaseLLM):
-    """Anthropic API client for MTLLM."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Anthropic API client."""
-        import together  # type: ignore
-
-        self.client = together.Together()
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.model_name = kwargs.get("model_name", "mistralai/Mistral-7B-Instruct-v0.3")
-        self.temperature = kwargs.get("temperature", 0.7)
-        self.max_tokens = kwargs.get("max_tokens", 1024)
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        assert isinstance(
-            meaning_in, str
-        ), "Currently Multimodal models are not supported. Please provide a string input."
-        messages = [{"role": "user", "content": meaning_in}]
-        output = self.client.chat.completions.create(
-            model=kwargs.get("model_name", self.model_name),
-            temperature=kwargs.get("temperature", self.temperature),
-            max_tokens=kwargs.get("max_tokens", self.max_tokens),
-            messages=messages,
-        )
-        return output.choices[0].message.content
jaclang/core/llms/utils.py
DELETED
{jaclang-0.7.5.dist-info → jaclang-0.7.6.dist-info}/WHEEL
File without changes
{jaclang-0.7.5.dist-info → jaclang-0.7.6.dist-info}/entry_points.txt
File without changes