jaclang 0.7.2__py3-none-any.whl → 0.7.7__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of jaclang might be problematic.
- jaclang/cli/cli.py +2 -2
- jaclang/compiler/absyntree.py +499 -294
- jaclang/compiler/codeloc.py +2 -2
- jaclang/compiler/constant.py +100 -2
- jaclang/compiler/jac.lark +27 -19
- jaclang/compiler/parser.py +119 -92
- jaclang/compiler/passes/main/access_modifier_pass.py +20 -12
- jaclang/compiler/passes/main/def_impl_match_pass.py +28 -14
- jaclang/compiler/passes/main/def_use_pass.py +59 -40
- jaclang/compiler/passes/main/fuse_typeinfo_pass.py +65 -43
- jaclang/compiler/passes/main/import_pass.py +8 -6
- jaclang/compiler/passes/main/pyast_gen_pass.py +97 -42
- jaclang/compiler/passes/main/pyast_load_pass.py +47 -12
- jaclang/compiler/passes/main/pyjac_ast_link_pass.py +19 -10
- jaclang/compiler/passes/main/registry_pass.py +6 -6
- jaclang/compiler/passes/main/sub_node_tab_pass.py +0 -5
- jaclang/compiler/passes/main/sym_tab_build_pass.py +43 -235
- jaclang/compiler/passes/main/tests/test_decl_def_match_pass.py +21 -4
- jaclang/compiler/passes/main/tests/test_def_use_pass.py +5 -10
- jaclang/compiler/passes/main/type_check_pass.py +2 -1
- jaclang/compiler/passes/tool/jac_formatter_pass.py +30 -9
- jaclang/compiler/passes/tool/tests/fixtures/corelib.jac +16 -0
- jaclang/compiler/passes/tool/tests/fixtures/corelib_fmt.jac +16 -0
- jaclang/compiler/passes/tool/tests/fixtures/genai/essay_review.jac +1 -1
- jaclang/compiler/passes/tool/tests/fixtures/genai/expert_answer.jac +1 -1
- jaclang/compiler/passes/tool/tests/fixtures/genai/joke_gen.jac +1 -1
- jaclang/compiler/passes/tool/tests/fixtures/genai/odd_word_out.jac +1 -1
- jaclang/compiler/passes/tool/tests/fixtures/genai/personality_finder.jac +1 -1
- jaclang/compiler/passes/tool/tests/fixtures/genai/text_to_type.jac +1 -1
- jaclang/compiler/passes/tool/tests/fixtures/genai/translator.jac +1 -1
- jaclang/compiler/passes/tool/tests/fixtures/genai/wikipedia.jac +1 -1
- jaclang/compiler/passes/transform.py +2 -4
- jaclang/{core/registry.py → compiler/semtable.py} +1 -3
- jaclang/compiler/symtable.py +142 -101
- jaclang/compiler/tests/test_parser.py +2 -2
- jaclang/core/aott.py +15 -11
- jaclang/core/{construct.py → architype.py} +25 -240
- jaclang/core/constructs.py +44 -0
- jaclang/core/context.py +157 -0
- jaclang/core/importer.py +18 -9
- jaclang/core/memory.py +99 -0
- jaclang/core/test.py +90 -0
- jaclang/core/utils.py +2 -2
- jaclang/langserve/engine.py +127 -50
- jaclang/langserve/server.py +34 -61
- jaclang/langserve/tests/fixtures/base_module_structure.jac +28 -0
- jaclang/langserve/tests/fixtures/circle.jac +16 -12
- jaclang/langserve/tests/fixtures/circle_err.jac +3 -3
- jaclang/langserve/tests/fixtures/circle_pure.test.jac +15 -0
- jaclang/langserve/tests/fixtures/import_include_statements.jac +6 -0
- jaclang/langserve/tests/fixtures/py_import.py +26 -0
- jaclang/langserve/tests/test_server.py +93 -18
- jaclang/langserve/utils.py +124 -10
- jaclang/plugin/builtin.py +1 -1
- jaclang/plugin/default.py +23 -9
- jaclang/plugin/feature.py +25 -7
- jaclang/plugin/spec.py +18 -20
- jaclang/settings.py +3 -0
- jaclang/tests/fixtures/abc.jac +16 -12
- jaclang/tests/fixtures/aott_raise.jac +1 -1
- jaclang/tests/fixtures/byllmissue.jac +9 -0
- jaclang/tests/fixtures/edgetypeissue.jac +10 -0
- jaclang/tests/fixtures/hello.jac +1 -1
- jaclang/tests/fixtures/impl_match_confused.impl.jac +1 -0
- jaclang/tests/fixtures/impl_match_confused.jac +5 -0
- jaclang/tests/fixtures/maxfail_run_test.jac +17 -5
- jaclang/tests/fixtures/run_test.jac +17 -5
- jaclang/tests/fixtures/with_llm_function.jac +1 -1
- jaclang/tests/fixtures/with_llm_lower.jac +1 -1
- jaclang/tests/fixtures/with_llm_method.jac +1 -1
- jaclang/tests/fixtures/with_llm_type.jac +1 -1
- jaclang/tests/fixtures/with_llm_vision.jac +1 -1
- jaclang/tests/test_bugs.py +19 -0
- jaclang/tests/test_cli.py +1 -1
- jaclang/tests/test_language.py +161 -96
- jaclang/tests/test_reference.py +1 -1
- jaclang/utils/lang_tools.py +5 -4
- jaclang/utils/test.py +2 -1
- jaclang/utils/treeprinter.py +22 -8
- {jaclang-0.7.2.dist-info → jaclang-0.7.7.dist-info}/METADATA +1 -1
- {jaclang-0.7.2.dist-info → jaclang-0.7.7.dist-info}/RECORD +83 -80
- jaclang/core/llms/__init__.py +0 -20
- jaclang/core/llms/anthropic.py +0 -90
- jaclang/core/llms/base.py +0 -206
- jaclang/core/llms/groq.py +0 -70
- jaclang/core/llms/huggingface.py +0 -76
- jaclang/core/llms/ollama.py +0 -81
- jaclang/core/llms/openai.py +0 -65
- jaclang/core/llms/togetherai.py +0 -63
- jaclang/core/llms/utils.py +0 -9
- {jaclang-0.7.2.dist-info → jaclang-0.7.7.dist-info}/WHEEL +0 -0
- {jaclang-0.7.2.dist-info → jaclang-0.7.7.dist-info}/entry_points.txt +0 -0
jaclang/core/llms/groq.py
DELETED
@@ -1,70 +0,0 @@
-"""Groq API client for MTLLM."""
-
-from .base import BaseLLM
-
-
-REASON_SUFFIX = """
-Reason and return the output result(s) only, adhering to the provided Type in the following format
-
-[Reasoning] <Reason>
-[Output] <Result>
-"""
-
-NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
-
-[Output] <result>
-"""  # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-"""  # noqa E501
-
-REACT_SUFFIX = """
-"""  # noqa E501
-
-
-class Groq(BaseLLM):
-    """Groq API client for MTLLM."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Groq API client."""
-        import groq  # type: ignore
-
-        self.client = groq.Groq()
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.model_name = kwargs.get("model_name", "mixtral-8x7b-32768")
-        self.temperature = kwargs.get("temperature", 0.7)
-        self.max_tokens = kwargs.get("max_tokens", 1024)
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        assert isinstance(
-            meaning_in, str
-        ), "Currently Multimodal models are not supported. Please provide a string input."
-        messages = [{"role": "user", "content": meaning_in}]
-        model_params = {
-            k: v
-            for k, v in kwargs.items()
-            if k not in ["model_name", "temperature", "max_tokens"]
-        }
-        output = self.client.chat.completions.create(
-            model=kwargs.get("model_name", self.model_name),
-            temperature=kwargs.get("temperature", self.temperature),
-            max_tokens=kwargs.get("max_tokens", self.max_tokens),
-            messages=messages,
-            **model_params,
-        )
-        return output.choices[0].message.content
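For orientation, a minimal sketch of how the removed Groq client was driven, assuming the groq package is installed and GROQ_API_KEY is set; the jaclang.core.llms import path (via the deleted __init__.py) is an assumption:

from jaclang.core.llms import Groq  # import path is an assumption; module removed in 0.7.7

# Constructor kwargs become the defaults stored on the instance.
llm = Groq(verbose=True, model_name="mixtral-8x7b-32768")

# Per-call kwargs override those defaults; anything else (e.g. top_p) falls
# through the model_params filter into groq's chat.completions.create.
prompt = "What is 2 + 2?" + Groq.MTLLM_METHOD_PROMPTS["Reason"]
print(llm.__infer__(prompt, temperature=0.0, top_p=0.9))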
jaclang/core/llms/huggingface.py
DELETED
@@ -1,76 +0,0 @@
-"""Huggingface client for MTLLM."""
-
-from .base import BaseLLM
-
-
-REASON_SUFFIX = """
-Reason and return the output results(s) only such that <Output> should be eval(<Output>) Compatible and reflects the
-expected output type, Follow the format below to provide the reasoning for the output result(s).
-
-[Reasoning] <Reasoning>
-[Output] <Output>
-"""
-
-NORMAL_SUFFIX = """Return the output result(s) only such that <Output> should be eval(<Output>) Compatible and
-reflects the expected output type, Follow the format below to provide the output result(s).
-
-[Output] <Output>
-"""  # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-"""  # noqa E501
-
-REACT_SUFFIX = """
-"""  # noqa E501
-
-
-class Huggingface(BaseLLM):
-    """Huggingface API client for Large Language Models (LLMs)."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Huggingface API client."""
-        import torch  # type: ignore
-        from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline  # type: ignore
-
-        torch.random.manual_seed(0)
-        model = AutoModelForCausalLM.from_pretrained(
-            kwargs.get("model_name", "microsoft/Phi-3-mini-128k-instruct"),
-            device_map=kwargs.get("device_map", "cuda"),
-            torch_dtype="auto",
-            trust_remote_code=True,
-        )
-        tokenizer = AutoTokenizer.from_pretrained(
-            kwargs.get("model_name", "microsoft/Phi-3-mini-128k-instruct")
-        )
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
-        self.temperature = kwargs.get("temperature", 0.7)
-        self.max_tokens = kwargs.get("max_new_tokens", 1024)
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        assert isinstance(
-            meaning_in, str
-        ), "Currently Multimodal models are not supported. Please provide a string input."
-        messages = [{"role": "user", "content": meaning_in}]
-        output = self.pipe(
-            messages,
-            temperature=kwargs.get("temperature", self.temperature),
-            max_length=kwargs.get("max_new_tokens", self.max_tokens),
-            **kwargs,
-        )
-        return output[0]["generated_text"][-1]["content"]
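Unlike the hosted-API clients in this set, this one loaded weights locally through a transformers text-generation pipeline. A minimal standalone sketch of the same flow, assuming torch, transformers, and a CUDA device are available (the model choice mirrors the client's default):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

torch.random.manual_seed(0)  # the client seeded torch on construction
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-128k-instruct",
    device_map="cuda",
    torch_dtype="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct")
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Chat-style input, as __infer__ built it: a list of role/content dicts.
out = pipe([{"role": "user", "content": "Name three primes."}], max_length=1024)
print(out[0]["generated_text"][-1]["content"])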
jaclang/core/llms/ollama.py
DELETED
@@ -1,81 +0,0 @@
-"""Ollama client for MTLLM."""
-
-from .base import BaseLLM
-
-REASON_SUFFIX = """
-Reason and return the output results(s) only such that <Output> should be eval(<Output>) Compatible and reflects the
-expected output type, Follow the format below to provide the reasoning for the output result(s).
-
-[Reasoning] <Reasoning>
-[Output] <Output>
-"""
-
-NORMAL_SUFFIX = """Return the output result(s) only such that <Output> should be eval(<Output>) Compatible and
-reflects the expected output type, Follow the format below to provide the output result(s).
-
-[Output] <Output>
-"""  # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-"""  # noqa E501
-
-REACT_SUFFIX = """
-"""  # noqa E501
-
-
-class Ollama(BaseLLM):
-    """Ollama API client for Large Language Models (LLMs)."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Ollama API client."""
-        import ollama  # type: ignore
-
-        self.client = ollama.Client(host=kwargs.get("host", "http://localhost:11434"))
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.model_name = kwargs.get("model_name", "phi3")
-        self.default_model_params = {
-            k: v for k, v in kwargs.items() if k not in ["model_name", "host"]
-        }
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        assert isinstance(
-            meaning_in, str
-        ), "Currently Multimodal models are not supported. Please provide a string input."
-        model = str(kwargs.get("model_name", self.model_name))
-        if not self.check_model(model):
-            self.download_model(model)
-        model_params = {k: v for k, v in kwargs.items() if k not in ["model_name"]}
-        messages = [{"role": "user", "content": meaning_in}]
-        output = self.client.chat(
-            model=model,
-            messages=messages,
-            options={**self.default_model_params, **model_params},
-        )
-        return output["message"]["content"]
-
-    def check_model(self, model_name: str) -> bool:
-        """Check if the model is available."""
-        try:
-            self.client.show(model_name)
-            return True
-        except Exception:
-            return False
-
-    def download_model(self, model_name: str) -> None:
-        """Download the model."""
-        self.client.pull(model_name)
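Worth noting: __infer__ lazily pulled the model when it was not already present on the local Ollama server. A minimal sketch of that check-then-pull pattern, assuming the ollama package and a server on the default port:

import ollama

client = ollama.Client(host="http://localhost:11434")
model = "phi3"

# Mirrors check_model/download_model: show() raises when the model is
# missing locally, in which case pull() downloads it first.
try:
    client.show(model)
except Exception:
    client.pull(model)

out = client.chat(model=model, messages=[{"role": "user", "content": "Hi!"}])
print(out["message"]["content"])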
jaclang/core/llms/openai.py
DELETED
@@ -1,65 +0,0 @@
-"""Anthropic API client for MTLLM."""
-
-from .base import BaseLLM
-
-
-REASON_SUFFIX = """
-Reason and return the output result(s) only, adhering to the provided Type in the following format
-
-[Reasoning] <Reason>
-[Output] <Result>
-"""
-
-NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
-
-[Output] <result>
-"""  # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-"""  # noqa E501
-
-REACT_SUFFIX = """
-"""  # noqa E501
-
-
-class OpenAI(BaseLLM):
-    """Anthropic API client for MTLLM."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Anthropic API client."""
-        import openai  # type: ignore
-
-        self.client = openai.OpenAI()
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.model_name = str(kwargs.get("model_name", "gpt-3.5-turbo"))
-        self.temperature = kwargs.get("temperature", 0.7)
-        self.max_tokens = kwargs.get("max_tokens", 1024)
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        if not isinstance(meaning_in, str):
-            assert self.model_name.startswith(
-                ("gpt-4o", "gpt-4-turbo")
-            ), f"Model {self.model_name} is not multimodal, use a multimodal model instead."
-        messages = [{"role": "user", "content": meaning_in}]
-        output = self.client.chat.completions.create(
-            model=kwargs.get("model_name", self.model_name),
-            temperature=kwargs.get("temperature", self.temperature),
-            max_tokens=kwargs.get("max_tokens", self.max_tokens),
-            messages=messages,
-        )
-        return output.choices[0].message.content
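This client accepted a non-string meaning_in (a list of content parts) only for models it treated as multimodal, via the startswith assertion above. A minimal sketch of that gate with a multimodal-style payload, assuming the openai package and OPENAI_API_KEY (the image URL is a placeholder):

import openai

client = openai.OpenAI()
model_name = "gpt-4o"

# The removed client asserted this prefix check before sending list content.
assert model_name.startswith(("gpt-4o", "gpt-4-turbo")), "use a multimodal model"

content = [
    {"type": "text", "text": "Describe this image."},
    {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
]
out = client.chat.completions.create(
    model=model_name,
    messages=[{"role": "user", "content": content}],
    max_tokens=1024,
)
print(out.choices[0].message.content)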
jaclang/core/llms/togetherai.py
DELETED
@@ -1,63 +0,0 @@
-"""Anthropic API client for MTLLM."""
-
-from .base import BaseLLM
-
-REASON_SUFFIX = """
-Reason and return the output result(s) only, adhering to the provided Type in the following format
-
-[Reasoning] <Reason>
-[Output] <Result>
-"""
-
-NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
-
-[Output] <result>
-"""  # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-"""  # noqa E501
-
-REACT_SUFFIX = """
-"""  # noqa E501
-
-
-class TogetherAI(BaseLLM):
-    """Anthropic API client for MTLLM."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Anthropic API client."""
-        import together  # type: ignore
-
-        self.client = together.Together()
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.model_name = kwargs.get("model_name", "mistralai/Mistral-7B-Instruct-v0.3")
-        self.temperature = kwargs.get("temperature", 0.7)
-        self.max_tokens = kwargs.get("max_tokens", 1024)
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        assert isinstance(
-            meaning_in, str
-        ), "Currently Multimodal models are not supported. Please provide a string input."
-        messages = [{"role": "user", "content": meaning_in}]
-        output = self.client.chat.completions.create(
-            model=kwargs.get("model_name", self.model_name),
-            temperature=kwargs.get("temperature", self.temperature),
-            max_tokens=kwargs.get("max_tokens", self.max_tokens),
-            messages=messages,
-        )
-        return output.choices[0].message.content
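The deleted clients all shared the MTLLM_METHOD_PROMPTS shape: a map from reasoning method to an output-format suffix appended to the assembled prompt. A minimal sketch of that selection step, assuming the together package and TOGETHER_API_KEY are set up (the jaclang.core.llms import path is an assumption):

from jaclang.core.llms import TogetherAI  # import path is an assumption; module removed in 0.7.7

llm = TogetherAI(model_name="mistralai/Mistral-7B-Instruct-v0.3")

# Choose the suffix for the desired reasoning method, then append it to the
# prompt body before inference.
method = "Chain-of-Thoughts"  # or "Normal", "Reason", "ReAct"
prompt = "How many weekdays are in two weeks?" + TogetherAI.MTLLM_METHOD_PROMPTS[method]
print(llm.__infer__(prompt))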
jaclang/core/llms/utils.py
DELETED

{jaclang-0.7.2.dist-info → jaclang-0.7.7.dist-info}/WHEEL
File without changes

{jaclang-0.7.2.dist-info → jaclang-0.7.7.dist-info}/entry_points.txt
File without changes