jaclang 0.7.2__py3-none-any.whl → 0.7.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of jaclang might be problematic.

Files changed (95)
  1. jaclang/cli/cli.py +2 -2
  2. jaclang/compiler/__init__.py +11 -12
  3. jaclang/compiler/absyntree.py +499 -294
  4. jaclang/compiler/codeloc.py +2 -2
  5. jaclang/compiler/constant.py +100 -2
  6. jaclang/compiler/jac.lark +27 -19
  7. jaclang/compiler/parser.py +119 -92
  8. jaclang/compiler/passes/main/access_modifier_pass.py +20 -12
  9. jaclang/compiler/passes/main/def_impl_match_pass.py +28 -14
  10. jaclang/compiler/passes/main/def_use_pass.py +59 -40
  11. jaclang/compiler/passes/main/fuse_typeinfo_pass.py +65 -43
  12. jaclang/compiler/passes/main/import_pass.py +8 -6
  13. jaclang/compiler/passes/main/pyast_gen_pass.py +97 -42
  14. jaclang/compiler/passes/main/pyast_load_pass.py +47 -12
  15. jaclang/compiler/passes/main/pyjac_ast_link_pass.py +19 -10
  16. jaclang/compiler/passes/main/registry_pass.py +6 -6
  17. jaclang/compiler/passes/main/sub_node_tab_pass.py +0 -5
  18. jaclang/compiler/passes/main/sym_tab_build_pass.py +43 -235
  19. jaclang/compiler/passes/main/tests/test_decl_def_match_pass.py +21 -4
  20. jaclang/compiler/passes/main/tests/test_def_use_pass.py +5 -10
  21. jaclang/compiler/passes/main/type_check_pass.py +2 -1
  22. jaclang/compiler/passes/tool/jac_formatter_pass.py +30 -9
  23. jaclang/compiler/passes/tool/tests/fixtures/corelib.jac +16 -0
  24. jaclang/compiler/passes/tool/tests/fixtures/corelib_fmt.jac +16 -0
  25. jaclang/compiler/passes/tool/tests/fixtures/genai/essay_review.jac +1 -1
  26. jaclang/compiler/passes/tool/tests/fixtures/genai/expert_answer.jac +1 -1
  27. jaclang/compiler/passes/tool/tests/fixtures/genai/joke_gen.jac +1 -1
  28. jaclang/compiler/passes/tool/tests/fixtures/genai/odd_word_out.jac +1 -1
  29. jaclang/compiler/passes/tool/tests/fixtures/genai/personality_finder.jac +1 -1
  30. jaclang/compiler/passes/tool/tests/fixtures/genai/text_to_type.jac +1 -1
  31. jaclang/compiler/passes/tool/tests/fixtures/genai/translator.jac +1 -1
  32. jaclang/compiler/passes/tool/tests/fixtures/genai/wikipedia.jac +1 -1
  33. jaclang/compiler/passes/transform.py +2 -4
  34. jaclang/compiler/passes/utils/mypy_ast_build.py +1 -8
  35. jaclang/{core/registry.py → compiler/semtable.py} +1 -3
  36. jaclang/compiler/symtable.py +142 -101
  37. jaclang/compiler/tests/test_parser.py +2 -2
  38. jaclang/core/{construct.py → architype.py} +25 -240
  39. jaclang/core/constructs.py +44 -0
  40. jaclang/core/context.py +157 -0
  41. jaclang/core/importer.py +18 -9
  42. jaclang/core/memory.py +99 -0
  43. jaclang/core/test.py +90 -0
  44. jaclang/core/utils.py +2 -2
  45. jaclang/langserve/engine.py +127 -50
  46. jaclang/langserve/server.py +34 -61
  47. jaclang/langserve/tests/fixtures/base_module_structure.jac +28 -0
  48. jaclang/langserve/tests/fixtures/circle.jac +16 -12
  49. jaclang/langserve/tests/fixtures/circle_err.jac +3 -3
  50. jaclang/langserve/tests/fixtures/circle_pure.test.jac +15 -0
  51. jaclang/langserve/tests/fixtures/import_include_statements.jac +6 -0
  52. jaclang/langserve/tests/fixtures/py_import.py +26 -0
  53. jaclang/langserve/tests/test_server.py +93 -18
  54. jaclang/langserve/utils.py +124 -10
  55. jaclang/plugin/builtin.py +1 -1
  56. jaclang/plugin/default.py +25 -77
  57. jaclang/plugin/feature.py +25 -7
  58. jaclang/plugin/spec.py +18 -20
  59. jaclang/settings.py +3 -0
  60. jaclang/tests/fixtures/abc.jac +16 -12
  61. jaclang/tests/fixtures/aott_raise.jac +1 -1
  62. jaclang/tests/fixtures/byllmissue.jac +9 -0
  63. jaclang/tests/fixtures/edgetypeissue.jac +10 -0
  64. jaclang/tests/fixtures/hello.jac +1 -1
  65. jaclang/tests/fixtures/impl_match_confused.impl.jac +1 -0
  66. jaclang/tests/fixtures/impl_match_confused.jac +5 -0
  67. jaclang/tests/fixtures/maxfail_run_test.jac +17 -5
  68. jaclang/tests/fixtures/run_test.jac +17 -5
  69. jaclang/tests/test_bugs.py +19 -0
  70. jaclang/tests/test_cli.py +1 -1
  71. jaclang/tests/test_language.py +65 -100
  72. jaclang/tests/test_reference.py +1 -1
  73. jaclang/utils/lang_tools.py +5 -4
  74. jaclang/utils/test.py +2 -1
  75. jaclang/utils/treeprinter.py +22 -8
  76. {jaclang-0.7.2.dist-info → jaclang-0.7.8.dist-info}/METADATA +1 -1
  77. {jaclang-0.7.2.dist-info → jaclang-0.7.8.dist-info}/RECORD +79 -83
  78. jaclang/core/aott.py +0 -310
  79. jaclang/core/llms/__init__.py +0 -20
  80. jaclang/core/llms/anthropic.py +0 -90
  81. jaclang/core/llms/base.py +0 -206
  82. jaclang/core/llms/groq.py +0 -70
  83. jaclang/core/llms/huggingface.py +0 -76
  84. jaclang/core/llms/ollama.py +0 -81
  85. jaclang/core/llms/openai.py +0 -65
  86. jaclang/core/llms/togetherai.py +0 -63
  87. jaclang/core/llms/utils.py +0 -9
  88. jaclang/tests/fixtures/math_question.jpg +0 -0
  89. jaclang/tests/fixtures/with_llm_function.jac +0 -33
  90. jaclang/tests/fixtures/with_llm_lower.jac +0 -45
  91. jaclang/tests/fixtures/with_llm_method.jac +0 -51
  92. jaclang/tests/fixtures/with_llm_type.jac +0 -52
  93. jaclang/tests/fixtures/with_llm_vision.jac +0 -25
  94. {jaclang-0.7.2.dist-info → jaclang-0.7.8.dist-info}/WHEEL +0 -0
  95. {jaclang-0.7.2.dist-info → jaclang-0.7.8.dist-info}/entry_points.txt +0 -0
jaclang/core/llms/anthropic.py DELETED
@@ -1,90 +0,0 @@
-"""Anthropic API client for MTLLM."""
-
-from .base import BaseLLM
-
-
-REASON_SUFFIX = """
-Reason and return the output result(s) only, adhering to the provided Type in the following format
-
-[Reasoning] <Reason>
-[Output] <Result>
-"""
-
-NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
-
-[Output] <result>
-"""  # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-"""  # noqa E501
-
-REACT_SUFFIX = """
-"""  # noqa E501
-
-
-class Anthropic(BaseLLM):
-    """Anthropic API client for MTLLM."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Anthropic API client."""
-        import anthropic  # type: ignore
-
-        self.client = anthropic.Anthropic()
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.model_name = str(kwargs.get("model_name", "claude-3-sonnet-20240229"))
-        self.temperature = kwargs.get("temperature", 0.7)
-        self.max_tokens = kwargs.get("max_tokens", 1024)
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        if not isinstance(meaning_in, str):
-            assert self.model_name.startswith(
-                ("claude-3-opus", "claude-3-sonnet", "claude-3-haiku")
-            ), f"Model {self.model_name} is not multimodal, use a multimodal model instead."
-
-            import re
-
-            formatted_meaning_in = []
-            for item in meaning_in:
-                if item["type"] == "image_url":
-                    # "data:image/jpeg;base64,base64_string"
-                    img_match = re.match(
-                        r"data:(image/[a-zA-Z]*);base64,(.*)", item["source"]
-                    )
-                    if img_match:
-                        media_type, base64_string = img_match.groups()
-                        formatted_meaning_in.append(
-                            {
-                                "type": "image",
-                                "source": {
-                                    "type": "base64",
-                                    "media_type": media_type,
-                                    "data": base64_string,
-                                },
-                            }
-                        )
-                        continue
-                formatted_meaning_in.append(item)
-            meaning_in = formatted_meaning_in
-        messages = [{"role": "user", "content": meaning_in}]
-        output = self.client.messages.create(
-            model=kwargs.get("model_name", self.model_name),
-            temperature=kwargs.get("temperature", self.temperature),
-            max_tokens=kwargs.get("max_tokens", self.max_tokens),
-            messages=messages,
-        )
-        return output.content[0].text
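For reviewers: the removed `Anthropic.__infer__` above rewrites OpenAI-style `image_url` parts into Anthropic's base64 image blocks before calling `messages.create`. A minimal, dependency-free sketch of just that conversion step (the data URL in the example is an illustrative stand-in, not real image bytes):

import re

def to_anthropic_blocks(parts: list[dict]) -> list[dict]:
    """Rewrite OpenAI-style image_url parts into Anthropic base64 image blocks."""
    formatted = []
    for item in parts:
        if item["type"] == "image_url":
            # Data URLs look like "data:image/jpeg;base64,<payload>".
            match = re.match(r"data:(image/[a-zA-Z]*);base64,(.*)", item["source"])
            if match:
                media_type, data = match.groups()
                formatted.append(
                    {
                        "type": "image",
                        "source": {"type": "base64", "media_type": media_type, "data": data},
                    }
                )
                continue
        # Text parts (and unmatched image parts) pass through unchanged.
        formatted.append(item)
    return formatted

print(to_anthropic_blocks([
    {"type": "text", "text": "Describe this image."},
    {"type": "image_url", "source": "data:image/jpeg;base64,QUJD"},
]))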
jaclang/core/llms/base.py DELETED
@@ -1,206 +0,0 @@
-"""Base Large Language Model (LLM) class."""
-
-import logging
-import re
-
-from .utils import logger
-
-
-httpx_logger = logging.getLogger("httpx")
-httpx_logger.setLevel(logging.WARNING)
-
-SYSTEM_PROMPT = """
-[System Prompt]
-This is an operation you must perform and return the output values. Neither, the methodology, extra sentences nor the code are not needed.
-Input/Type formatting: Explanation of the Input (variable_name) (type) = value
-"""  # noqa E501
-
-
-PROMPT_TEMPLATE = """
-[Information]
-{information}
-
-[Context]
-{context}
-
-[Inputs Information]
-{inputs_information}
-
-[Output Information]
-{output_information}
-
-[Type Explanations]
-{type_explanations}
-
-[Action]
-{action}
-"""  # noqa E501
-
-NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
-
-[Output] <result>
-"""  # noqa E501
-
-REASON_SUFFIX = """
-Reason and return the output result(s) only, adhering to the provided Type in the following format
-
-[Reasoning] <Reason>
-[Output] <Result>
-"""
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-"""  # noqa E501
-
-REACT_SUFFIX = """
-"""  # noqa E501
-
-MTLLM_OUTPUT_FIX_PROMPT = """
-[Output]
-{model_output}
-
-[Previous Result You Provided]
-{previous_output}
-
-[Desired Output Type]
-{output_info}
-
-[Type Explanations]
-{output_type_info}
-
-Above output is not in the desired Output Format/Type. Please provide the output in the desired type. Do not repeat the previously provided output.
-Important: Do not provide the code or the methodology. Only provide
-the output in the desired format.
-"""  # noqa E501
-
-OUTPUT_CHECK_PROMPT = """
-[Output]
-{model_output}
-
-[Desired Output Type]
-{output_type}
-
-[Type Explanations]
-{output_type_info}
-
-Check if the output is exactly in the desired Output Type. Important: Just say 'Yes' or 'No'.
-"""  # noqa E501
-
-
-class BaseLLM:
-    """Base Large Language Model (LLM) class."""
-
-    MTLLM_SYSTEM_PROMPT: str = SYSTEM_PROMPT
-    MTLLM_PROMPT: str = PROMPT_TEMPLATE
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-    OUTPUT_FIX_PROMPT: str = MTLLM_OUTPUT_FIX_PROMPT
-    OUTPUT_CHECK_PROMPT: str = OUTPUT_CHECK_PROMPT
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Large Language Model (LLM) client."""
-        self.verbose = verbose
-        self.max_tries = max_tries
-        raise NotImplementedError
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        raise NotImplementedError
-
-    def __call__(self, input_text: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input text."""
-        if self.verbose:
-            logger.info(f"Meaning In\n{input_text}")
-        return self.__infer__(input_text, **kwargs)
-
-    def resolve_output(
-        self,
-        meaning_out: str,
-        output_semstr: str,
-        output_type: str,
-        output_type_info: str,
-    ) -> str:
-        """Resolve the output string to return the reasoning and output."""
-        if self.verbose:
-            logger.info(f"Meaning Out\n{meaning_out}")
-        output_match = re.search(r"\[Output\](.*)", meaning_out)
-        output = output_match.group(1).strip() if output_match else None
-        if not output_match:
-            output = self._extract_output(
-                meaning_out,
-                output_semstr,
-                output_type,
-                output_type_info,
-                self.max_tries,
-            )
-        return str(output)
-
-    def _check_output(
-        self, output: str, output_type: str, output_type_info: str
-    ) -> bool:
-        """Check if the output is in the desired format."""
-        output_check_prompt = self.OUTPUT_CHECK_PROMPT.format(
-            model_output=output,
-            output_type=output_type,
-            output_type_info=output_type_info,
-        )
-        llm_output = self.__infer__(output_check_prompt)
-        return "yes" in llm_output.lower()
-
-    def _extract_output(
-        self,
-        meaning_out: str,
-        output_semstr: str,
-        output_type: str,
-        output_type_info: str,
-        max_tries: int,
-        previous_output: str = "None",
-    ) -> str:
-        """Extract the output from the meaning out string."""
-        if max_tries == 0:
-            logger.error("Failed to extract output. Max tries reached.")
-            raise ValueError(
-                "Failed to extract output. Try Changing the Semstrings, provide examples or change the method."
-            )
-
-        if self.verbose:
-            if max_tries < self.max_tries:
-                logger.info(
-                    f"Failed to extract output. Trying to extract output again. Max tries left: {max_tries}"
-                )
-            else:
-                logger.info("Extracting output from the meaning out string.")
-
-        output_fix_prompt = self.OUTPUT_FIX_PROMPT.format(
-            model_output=meaning_out,
-            previous_output=previous_output,
-            output_info=f"{output_semstr} ({output_type})",
-            output_type_info=output_type_info,
-        )
-        llm_output = self.__infer__(output_fix_prompt)
-        is_in_desired_format = self._check_output(
-            llm_output, output_type, output_type_info
-        )
-        if self.verbose:
-            logger.info(
-                f"Extracted Output: {llm_output}. Is in Desired Format: {is_in_desired_format}"
-            )
-        if is_in_desired_format:
-            return llm_output
-        return self._extract_output(
-            meaning_out,
-            output_semstr,
-            output_type,
-            output_type_info,
-            max_tries - 1,
-            llm_output,
-        )
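For reviewers: the removed `BaseLLM` pins the whole MTLLM loop on an `[Output]` tag contract. `resolve_output` first tries a plain regex grab, and only falls back to the `_extract_output` re-prompt loop (bounded by `max_tries` and verified by `_check_output`) when the tag is missing. A standalone sketch of the regex step, with hypothetical sample strings:

import re

def grab_output(meaning_out: str) -> str | None:
    """Mirror the regex step in BaseLLM.resolve_output above."""
    # "." does not cross newlines, so this captures the rest of the tagged line.
    match = re.search(r"\[Output\](.*)", meaning_out)
    return match.group(1).strip() if match else None

print(grab_output("[Reasoning] 2 + 2 is addition\n[Output] 4"))  # -> "4"
print(grab_output("no tagged output here"))  # -> None; the real class would re-prompt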
jaclang/core/llms/groq.py DELETED
@@ -1,70 +0,0 @@
-"""Groq API client for MTLLM."""
-
-from .base import BaseLLM
-
-
-REASON_SUFFIX = """
-Reason and return the output result(s) only, adhering to the provided Type in the following format
-
-[Reasoning] <Reason>
-[Output] <Result>
-"""
-
-NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
-
-[Output] <result>
-"""  # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-"""  # noqa E501
-
-REACT_SUFFIX = """
-"""  # noqa E501
-
-
-class Groq(BaseLLM):
-    """Groq API client for MTLLM."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Groq API client."""
-        import groq  # type: ignore
-
-        self.client = groq.Groq()
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.model_name = kwargs.get("model_name", "mixtral-8x7b-32768")
-        self.temperature = kwargs.get("temperature", 0.7)
-        self.max_tokens = kwargs.get("max_tokens", 1024)
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        assert isinstance(
-            meaning_in, str
-        ), "Currently Multimodal models are not supported. Please provide a string input."
-        messages = [{"role": "user", "content": meaning_in}]
-        model_params = {
-            k: v
-            for k, v in kwargs.items()
-            if k not in ["model_name", "temperature", "max_tokens"]
-        }
-        output = self.client.chat.completions.create(
-            model=kwargs.get("model_name", self.model_name),
-            temperature=kwargs.get("temperature", self.temperature),
-            max_tokens=kwargs.get("max_tokens", self.max_tokens),
-            messages=messages,
-            **model_params,
-        )
-        return output.choices[0].message.content
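One pattern worth noting in the removed `Groq.__infer__`: keyword arguments the client binds explicitly (`model_name`, `temperature`, `max_tokens`) are stripped from `kwargs`, and everything else is forwarded into `chat.completions.create` untouched. A tiny illustration of that split (the sample kwargs are hypothetical):

def split_params(kwargs: dict) -> dict:
    """Keep only keys that are not bound explicitly by the client."""
    bound = ("model_name", "temperature", "max_tokens")
    return {k: v for k, v in kwargs.items() if k not in bound}

print(split_params({"model_name": "mixtral-8x7b-32768", "top_p": 0.9, "seed": 7}))
# -> {'top_p': 0.9, 'seed': 7}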
jaclang/core/llms/huggingface.py DELETED
@@ -1,76 +0,0 @@
-"""Huggingface client for MTLLM."""
-
-from .base import BaseLLM
-
-
-REASON_SUFFIX = """
-Reason and return the output results(s) only such that <Output> should be eval(<Output>) Compatible and reflects the
-expected output type, Follow the format below to provide the reasoning for the output result(s).
-
-[Reasoning] <Reasoning>
-[Output] <Output>
-"""
-
-NORMAL_SUFFIX = """Return the output result(s) only such that <Output> should be eval(<Output>) Compatible and
-reflects the expected output type, Follow the format below to provide the output result(s).
-
-[Output] <Output>
-"""  # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-"""  # noqa E501
-
-REACT_SUFFIX = """
-"""  # noqa E501
-
-
-class Huggingface(BaseLLM):
-    """Huggingface API client for Large Language Models (LLMs)."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Huggingface API client."""
-        import torch  # type: ignore
-        from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline  # type: ignore
-
-        torch.random.manual_seed(0)
-        model = AutoModelForCausalLM.from_pretrained(
-            kwargs.get("model_name", "microsoft/Phi-3-mini-128k-instruct"),
-            device_map=kwargs.get("device_map", "cuda"),
-            torch_dtype="auto",
-            trust_remote_code=True,
-        )
-        tokenizer = AutoTokenizer.from_pretrained(
-            kwargs.get("model_name", "microsoft/Phi-3-mini-128k-instruct")
-        )
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
-        self.temperature = kwargs.get("temperature", 0.7)
-        self.max_tokens = kwargs.get("max_new_tokens", 1024)
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        assert isinstance(
-            meaning_in, str
-        ), "Currently Multimodal models are not supported. Please provide a string input."
-        messages = [{"role": "user", "content": meaning_in}]
-        output = self.pipe(
-            messages,
-            temperature=kwargs.get("temperature", self.temperature),
-            max_length=kwargs.get("max_new_tokens", self.max_tokens),
-            **kwargs,
-        )
-        return output[0]["generated_text"][-1]["content"]
jaclang/core/llms/ollama.py DELETED
@@ -1,81 +0,0 @@
-"""Ollama client for MTLLM."""
-
-from .base import BaseLLM
-
-REASON_SUFFIX = """
-Reason and return the output results(s) only such that <Output> should be eval(<Output>) Compatible and reflects the
-expected output type, Follow the format below to provide the reasoning for the output result(s).
-
-[Reasoning] <Reasoning>
-[Output] <Output>
-"""
-
-NORMAL_SUFFIX = """Return the output result(s) only such that <Output> should be eval(<Output>) Compatible and
-reflects the expected output type, Follow the format below to provide the output result(s).
-
-[Output] <Output>
-"""  # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-"""  # noqa E501
-
-REACT_SUFFIX = """
-"""  # noqa E501
-
-
-class Ollama(BaseLLM):
-    """Ollama API client for Large Language Models (LLMs)."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Ollama API client."""
-        import ollama  # type: ignore
-
-        self.client = ollama.Client(host=kwargs.get("host", "http://localhost:11434"))
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.model_name = kwargs.get("model_name", "phi3")
-        self.default_model_params = {
-            k: v for k, v in kwargs.items() if k not in ["model_name", "host"]
-        }
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        assert isinstance(
-            meaning_in, str
-        ), "Currently Multimodal models are not supported. Please provide a string input."
-        model = str(kwargs.get("model_name", self.model_name))
-        if not self.check_model(model):
-            self.download_model(model)
-        model_params = {k: v for k, v in kwargs.items() if k not in ["model_name"]}
-        messages = [{"role": "user", "content": meaning_in}]
-        output = self.client.chat(
-            model=model,
-            messages=messages,
-            options={**self.default_model_params, **model_params},
-        )
-        return output["message"]["content"]
-
-    def check_model(self, model_name: str) -> bool:
-        """Check if the model is available."""
-        try:
-            self.client.show(model_name)
-            return True
-        except Exception:
-            return False
-
-    def download_model(self, model_name: str) -> None:
-        """Download the model."""
-        self.client.pull(model_name)
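Unlike the hosted clients, the removed `Ollama` wrapper pulls missing models lazily: `client.show` doubles as an existence probe, and `client.pull` downloads on failure. A hedged sketch of that pattern, assuming the `ollama` package and a server on the default host:

import ollama

def ensure_model(client: ollama.Client, model_name: str) -> None:
    """Pull model_name only if the server does not already have it."""
    try:
        client.show(model_name)  # raises when the model is absent
    except Exception:
        client.pull(model_name)

client = ollama.Client(host="http://localhost:11434")
ensure_model(client, "phi3")
reply = client.chat(model="phi3", messages=[{"role": "user", "content": "Hello"}])
print(reply["message"]["content"])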
jaclang/core/llms/openai.py DELETED
@@ -1,65 +0,0 @@
-"""Anthropic API client for MTLLM."""
-
-from .base import BaseLLM
-
-
-REASON_SUFFIX = """
-Reason and return the output result(s) only, adhering to the provided Type in the following format
-
-[Reasoning] <Reason>
-[Output] <Result>
-"""
-
-NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
-
-[Output] <result>
-"""  # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-"""  # noqa E501
-
-REACT_SUFFIX = """
-"""  # noqa E501
-
-
-class OpenAI(BaseLLM):
-    """Anthropic API client for MTLLM."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Anthropic API client."""
-        import openai  # type: ignore
-
-        self.client = openai.OpenAI()
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.model_name = str(kwargs.get("model_name", "gpt-3.5-turbo"))
-        self.temperature = kwargs.get("temperature", 0.7)
-        self.max_tokens = kwargs.get("max_tokens", 1024)
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        if not isinstance(meaning_in, str):
-            assert self.model_name.startswith(
-                ("gpt-4o", "gpt-4-turbo")
-            ), f"Model {self.model_name} is not multimodal, use a multimodal model instead."
-        messages = [{"role": "user", "content": meaning_in}]
-        output = self.client.chat.completions.create(
-            model=kwargs.get("model_name", self.model_name),
-            temperature=kwargs.get("temperature", self.temperature),
-            max_tokens=kwargs.get("max_tokens", self.max_tokens),
-            messages=messages,
-        )
-        return output.choices[0].message.content
jaclang/core/llms/togetherai.py DELETED
@@ -1,63 +0,0 @@
-"""Anthropic API client for MTLLM."""
-
-from .base import BaseLLM
-
-REASON_SUFFIX = """
-Reason and return the output result(s) only, adhering to the provided Type in the following format
-
-[Reasoning] <Reason>
-[Output] <Result>
-"""
-
-NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
-
-[Output] <result>
-"""  # noqa E501
-
-CHAIN_OF_THOUGHT_SUFFIX = """
-Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
-
-[Chain of Thoughts] <Chain of Thoughts>
-[Output] <Result>
-"""  # noqa E501
-
-REACT_SUFFIX = """
-"""  # noqa E501
-
-
-class TogetherAI(BaseLLM):
-    """Anthropic API client for MTLLM."""
-
-    MTLLM_METHOD_PROMPTS: dict[str, str] = {
-        "Normal": NORMAL_SUFFIX,
-        "Reason": REASON_SUFFIX,
-        "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
-        "ReAct": REACT_SUFFIX,
-    }
-
-    def __init__(
-        self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
-    ) -> None:
-        """Initialize the Anthropic API client."""
-        import together  # type: ignore
-
-        self.client = together.Together()
-        self.verbose = verbose
-        self.max_tries = max_tries
-        self.model_name = kwargs.get("model_name", "mistralai/Mistral-7B-Instruct-v0.3")
-        self.temperature = kwargs.get("temperature", 0.7)
-        self.max_tokens = kwargs.get("max_tokens", 1024)
-
-    def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
-        """Infer a response from the input meaning."""
-        assert isinstance(
-            meaning_in, str
-        ), "Currently Multimodal models are not supported. Please provide a string input."
-        messages = [{"role": "user", "content": meaning_in}]
-        output = self.client.chat.completions.create(
-            model=kwargs.get("model_name", self.model_name),
-            temperature=kwargs.get("temperature", self.temperature),
-            max_tokens=kwargs.get("max_tokens", self.max_tokens),
-            messages=messages,
-        )
-        return output.choices[0].message.content
jaclang/core/llms/utils.py DELETED
@@ -1,9 +0,0 @@
-"""Utility functions for the LLMs module."""
-
-try:
-    from loguru import logger  # noqa F401
-except ImportError:
-    import logging
-
-    logging.basicConfig(level=logging.INFO)
-    logger = logging.getLogger(__name__)
jaclang/tests/fixtures/math_question.jpg DELETED
Binary file