jaclang 0.5.17__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of jaclang might be problematic. See the registry's release advisory for more details.

Files changed (51)
  1. jaclang/__init__.py +2 -6
  2. jaclang/cli/cli.py +4 -2
  3. jaclang/compiler/__init__.py +12 -5
  4. jaclang/compiler/absyntree.py +23 -23
  5. jaclang/compiler/generated/jac_parser.py +2 -2
  6. jaclang/compiler/jac.lark +9 -9
  7. jaclang/compiler/parser.py +76 -21
  8. jaclang/compiler/passes/ir_pass.py +10 -8
  9. jaclang/compiler/passes/main/__init__.py +3 -2
  10. jaclang/compiler/passes/main/access_modifier_pass.py +173 -0
  11. jaclang/compiler/passes/main/fuse_typeinfo_pass.py +3 -2
  12. jaclang/compiler/passes/main/import_pass.py +33 -21
  13. jaclang/compiler/passes/main/pyast_gen_pass.py +99 -44
  14. jaclang/compiler/passes/main/pyast_load_pass.py +141 -77
  15. jaclang/compiler/passes/main/pyout_pass.py +14 -13
  16. jaclang/compiler/passes/main/registry_pass.py +8 -3
  17. jaclang/compiler/passes/main/schedules.py +5 -3
  18. jaclang/compiler/passes/main/sym_tab_build_pass.py +47 -37
  19. jaclang/compiler/passes/main/tests/test_import_pass.py +2 -2
  20. jaclang/compiler/passes/tool/jac_formatter_pass.py +85 -23
  21. jaclang/compiler/passes/tool/tests/test_jac_format_pass.py +11 -4
  22. jaclang/compiler/passes/transform.py +2 -0
  23. jaclang/compiler/symtable.py +10 -3
  24. jaclang/compiler/tests/test_importer.py +9 -0
  25. jaclang/compiler/workspace.py +19 -11
  26. jaclang/core/aott.py +34 -63
  27. jaclang/core/importer.py +73 -65
  28. jaclang/core/llms/__init__.py +20 -0
  29. jaclang/core/llms/anthropic.py +61 -0
  30. jaclang/core/llms/base.py +206 -0
  31. jaclang/core/llms/groq.py +67 -0
  32. jaclang/core/llms/huggingface.py +73 -0
  33. jaclang/core/llms/ollama.py +78 -0
  34. jaclang/core/llms/openai.py +61 -0
  35. jaclang/core/llms/togetherai.py +60 -0
  36. jaclang/core/llms/utils.py +9 -0
  37. jaclang/core/utils.py +16 -1
  38. jaclang/plugin/default.py +47 -16
  39. jaclang/plugin/feature.py +9 -6
  40. jaclang/plugin/spec.py +8 -1
  41. jaclang/settings.py +95 -0
  42. jaclang/utils/helpers.py +6 -2
  43. jaclang/utils/treeprinter.py +9 -6
  44. jaclang/vendor/mypy/checker.py +2 -3
  45. jaclang-0.6.0.dist-info/METADATA +17 -0
  46. {jaclang-0.5.17.dist-info → jaclang-0.6.0.dist-info}/RECORD +49 -39
  47. jaclang/core/llms.py +0 -29
  48. jaclang-0.5.17.dist-info/METADATA +0 -7
  49. {jaclang-0.5.17.dist-info → jaclang-0.6.0.dist-info}/WHEEL +0 -0
  50. {jaclang-0.5.17.dist-info → jaclang-0.6.0.dist-info}/entry_points.txt +0 -0
  51. {jaclang-0.5.17.dist-info → jaclang-0.6.0.dist-info}/top_level.txt +0 -0
jaclang/core/aott.py CHANGED
@@ -8,78 +8,40 @@ import re
8
8
  from enum import Enum
9
9
  from typing import Any
10
10
 
11
+ from jaclang.core.llms.base import BaseLLM
11
12
  from jaclang.core.registry import SemInfo, SemRegistry, SemScope
12
13
 
13
14
 
14
- PROMPT_TEMPLATE = """
15
- [System Prompt]
16
- This is an operation you must perform and return the output values. Neither, the methodology, extra sentences nor the code are not needed.
17
- Input/Type formatting: Explanation of the Input (variable_name) (type) = value
18
-
19
- [Information]
20
- {information}
21
-
22
- [Inputs Information]
23
- {inputs_information}
24
-
25
- [Output Information]
26
- {output_information}
27
-
28
- [Type Explanations]
29
- {type_explanations}
30
-
31
- [Action]
32
- {action}
33
-
34
- {reason_suffix}
35
- """ # noqa E501
36
-
37
- WITH_REASON_SUFFIX = """
38
- Reason and return the output result(s) only, adhering to the provided Type in the following format
39
-
40
- [Reasoning] <Reason>
41
- [Output] <Result>
42
- """
43
-
44
- WITHOUT_REASON_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
45
-
46
- [Output] <result>
47
- """ # noqa E501
48
-
49
-
50
15
  def aott_raise(
16
+ model: BaseLLM,
51
17
  information: str,
52
18
  inputs_information: str,
53
19
  output_information: str,
54
20
  type_explanations: str,
55
21
  action: str,
56
- reason: bool,
22
+ context: str,
23
+ method: str,
24
+ tools: list["Tool"],
25
+ model_params: dict,
57
26
  ) -> str:
58
27
  """AOTT Raise uses the information (Meanings types values) provided to generate a prompt(meaning in)."""
59
- return PROMPT_TEMPLATE.format(
60
- information=information,
61
- inputs_information=inputs_information,
62
- output_information=output_information,
63
- type_explanations=type_explanations,
64
- action=action,
65
- reason_suffix=WITH_REASON_SUFFIX if reason else WITHOUT_REASON_SUFFIX,
66
- )
67
-
68
-
69
- def get_reasoning_output(s: str) -> tuple:
70
- """Get the reasoning and output from the meaning out string."""
71
- reasoning_match = re.search(r"\[Reasoning\](.*)\[Output\]", s)
72
- output_match = re.search(r"\[Output\](.*)", s)
73
-
74
- if reasoning_match and output_match:
75
- reasoning = reasoning_match.group(1)
76
- output = output_match.group(1)
77
- return (reasoning.strip(), output.strip())
78
- elif output_match:
79
- output = output_match.group(1)
80
- return (None, output.strip())
28
+ if method != "ReAct":
29
+ system_prompt = model.MTLLM_SYSTEM_PROMPT
30
+ mtllm_prompt = model.MTLLM_PROMPT.format(
31
+ information=information,
32
+ inputs_information=inputs_information,
33
+ output_information=output_information,
34
+ type_explanations=type_explanations,
35
+ action=action,
36
+ context=context,
37
+ )
38
+ method_prompt = model.MTLLM_METHOD_PROMPTS[method]
39
+ meaning_in = f"{system_prompt}\n{mtllm_prompt}\n{method_prompt}"
40
+ return model(meaning_in, **model_params)
81
41
  else:
82
- return (None, None)
42
+ assert tools, "Tools must be provided for the ReAct method."
43
+ # TODO: Implement ReAct method
44
+ return ""
83
45
 
84
46
 
85
47
  def get_info_types(
@@ -179,19 +141,19 @@ def get_type_explanation(
179
141
  if sem_info.type == "Enum" and isinstance(type_info, list):
180
142
  for enum_item in type_info:
181
143
  type_info_str.append(
182
- f"{enum_item.semstr} (EnumItem) ({enum_item.name})"
144
+ f"{enum_item.semstr} ({enum_item.name}) (EnumItem)"
183
145
  )
184
146
  elif sem_info.type in ["obj", "class", "node", "edge"] and isinstance(
185
147
  type_info, list
186
148
  ):
187
149
  for arch_item in type_info:
188
150
  type_info_str.append(
189
- f"{arch_item.semstr} ({arch_item.type}) ({arch_item.name})"
151
+ f"{arch_item.semstr} ({arch_item.name}) ({arch_item.type})"
190
152
  )
191
153
  if arch_item.type and extract_non_primary_type(arch_item.type):
192
154
  type_info_types.extend(extract_non_primary_type(arch_item.type))
193
155
  return (
194
- f"{sem_info.semstr} ({sem_info.type}) ({sem_info.name}) = {', '.join(type_info_str)}",
156
+ f"{sem_info.semstr} ({sem_info.name}) ({sem_info.type}) = {', '.join(type_info_str)}",
195
157
  set(type_info_types),
196
158
  )
197
159
  return None, None
@@ -232,3 +194,12 @@ def get_type_annotation(data: Any) -> str: # noqa: ANN401
232
194
  return "dict[str, Any]"
233
195
  else:
234
196
  return str(type(data).__name__)
197
+
198
+
199
+ class Tool:
200
+ """Tool class for the AOTT operations."""
201
+
202
+ def __init__(self) -> None:
203
+ """Initialize the Tool class."""
204
+ # TODO: Implement the Tool class
205
+ pass
jaclang/core/importer.py CHANGED
@@ -10,6 +10,7 @@ from typing import Optional, Union
10
10
  from jaclang.compiler.absyntree import Module
11
11
  from jaclang.compiler.compile import compile_jac
12
12
  from jaclang.compiler.constant import Constants as Con
13
+ from jaclang.core.utils import sys_path_context
13
14
  from jaclang.utils.log import logging
14
15
 
15
16
 
@@ -21,56 +22,37 @@ def jac_importer(
21
22
  mdl_alias: Optional[str] = None,
22
23
  override_name: Optional[str] = None,
23
24
  mod_bundle: Optional[Module] = None,
24
- lng: Optional[str] = None,
25
+ lng: Optional[str] = "jac",
25
26
  items: Optional[dict[str, Union[str, bool]]] = None,
26
27
  ) -> Optional[types.ModuleType]:
27
28
  """Core Import Process."""
28
- dir_path, file_name = (
29
- path.split(path.join(*(target.split("."))) + ".py")
30
- if lng == "py"
31
- else path.split(path.join(*(target.split("."))) + ".jac")
29
+ dir_path, file_name = path.split(
30
+ path.join(*(target.split("."))) + (".jac" if lng == "jac" else ".py")
32
31
  )
33
32
  module_name = path.splitext(file_name)[0]
34
33
  package_path = dir_path.replace(path.sep, ".")
35
34
 
36
- if package_path and f"{package_path}.{module_name}" in sys.modules and lng != "py":
35
+ if package_path and f"{package_path}.{module_name}" in sys.modules:
37
36
  return sys.modules[f"{package_path}.{module_name}"]
38
- elif not package_path and module_name in sys.modules and lng != "py":
37
+ elif not package_path and module_name in sys.modules:
39
38
  return sys.modules[module_name]
40
39
 
41
- caller_dir = path.dirname(base_path) if not path.isdir(base_path) else base_path
42
- if not caller_dir:
43
- caller_dir = getcwd()
44
- chomp_target = target
45
- if chomp_target.startswith("."):
46
- chomp_target = chomp_target[1:]
47
- while chomp_target.startswith("."):
48
- caller_dir = path.dirname(caller_dir)
49
- chomp_target = chomp_target[1:]
50
- caller_dir = path.join(caller_dir, dir_path)
51
-
40
+ caller_dir = get_caller_dir(target, base_path, dir_path)
52
41
  full_target = path.normpath(path.join(caller_dir, file_name))
53
- path_added = False
54
- if caller_dir not in sys.path:
55
- sys.path.append(caller_dir)
56
- path_added = True
57
42
 
58
- module_name = override_name if override_name else module_name
59
- module = types.ModuleType(module_name)
60
- module.__file__ = full_target
61
- module.__name__ = module_name
62
- module.__dict__["__jac_mod_bundle__"] = mod_bundle
63
- if lng != "py":
43
+ if lng == "py":
44
+ module = py_import(
45
+ target=target, items=items, absorb=absorb, mdl_alias=mdl_alias
46
+ )
47
+ else:
48
+ module_name = override_name if override_name else module_name
49
+ module = create_jac_py_module(
50
+ mod_bundle, module_name, package_path, full_target
51
+ )
64
52
  if mod_bundle:
65
- codeobj = (
66
- mod_bundle.gen.py_bytecode
67
- if full_target == mod_bundle.loc.mod_path
68
- else mod_bundle.mod_deps[full_target].gen.py_bytecode
69
- )
70
- if isinstance(codeobj, bytes):
71
- codeobj = marshal.loads(codeobj)
53
+ codeobj = mod_bundle.mod_deps[full_target].gen.py_bytecode
54
+ codeobj = marshal.loads(codeobj) if isinstance(codeobj, bytes) else None
72
55
  else:
73
-
74
56
  gen_dir = path.join(caller_dir, Con.JAC_GEN_DIR)
75
57
  pyc_file_path = path.join(gen_dir, module_name + ".jbc")
76
58
  if (
@@ -89,46 +71,60 @@ def jac_importer(
89
71
  return None
90
72
  else:
91
73
  codeobj = marshal.loads(result.ir.gen.py_bytecode)
92
-
93
- if package_path:
94
- parts = package_path.split(".")
95
- for i in range(len(parts)):
96
- package_name = ".".join(parts[: i + 1])
97
- if package_name not in sys.modules:
98
- sys.modules[package_name] = types.ModuleType(package_name)
99
-
100
- setattr(sys.modules[package_path], module_name, module)
101
- sys.modules[f"{package_path}.{module_name}"] = module
102
- sys.modules[module_name] = module
103
-
104
74
  if not codeobj:
105
75
  raise ImportError(f"No bytecode found for {full_target}")
106
- exec(codeobj, module.__dict__)
76
+ with sys_path_context(caller_dir):
77
+ exec(codeobj, module.__dict__)
107
78
 
108
- (
109
- py_import(target=target, items=items, absorb=absorb, mdl_alias=mdl_alias)
110
- if lng == "py" or lng == "jac"
111
- else None
112
- )
79
+ return module
113
80
 
114
- if path_added:
115
- sys.path.remove(caller_dir)
116
81
 
82
+ def create_jac_py_module(
83
+ mod_bundle: Optional[Module], module_name: str, package_path: str, full_target: str
84
+ ) -> types.ModuleType:
85
+ """Create a module."""
86
+ module = types.ModuleType(module_name)
87
+ module.__file__ = full_target
88
+ module.__name__ = module_name
89
+ module.__dict__["__jac_mod_bundle__"] = mod_bundle
90
+ if package_path:
91
+ parts = package_path.split(".")
92
+ for i in range(len(parts)):
93
+ package_name = ".".join(parts[: i + 1])
94
+ if package_name not in sys.modules:
95
+ sys.modules[package_name] = types.ModuleType(package_name)
96
+
97
+ setattr(sys.modules[package_path], module_name, module)
98
+ sys.modules[f"{package_path}.{module_name}"] = module
99
+ sys.modules[module_name] = module
117
100
  return module
118
101
 
119
102
 
103
+ def get_caller_dir(target: str, base_path: str, dir_path: str) -> str:
104
+ """Get the directory of the caller."""
105
+ caller_dir = base_path if path.isdir(base_path) else path.dirname(base_path)
106
+ caller_dir = caller_dir if caller_dir else getcwd()
107
+ chomp_target = target
108
+ if chomp_target.startswith("."):
109
+ chomp_target = chomp_target[1:]
110
+ while chomp_target.startswith("."):
111
+ caller_dir = path.dirname(caller_dir)
112
+ chomp_target = chomp_target[1:]
113
+ caller_dir = path.join(caller_dir, dir_path)
114
+ return caller_dir
115
+
116
+
120
117
  def py_import(
121
118
  target: str,
122
119
  items: Optional[dict[str, Union[str, bool]]] = None,
123
120
  absorb: bool = False,
124
121
  mdl_alias: Optional[str] = None,
125
- ) -> None:
122
+ ) -> types.ModuleType:
126
123
  """Import a Python module."""
127
124
  try:
128
125
  target = target.lstrip(".") if target.startswith("..") else target
129
- imported_module = importlib.import_module(target)
126
+ imported_module = importlib.import_module(name=target)
130
127
  main_module = __import__("__main__")
131
- # importer = importlib.import_module(caller)
132
128
  if absorb:
133
129
  for name in dir(imported_module):
134
130
  if not name.startswith("_"):
@@ -136,11 +132,21 @@ def py_import(
136
132
 
137
133
  elif items:
138
134
  for name, alias in items.items():
139
- setattr(
140
- main_module,
141
- alias if isinstance(alias, str) else name,
142
- getattr(imported_module, name),
143
- )
135
+ try:
136
+ setattr(
137
+ main_module,
138
+ alias if isinstance(alias, str) else name,
139
+ getattr(imported_module, name),
140
+ )
141
+ except AttributeError as e:
142
+ if hasattr(imported_module, "__path__"):
143
+ setattr(
144
+ main_module,
145
+ alias if isinstance(alias, str) else name,
146
+ importlib.import_module(f"{target}.{name}"),
147
+ )
148
+ else:
149
+ raise e
144
150
 
145
151
  else:
146
152
  setattr(
@@ -148,5 +154,7 @@ def py_import(
148
154
  mdl_alias if isinstance(mdl_alias, str) else target,
149
155
  imported_module,
150
156
  )
151
- except ImportError:
157
+ return imported_module
158
+ except ImportError as e:
152
159
  print(f"Failed to import module {target}")
160
+ raise e
@@ -0,0 +1,20 @@
1
+ """LLM implementations for MTLLM."""
2
+
3
+ from .anthropic import Anthropic
4
+ from .base import BaseLLM
5
+ from .groq import Groq
6
+ from .huggingface import Huggingface
7
+ from .ollama import Ollama
8
+ from .openai import OpenAI
9
+ from .togetherai import TogetherAI
10
+
11
+
12
+ __all__ = [
13
+ "Anthropic",
14
+ "Ollama",
15
+ "Huggingface",
16
+ "Groq",
17
+ "BaseLLM",
18
+ "OpenAI",
19
+ "TogetherAI",
20
+ ]
@@ -0,0 +1,61 @@
1
+ """Anthropic API client for MTLLM."""
2
+
3
+ from .base import BaseLLM
4
+
5
+
6
+ REASON_SUFFIX = """
7
+ Reason and return the output result(s) only, adhering to the provided Type in the following format
8
+
9
+ [Reasoning] <Reason>
10
+ [Output] <Result>
11
+ """
12
+
13
+ NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
14
+
15
+ [Output] <result>
16
+ """ # noqa E501
17
+
18
+ CHAIN_OF_THOUGHT_SUFFIX = """
19
+ Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
20
+
21
+ [Chain of Thoughts] <Chain of Thoughts>
22
+ [Output] <Result>
23
+ """ # noqa E501
24
+
25
+ REACT_SUFFIX = """
26
+ """ # noqa E501
27
+
28
+
29
+ class Anthropic(BaseLLM):
30
+ """Anthropic API client for MTLLM."""
31
+
32
+ MTLLM_METHOD_PROMPTS: dict[str, str] = {
33
+ "Normal": NORMAL_SUFFIX,
34
+ "Reason": REASON_SUFFIX,
35
+ "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
36
+ "ReAct": REACT_SUFFIX,
37
+ }
38
+
39
+ def __init__(
40
+ self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
41
+ ) -> None:
42
+ """Initialize the Anthropic API client."""
43
+ import anthropic # type: ignore
44
+
45
+ self.client = anthropic.Anthropic()
46
+ self.verbose = verbose
47
+ self.max_tries = max_tries
48
+ self.model_name = kwargs.get("model_name", "claude-3-sonnet-20240229")
49
+ self.temperature = kwargs.get("temperature", 0.7)
50
+ self.max_tokens = kwargs.get("max_tokens", 1024)
51
+
52
+ def __infer__(self, meaning_in: str, **kwargs: dict) -> str:
53
+ """Infer a response from the input meaning."""
54
+ messages = [{"role": "user", "content": meaning_in}]
55
+ output = self.client.messages.create(
56
+ model=kwargs.get("model_name", self.model_name),
57
+ temperature=kwargs.get("temperature", self.temperature),
58
+ max_tokens=kwargs.get("max_tokens", self.max_tokens),
59
+ messages=messages,
60
+ )
61
+ return output.content[0].text
@@ -0,0 +1,206 @@
1
+ """Base Large Language Model (LLM) class."""
2
+
3
+ import logging
4
+ import re
5
+
6
+ from .utils import logger
7
+
8
+
9
+ httpx_logger = logging.getLogger("httpx")
10
+ httpx_logger.setLevel(logging.WARNING)
11
+
12
+ SYSTEM_PROMPT = """
13
+ [System Prompt]
14
+ This is an operation you must perform and return the output values. Neither, the methodology, extra sentences nor the code are not needed.
15
+ Input/Type formatting: Explanation of the Input (variable_name) (type) = value
16
+ """ # noqa E501
17
+
18
+
19
+ PROMPT_TEMPLATE = """
20
+ [Information]
21
+ {information}
22
+
23
+ [Context]
24
+ {context}
25
+
26
+ [Inputs Information]
27
+ {inputs_information}
28
+
29
+ [Output Information]
30
+ {output_information}
31
+
32
+ [Type Explanations]
33
+ {type_explanations}
34
+
35
+ [Action]
36
+ {action}
37
+ """ # noqa E501
38
+
39
+ NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
40
+
41
+ [Output] <result>
42
+ """ # noqa E501
43
+
44
+ REASON_SUFFIX = """
45
+ Reason and return the output result(s) only, adhering to the provided Type in the following format
46
+
47
+ [Reasoning] <Reason>
48
+ [Output] <Result>
49
+ """
50
+
51
+ CHAIN_OF_THOUGHT_SUFFIX = """
52
+ Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
53
+
54
+ [Chain of Thoughts] <Chain of Thoughts>
55
+ [Output] <Result>
56
+ """ # noqa E501
57
+
58
+ REACT_SUFFIX = """
59
+ """ # noqa E501
60
+
61
+ MTLLM_OUTPUT_FIX_PROMPT = """
62
+ [Output]
63
+ {model_output}
64
+
65
+ [Previous Result You Provided]
66
+ {previous_output}
67
+
68
+ [Desired Output Type]
69
+ {output_info}
70
+
71
+ [Type Explanations]
72
+ {output_type_info}
73
+
74
+ Above output is not in the desired Output Format/Type. Please provide the output in the desired type. Do not repeat the previously provided output.
75
+ Important: Do not provide the code or the methodology. Only provide
76
+ the output in the desired format.
77
+ """ # noqa E501
78
+
79
+ OUTPUT_CHECK_PROMPT = """
80
+ [Output]
81
+ {model_output}
82
+
83
+ [Desired Output Type]
84
+ {output_type}
85
+
86
+ [Type Explanations]
87
+ {output_type_info}
88
+
89
+ Check if the output is exactly in the desired Output Type. Important: Just say 'Yes' or 'No'.
90
+ """ # noqa E501
91
+
92
+
93
+ class BaseLLM:
94
+ """Base Large Language Model (LLM) class."""
95
+
96
+ MTLLM_SYSTEM_PROMPT: str = SYSTEM_PROMPT
97
+ MTLLM_PROMPT: str = PROMPT_TEMPLATE
98
+ MTLLM_METHOD_PROMPTS: dict[str, str] = {
99
+ "Normal": NORMAL_SUFFIX,
100
+ "Reason": REASON_SUFFIX,
101
+ "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
102
+ "ReAct": REACT_SUFFIX,
103
+ }
104
+ OUTPUT_FIX_PROMPT: str = MTLLM_OUTPUT_FIX_PROMPT
105
+ OUTPUT_CHECK_PROMPT: str = OUTPUT_CHECK_PROMPT
106
+
107
+ def __init__(
108
+ self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
109
+ ) -> None:
110
+ """Initialize the Large Language Model (LLM) client."""
111
+ self.verbose = verbose
112
+ self.max_tries = max_tries
113
+ raise NotImplementedError
114
+
115
+ def __infer__(self, meaning_in: str, **kwargs: dict) -> str:
116
+ """Infer a response from the input meaning."""
117
+ raise NotImplementedError
118
+
119
+ def __call__(self, input_text: str, **kwargs: dict) -> str:
120
+ """Infer a response from the input text."""
121
+ if self.verbose:
122
+ logger.info(f"Meaning In\n{input_text}")
123
+ return self.__infer__(input_text, **kwargs)
124
+
125
+ def resolve_output(
126
+ self,
127
+ meaning_out: str,
128
+ output_semstr: str,
129
+ output_type: str,
130
+ output_type_info: str,
131
+ ) -> str:
132
+ """Resolve the output string to return the reasoning and output."""
133
+ if self.verbose:
134
+ logger.opt(colors=True).info(f"Meaning Out\n<green>{meaning_out}</green>")
135
+ output_match = re.search(r"\[Output\](.*)", meaning_out)
136
+ output = output_match.group(1).strip() if output_match else None
137
+ if not output_match:
138
+ output = self._extract_output(
139
+ meaning_out,
140
+ output_semstr,
141
+ output_type,
142
+ output_type_info,
143
+ self.max_tries,
144
+ )
145
+ return str(output)
146
+
147
+ def _check_output(
148
+ self, output: str, output_type: str, output_type_info: str
149
+ ) -> bool:
150
+ """Check if the output is in the desired format."""
151
+ output_check_prompt = self.OUTPUT_CHECK_PROMPT.format(
152
+ model_output=output,
153
+ output_type=output_type,
154
+ output_type_info=output_type_info,
155
+ )
156
+ llm_output = self.__infer__(output_check_prompt)
157
+ return "yes" in llm_output.lower()
158
+
159
+ def _extract_output(
160
+ self,
161
+ meaning_out: str,
162
+ output_semstr: str,
163
+ output_type: str,
164
+ output_type_info: str,
165
+ max_tries: int,
166
+ previous_output: str = "None",
167
+ ) -> str:
168
+ """Extract the output from the meaning out string."""
169
+ if max_tries == 0:
170
+ logger.error("Failed to extract output. Max tries reached.")
171
+ raise ValueError(
172
+ "Failed to extract output. Try Changing the Semstrings, provide examples or change the method."
173
+ )
174
+
175
+ if self.verbose:
176
+ if max_tries < self.max_tries:
177
+ logger.info(
178
+ f"Failed to extract output. Trying to extract output again. Max tries left: {max_tries}"
179
+ )
180
+ else:
181
+ logger.info("Extracting output from the meaning out string.")
182
+
183
+ output_fix_prompt = self.OUTPUT_FIX_PROMPT.format(
184
+ model_output=meaning_out,
185
+ previous_output=previous_output,
186
+ output_info=f"{output_semstr} ({output_type})",
187
+ output_type_info=output_type_info,
188
+ )
189
+ llm_output = self.__infer__(output_fix_prompt)
190
+ is_in_desired_format = self._check_output(
191
+ llm_output, output_type, output_type_info
192
+ )
193
+ if self.verbose:
194
+ logger.info(
195
+ f"Extracted Output: {llm_output}. Is in Desired Format: {is_in_desired_format}"
196
+ )
197
+ if is_in_desired_format:
198
+ return llm_output
199
+ return self._extract_output(
200
+ meaning_out,
201
+ output_semstr,
202
+ output_type,
203
+ output_type_info,
204
+ max_tries - 1,
205
+ llm_output,
206
+ )
@@ -0,0 +1,67 @@
1
+ """Groq API client for MTLLM."""
2
+
3
+ from .base import BaseLLM
4
+
5
+
6
+ REASON_SUFFIX = """
7
+ Reason and return the output result(s) only, adhering to the provided Type in the following format
8
+
9
+ [Reasoning] <Reason>
10
+ [Output] <Result>
11
+ """
12
+
13
+ NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
14
+
15
+ [Output] <result>
16
+ """ # noqa E501
17
+
18
+ CHAIN_OF_THOUGHT_SUFFIX = """
19
+ Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
20
+
21
+ [Chain of Thoughts] <Chain of Thoughts>
22
+ [Output] <Result>
23
+ """ # noqa E501
24
+
25
+ REACT_SUFFIX = """
26
+ """ # noqa E501
27
+
28
+
29
+ class Groq(BaseLLM):
30
+ """Groq API client for MTLLM."""
31
+
32
+ MTLLM_METHOD_PROMPTS: dict[str, str] = {
33
+ "Normal": NORMAL_SUFFIX,
34
+ "Reason": REASON_SUFFIX,
35
+ "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
36
+ "ReAct": REACT_SUFFIX,
37
+ }
38
+
39
+ def __init__(
40
+ self, verbose: bool = False, max_tries: int = 10, **kwargs: dict
41
+ ) -> None:
42
+ """Initialize the Groq API client."""
43
+ import groq # type: ignore
44
+
45
+ self.client = groq.Groq()
46
+ self.verbose = verbose
47
+ self.max_tries = max_tries
48
+ self.model_name = kwargs.get("model_name", "mixtral-8x7b-32768")
49
+ self.temperature = kwargs.get("temperature", 0.7)
50
+ self.max_tokens = kwargs.get("max_tokens", 1024)
51
+
52
+ def __infer__(self, meaning_in: str, **kwargs: dict) -> str:
53
+ """Infer a response from the input meaning."""
54
+ messages = [{"role": "user", "content": meaning_in}]
55
+ model_params = {
56
+ k: v
57
+ for k, v in kwargs.items()
58
+ if k not in ["model_name", "temperature", "max_tokens"]
59
+ }
60
+ output = self.client.chat.completions.create(
61
+ model=kwargs.get("model_name", self.model_name),
62
+ temperature=kwargs.get("temperature", self.temperature),
63
+ max_tokens=kwargs.get("max_tokens", self.max_tokens),
64
+ messages=messages,
65
+ **model_params,
66
+ )
67
+ return output.choices[0].message.content