janus-llm 3.1.0__py3-none-any.whl → 3.2.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
- janus/__init__.py +1 -1
- janus/cli.py +64 -7
- janus/converter/_tests/test_translate.py +7 -7
- janus/converter/converter.py +6 -3
- janus/converter/translate.py +1 -1
- janus/language/alc/_tests/test_alc.py +3 -3
- janus/language/binary/_tests/test_binary.py +2 -2
- janus/language/mumps/_tests/test_mumps.py +2 -2
- janus/language/treesitter/_tests/test_treesitter.py +2 -2
- janus/llm/models_info.py +42 -20
- janus/metrics/_tests/test_llm.py +3 -3
- janus/metrics/metric.py +4 -4
- {janus_llm-3.1.0.dist-info → janus_llm-3.2.0.dist-info}/METADATA +1 -1
- {janus_llm-3.1.0.dist-info → janus_llm-3.2.0.dist-info}/RECORD +17 -17
- {janus_llm-3.1.0.dist-info → janus_llm-3.2.0.dist-info}/LICENSE +0 -0
- {janus_llm-3.1.0.dist-info → janus_llm-3.2.0.dist-info}/WHEEL +0 -0
- {janus_llm-3.1.0.dist-info → janus_llm-3.2.0.dist-info}/entry_points.txt +0 -0
janus/__init__.py
CHANGED
@@ -5,7 +5,7 @@ from langchain_core._api.deprecation import LangChainDeprecationWarning
 from .converter.translate import Translator
 from .metrics import *  # noqa: F403
 
-__version__ = "3.1.0"
+__version__ = "3.2.0"
 
 # Ignoring a deprecation warning from langchain_core that I can't seem to hunt down
 warnings.filterwarnings("ignore", category=LangChainDeprecationWarning)
janus/cli.py
CHANGED
@@ -32,8 +32,12 @@ from janus.language.treesitter import TreeSitterSplitter
 from janus.llm.model_callbacks import COST_PER_1K_TOKENS
 from janus.llm.models_info import (
     MODEL_CONFIG_DIR,
+    MODEL_ID_TO_LONG_ID,
     MODEL_TYPE_CONSTRUCTORS,
+    MODEL_TYPES,
     TOKEN_LIMITS,
+    bedrock_models,
+    openai_models,
 )
 from janus.metrics.cli import evaluate
 from janus.utils.enums import LANGUAGES
@@ -179,7 +183,7 @@ def translate(
             "-L",
             help="The custom name of the model set with 'janus llm add'.",
         ),
-    ] = "gpt-
+    ] = "gpt-4o",
     max_prompts: Annotated[
         int,
         typer.Option(
@@ -301,7 +305,7 @@ def document(
             "-L",
             help="The custom name of the model set with 'janus llm add'.",
         ),
-    ] = "gpt-
+    ] = "gpt-4o",
     max_prompts: Annotated[
         int,
         typer.Option(
@@ -437,7 +441,7 @@ def diagram(
             "-L",
             help="The custom name of the model set with 'janus llm add'.",
         ),
-    ] = "gpt-
+    ] = "gpt-4o",
     max_prompts: Annotated[
         int,
         typer.Option(
@@ -800,16 +804,44 @@ def llm_add(
             "model_cost": {"input": in_cost, "output": out_cost},
         }
     elif model_type == "OpenAI":
-
+        model_id = typer.prompt(
+            "Enter the model ID (list model IDs with `janus llm ls -a`)",
+            default="gpt-4o",
+            type=click.Choice(openai_models),
+            show_choices=False,
+        )
         params = dict(
-            model_name
+            # OpenAI uses the "model_name" key for what we're calling "long_model_id"
+            model_name=MODEL_ID_TO_LONG_ID[model_id],
             temperature=0.7,
             n=1,
         )
-        max_tokens = TOKEN_LIMITS[
-        model_cost = COST_PER_1K_TOKENS[
+        max_tokens = TOKEN_LIMITS[MODEL_ID_TO_LONG_ID[model_id]]
+        model_cost = COST_PER_1K_TOKENS[MODEL_ID_TO_LONG_ID[model_id]]
+        cfg = {
+            "model_type": model_type,
+            "model_id": model_id,
+            "model_args": params,
+            "token_limit": max_tokens,
+            "model_cost": model_cost,
+        }
+    elif model_type == "BedrockChat" or model_type == "Bedrock":
+        model_id = typer.prompt(
+            "Enter the model ID (list model IDs with `janus llm ls -a`)",
+            default="bedrock-claude-sonnet",
+            type=click.Choice(bedrock_models),
+            show_choices=False,
+        )
+        params = dict(
+            # Bedrock uses the "model_id" key for what we're calling "long_model_id"
+            model_id=MODEL_ID_TO_LONG_ID[model_id],
+            model_kwargs={"temperature": 0.7},
+        )
+        max_tokens = TOKEN_LIMITS[MODEL_ID_TO_LONG_ID[model_id]]
+        model_cost = COST_PER_1K_TOKENS[MODEL_ID_TO_LONG_ID[model_id]]
         cfg = {
             "model_type": model_type,
+            "model_id": model_id,
             "model_args": params,
             "token_limit": max_tokens,
             "model_cost": model_cost,
@@ -821,6 +853,31 @@ def llm_add(
     print(f"Model config written to {model_cfg}")
 
 
+@llm.command("ls", help="List all of the user-configured models")
+def llm_ls(
+    all: Annotated[
+        bool,
+        typer.Option(
+            "--all",
+            "-a",
+            is_flag=True,
+            help="List all models, including the default model IDs.",
+            click_type=click.Choice(sorted(list(MODEL_TYPE_CONSTRUCTORS.keys()))),
+        ),
+    ] = False,
+):
+    print("\n[green]User-configured models[/green]:")
+    for model_cfg in MODEL_CONFIG_DIR.glob("*.json"):
+        with open(model_cfg, "r") as f:
+            cfg = json.load(f)
+        print(f"\t[blue]{model_cfg.stem}[/blue]: [purple]{cfg['model_type']}[/purple]")
+
+    if all:
+        print("\n[green]Available model IDs[/green]:")
+        for model_id, model_type in MODEL_TYPES.items():
+            print(f"\t[blue]{model_id}[/blue]: [purple]{model_type}[/purple]")
+
+
 @embedding.command("add", help="Add an embedding model config to janus")
 def embedding_add(
     model_name: Annotated[
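Note on the new `janus llm add` flow above: for OpenAI and Bedrock models it now records a short `model_id` in the per-model JSON config alongside the constructor arguments, and the new `janus llm ls -a` command lists both user-configured models and the built-in model IDs. A minimal sketch of the dict that gets written, assuming a user picked the `gpt-4o` ID at the prompt; the token limit and cost values here are placeholders, not taken from the package's tables:

    # Hypothetical shape of the config that `janus llm add` assembles for an
    # OpenAI model before writing it to a JSON file under MODEL_CONFIG_DIR.
    cfg = {
        "model_type": "OpenAI",
        "model_id": "gpt-4o",  # short ID chosen at the prompt
        "model_args": {"model_name": "gpt-4o", "temperature": 0.7, "n": 1},
        "token_limit": 128000,  # placeholder; taken from TOKEN_LIMITS[long ID]
        "model_cost": {"input": 0.005, "output": 0.015},  # placeholder values
    }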
janus/converter/_tests/test_translate.py
CHANGED
@@ -79,7 +79,7 @@ class TestTranslator(unittest.TestCase):
     def setUp(self):
         """Set up the tests."""
         self.translator = Translator(
-            model="gpt-
+            model="gpt-4o",
             source_language="fortran",
             target_language="python",
             target_version="3.10",
@@ -88,7 +88,7 @@ class TestTranslator(unittest.TestCase):
         self.TEST_FILE_EMBEDDING_COUNT = 14
 
         self.req_translator = RequirementsDocumenter(
-            model="gpt-
+            model="gpt-4o",
             source_language="fortran",
             prompt_template="requirements",
         )
@@ -317,14 +317,14 @@ class TestDiagramGenerator(unittest.TestCase):
     def setUp(self):
         """Set up the tests."""
         self.diagram_generator = DiagramGenerator(
-            model="gpt-
+            model="gpt-4o",
             source_language="fortran",
             diagram_type="Activity",
         )
 
     def test_init(self):
         """Test __init__ method."""
-        self.assertEqual(self.diagram_generator._model_name, "gpt-
+        self.assertEqual(self.diagram_generator._model_name, "gpt-4o")
         self.assertEqual(self.diagram_generator._source_language, "fortran")
         self.assertEqual(self.diagram_generator._diagram_type, "Activity")
 
@@ -370,8 +370,8 @@ def test_language_combinations(
     """Tests that translator target language settings are consistent
     with prompt template expectations.
     """
-    translator = Translator(model="gpt-
-    translator.set_model("gpt-
+    translator = Translator(model="gpt-4o")
+    translator.set_model("gpt-4o")
     translator.set_source_language(source_language)
     translator.set_target_language(expected_target_language, expected_target_version)
     translator.set_prompt(prompt_template)
@@ -379,5 +379,5 @@ def test_language_combinations(
     assert translator._target_language == expected_target_language  # nosec
     assert translator._target_version == expected_target_version  # nosec
     assert translator._splitter.language == source_language  # nosec
-    assert translator._splitter.model.model_name == "gpt-
+    assert translator._splitter.model.model_name == "gpt-4o"  # nosec
     assert translator._prompt_template_name == prompt_template  # nosec
janus/converter/converter.py
CHANGED
@@ -64,7 +64,7 @@ class Converter:
 
     def __init__(
         self,
-        model: str = "gpt-
+        model: str = "gpt-4o",
         model_arguments: dict[str, Any] = {},
         source_language: str = "fortran",
         max_prompts: int = 10,
@@ -92,6 +92,7 @@ class Converter:
         self.override_token_limit: bool = max_tokens is not None
 
         self._model_name: str
+        self._model_id: str
         self._custom_model_arguments: dict[str, Any]
 
         self._source_language: str
@@ -265,7 +266,9 @@ class Converter:
         # model_arguments.update(self._custom_model_arguments)
 
         # Load the model
-        self._llm, token_limit, self.model_cost = load_model(
+        self._llm, self._model_id, token_limit, self.model_cost = load_model(
+            self._model_name
+        )
         # Set the max_tokens to less than half the model's limit to allow for enough
         # tokens at output
         # Only modify max_tokens if it is not specified by user
@@ -283,7 +286,7 @@ class Converter:
         If the relevant fields have not been changed since the last time this
         method was called, nothing happens.
         """
-        prompt_engine = MODEL_PROMPT_ENGINES[self.
+        prompt_engine = MODEL_PROMPT_ENGINES[self._model_id](
             source_language=self._source_language,
             prompt_template=self._prompt_template_name,
         )
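The converter change above switches from unpacking a 3-tuple to a 4-tuple out of load_model and keys MODEL_PROMPT_ENGINES by the short model ID instead of the user-supplied name. A minimal sketch of the new calling convention, assuming MODEL_PROMPT_ENGINES is exported from janus.llm.models_info alongside load_model (the import is not shown in this diff) and using "simple" as an assumed prompt-template name:

    # Sketch, not a verbatim excerpt: load_model now also returns the model ID,
    # and the prompt engine is looked up by that ID.
    from janus.llm.models_info import MODEL_PROMPT_ENGINES, load_model

    llm, model_id, token_limit, model_cost = load_model("gpt-4o")
    prompt_engine = MODEL_PROMPT_ENGINES[model_id](
        source_language="fortran",        # default source language in Converter
        prompt_template="simple",         # assumed template name for illustration
    )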
janus/converter/translate.py
CHANGED
@@ -90,7 +90,7 @@ class Translator(Converter):
                 f"({self._source_language} != {self._target_language})"
             )
 
-        prompt_engine = MODEL_PROMPT_ENGINES[self.
+        prompt_engine = MODEL_PROMPT_ENGINES[self._model_id](
             source_language=self._source_language,
             target_language=self._target_language,
             target_version=self._target_version,
janus/language/alc/_tests/test_alc.py
CHANGED
@@ -11,8 +11,8 @@ class TestAlcSplitter(unittest.TestCase):
 
     def setUp(self):
         """Set up the tests."""
-        model_name = "gpt-
-        llm, _, _ = load_model(model_name)
+        model_name = "gpt-4o"
+        llm, _, _, _ = load_model(model_name)
         self.splitter = AlcSplitter(model=llm)
         self.combiner = Combiner(language="ibmhlasm")
         self.test_file = Path("janus/language/alc/_tests/alc.asm")
@@ -20,7 +20,7 @@ class TestAlcSplitter(unittest.TestCase):
     def test_split(self):
         """Test the split method."""
         tree_root = self.splitter.split(self.test_file)
-        self.
+        self.assertAlmostEqual(tree_root.n_descendents, 32, delta=5)
         self.assertLessEqual(tree_root.max_tokens, self.splitter.max_tokens)
         self.assertFalse(tree_root.complete)
         self.combiner.combine_children(tree_root)
janus/language/binary/_tests/test_binary.py
CHANGED
@@ -13,9 +13,9 @@ class TestBinarySplitter(unittest.TestCase):
     """Tests for the BinarySplitter class."""
 
     def setUp(self):
-        model_name = "gpt-
+        model_name = "gpt-4o"
         self.binary_file = Path("janus/language/binary/_tests/hello")
-        self.llm, _, _ = load_model(model_name)
+        self.llm, _, _, _ = load_model(model_name)
         self.splitter = BinarySplitter(model=self.llm)
         os.environ["GHIDRA_INSTALL_PATH"] = "~/programs/ghidra_10.4_PUBLIC"
 
janus/language/mumps/_tests/test_mumps.py
CHANGED
@@ -11,8 +11,8 @@ class TestMumpsSplitter(unittest.TestCase):
 
     def setUp(self):
         """Set up the tests."""
-        model_name = "gpt-
-        llm, _, _ = load_model(model_name)
+        model_name = "gpt-4o"
+        llm, _, _, _ = load_model(model_name)
         self.splitter = MumpsSplitter(model=llm)
         self.combiner = Combiner(language="mumps")
         self.test_file = Path("janus/language/mumps/_tests/mumps.m")
janus/language/treesitter/_tests/test_treesitter.py
CHANGED
@@ -11,9 +11,9 @@ class TestTreeSitterSplitter(unittest.TestCase):
 
     def setUp(self):
        """Set up the tests."""
-        model_name = "gpt-
+        model_name = "gpt-4o"
         self.maxDiff = None
-        self.llm, _, _ = load_model(model_name)
+        self.llm, _, _, _ = load_model(model_name)
 
     def _split(self):
         """Split the test file."""
janus/llm/models_info.py
CHANGED
@@ -55,11 +55,12 @@ openai_model_reroutes = {
 }
 
 openai_models = [
-    "gpt-
-    "gpt-4
-    "gpt-4-
-    "gpt-
-    "gpt-3.5-turbo
+    "gpt-4o",
+    "gpt-4",
+    "gpt-4-turbo",
+    "gpt-4-turbo-preview",
+    "gpt-3.5-turbo",
+    "gpt-3.5-turbo-16k",
 ]
 claude_models = [
     "bedrock-claude-v2",
@@ -133,8 +134,8 @@ _open_ai_defaults: dict[str, str] = {
     "openai_organization": os.getenv("OPENAI_ORG_ID"),
 }
 
-
-    **{m:
+MODEL_ID_TO_LONG_ID = {
+    **{m: mr for m, mr in openai_model_reroutes.items()},
     "bedrock-claude-v2": "anthropic.claude-v2",
     "bedrock-claude-instant-v1": "anthropic.claude-instant-v1",
     "bedrock-claude-haiku": "anthropic.claude-3-haiku-20240307-v1:0",
@@ -157,7 +158,7 @@ model_identifiers = {
 
 MODEL_DEFAULT_ARGUMENTS: dict[str, dict[str, str]] = {
     k: (dict(model_name=k) if k in openai_models else dict(model_id=v))
-    for k, v in
+    for k, v in MODEL_ID_TO_LONG_ID.items()
 }
 
 DEFAULT_MODELS = list(MODEL_DEFAULT_ARGUMENTS.keys())
@@ -199,22 +200,38 @@ TOKEN_LIMITS: dict[str, int] = {
 }
 
 
-def
+def get_available_model_names() -> list[str]:
+    avaialable_models = []
+    for file in MODEL_CONFIG_DIR.iterdir():
+        if file.is_file():
+            avaialable_models.append(MODEL_CONFIG_DIR.stem)
+    return avaialable_models
+
+
+def load_model(user_model_name: str) -> tuple[BaseLanguageModel, int, dict[str, float]]:
     if not MODEL_CONFIG_DIR.exists():
         MODEL_CONFIG_DIR.mkdir(parents=True)
-    model_config_file = MODEL_CONFIG_DIR / f"{
+    model_config_file = MODEL_CONFIG_DIR / f"{user_model_name}.json"
     if not model_config_file.exists():
-
-
-
-
-
+        log.warning(
+            f"Model {user_model_name} not found in user-defined models, searching "
+            f"default models for {user_model_name}."
+        )
+        model_id = user_model_name
+        if user_model_name not in DEFAULT_MODELS:
+            message = (
+                f"Model {user_model_name} not found in default models. Make sure to run "
+                "`janus llm add` first."
+            )
+            log.error(message)
+            raise ValueError(message)
         model_config = {
-            "model_type": MODEL_TYPES[
-            "
-            "
+            "model_type": MODEL_TYPES[model_id],
+            "model_id": model_id,
+            "model_args": MODEL_DEFAULT_ARGUMENTS[model_id],
+            "token_limit": TOKEN_LIMITS.get(MODEL_ID_TO_LONG_ID[model_id], 4096),
             "model_cost": COST_PER_1K_TOKENS.get(
-
+                MODEL_ID_TO_LONG_ID[model_id], {"input": 0, "output": 0}
             ),
         }
         with open(model_config_file, "w") as f:
@@ -227,4 +244,9 @@ def load_model(model_name: str) -> tuple[BaseLanguageModel, int, dict[str, float
     if model_config["model_type"] == "OpenAI":
         model_args.update(_open_ai_defaults)
     model = model_constructor(**model_args)
-    return
+    return (
+        model,
+        model_config["model_id"],
+        model_config["token_limit"],
+        model_config["model_cost"],
+    )
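The models_info.py changes above introduce MODEL_ID_TO_LONG_ID, which maps the short user-facing model ID to the provider-facing "long" ID used to key token limits and per-1K-token costs, and load_model now returns that ID as a second element of its tuple (note that the annotated return type in the diff still lists three elements). A minimal usage sketch, mirroring the patterns in the diff; the fallback values are the ones shown there:

    # Sketch of the lookup pattern used throughout this release; not a verbatim
    # excerpt from the package.
    from janus.llm.model_callbacks import COST_PER_1K_TOKENS
    from janus.llm.models_info import MODEL_ID_TO_LONG_ID, TOKEN_LIMITS, load_model

    long_id = MODEL_ID_TO_LONG_ID["bedrock-claude-haiku"]  # anthropic.claude-3-haiku-...
    token_limit = TOKEN_LIMITS.get(long_id, 4096)
    model_cost = COST_PER_1K_TOKENS.get(long_id, {"input": 0, "output": 0})

    # load_model itself now yields (llm, model_id, token_limit, model_cost),
    # which is why the test suites below unpack four values.
    llm, model_id, token_limit, model_cost = load_model("gpt-4o")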
janus/metrics/_tests/test_llm.py
CHANGED
@@ -53,7 +53,7 @@ class TestLLMMetrics(unittest.TestCase):
             self.bad_code,
             metric="quality",
             language="python",
-            llm=load_model("gpt-
+            llm=load_model("gpt-4o")[0],
         )
         self.assertLess(bad_code_quality, 5)
 
@@ -63,7 +63,7 @@ class TestLLMMetrics(unittest.TestCase):
             self.impressive_code,
             metric="quality",
             language="python",
-            llm=load_model("gpt-
+            llm=load_model("gpt-4o")[0],
         )
         self.assertGreater(impressive_code_quality, 5)
 
@@ -81,7 +81,7 @@ class TestLLMMetrics(unittest.TestCase):
             self.impressive_code_reference,
             metric="faithfulness",
             language="python",
-            llm=load_model("gpt-
+            llm=load_model("gpt-4o")[0],
         )
         self.assertGreater(faithfulness, 8)
 
janus/metrics/metric.py
CHANGED
@@ -112,7 +112,7 @@ def metric(
             "-L",
             help="The custom name of the model set with 'janus llm add'.",
         ),
-    ] = "gpt-
+    ] = "gpt-4o",
     progress: Annotated[
         bool,
         typer.Option(
@@ -135,7 +135,7 @@ def metric(
         **kwargs,
     ):
         out = []
-        llm, token_limit, model_cost = load_model(llm_name)
+        llm, _, token_limit, model_cost = load_model(llm_name)
         if json_file_name is not None:
             with open(json_file_name, "r") as f:
                 json_obj = json.load(f)
@@ -274,7 +274,7 @@ def metric(
             "-L",
             help="The custom name of the model set with 'janus llm add'.",
         ),
-    ] = "gpt-
+    ] = "gpt-4o",
     progress: Annotated[
         bool,
         typer.Option(
@@ -296,7 +296,7 @@ def metric(
         *args,
         **kwargs,
     ):
-        llm, token_limit, model_cost = load_model(llm_name)
+        llm, _, token_limit, model_cost = load_model(llm_name)
         if json_file_name is not None:
             with open(json_file_name, "r") as f:
                 json_obj = json.load(f)
{janus_llm-3.1.0.dist-info → janus_llm-3.2.0.dist-info}/RECORD
CHANGED
@@ -1,18 +1,18 @@
-janus/__init__.py,sha256=
+janus/__init__.py,sha256=cQaVQ7gwfvdR0Xa9poA2iBEAEc__Rt667GChPOY_5zo,351
 janus/__main__.py,sha256=lEkpNtLVPtFo8ySDZeXJ_NXDHb0GVdZFPWB4gD4RPS8,64
 janus/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 janus/_tests/conftest.py,sha256=V7uW-oq3YbFiRPvrq15YoVVrA1n_83pjgiyTZ-IUGW8,963
 janus/_tests/test_cli.py,sha256=mi7wAWV07ZFli5nQdExRGIGA3AMFD9s39-HcmDV4B6Y,4232
-janus/cli.py,sha256
+janus/cli.py,sha256=nCOducHBQFFfXwaM0wzmKccElB-0-oNgLJL5cdhviTE,31622
 janus/converter/__init__.py,sha256=kzVmWOPXRDayqqBZ8ZDaFQzA_q8PEdv407dc-DefPxY,255
 janus/converter/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-janus/converter/_tests/test_translate.py,sha256=
-janus/converter/converter.py,sha256=
+janus/converter/_tests/test_translate.py,sha256=I__qGemc-qT7ZxmHFbNVa7xMo1UAOxy-s-aXrEWAeYw,15801
+janus/converter/converter.py,sha256=u0XySdsrBjDHEYdPlvU8lsRLolciDil5aYtf0QvmEeo,23573
 janus/converter/diagram.py,sha256=JsJNDf-P8bPejpDxbCVEHvw-0kewiMrXh5qLhsL5JOA,4730
 janus/converter/document.py,sha256=hsW512veNjFWbdl5WriuUdNmMEqZy8ktRvqn9rRmA6E,4566
 janus/converter/evaluate.py,sha256=APWQUY3gjAXqkJkPzvj0UA4wPK3Cv9QSJLM-YK9t-ng,476
 janus/converter/requirements.py,sha256=6YvrJRVH9BuPCOPxnXmaJQFYmoLYYvCu3zTntDLHeNg,1832
-janus/converter/translate.py,sha256=
+janus/converter/translate.py,sha256=0brQTlSfBYmXtoM8QYIOiyr0LrTr0S1n68Du-BR7_WQ,4236
 janus/embedding/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 janus/embedding/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 janus/embedding/_tests/test_collections.py,sha256=eT0cYv-qmPrHJRjDZqWPFTkqVzFDRoPrRKR__FPiz58,2651
@@ -28,11 +28,11 @@ janus/language/_tests/test_combine.py,sha256=ydCYNbTxvaxT-5axiEBzPQLn6s4arSyZ5Tx
 janus/language/_tests/test_splitter.py,sha256=VK48eqp5PYJfjdhD_x7IkeAjbF1KC3AyNnICfK8XnUQ,360
 janus/language/alc/__init__.py,sha256=j7vOMGhT1Vri6p8dsjSaY-fkO5uFn0sJ0nrNGGvcizM,42
 janus/language/alc/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-janus/language/alc/_tests/test_alc.py,sha256=
+janus/language/alc/_tests/test_alc.py,sha256=Lw03OfUa1m-ao1c6X5rf4ulZD5yHsr04L4NtsT02pcw,1000
 janus/language/alc/alc.py,sha256=n8KVHTb6FFILw50N8UM3gfT60gLVvkTjk37easwluWs,3061
 janus/language/binary/__init__.py,sha256=AlNAe12ZA366kcGSrQ1FJyOdbwxFqGBFkYR2K6yL818,51
 janus/language/binary/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-janus/language/binary/_tests/test_binary.py,sha256=
+janus/language/binary/_tests/test_binary.py,sha256=ZV6_sN0ePkIv4-7CVtaQiHI-E88V-G38e2PVJpxmu9g,1718
 janus/language/binary/binary.py,sha256=CS1RAieN8klSsCeXQEFYKUWioatUX-sOPXKQr5S6NzE,6534
 janus/language/binary/reveng/decompile_script.py,sha256=veW51oJzuO-4UD3Er062jXZ_FYtTFo9OCkl82Z2xr6A,2182
 janus/language/block.py,sha256=57hfOY-KSVMioKhkCvfDtovQt4h8lCg9cJbRF7ddV1s,9280
@@ -40,7 +40,7 @@ janus/language/combine.py,sha256=e7j8zQO_D3_LElaVCsGgtnzia7aFFK56m-mhArQBlR0,290
 janus/language/file.py,sha256=X2MYcAMlCABK77uhMdI_J2foXLrqEdinapYRfLPyKB8,563
 janus/language/mumps/__init__.py,sha256=-Ou_wJ-JgHezfp1dub2_qCYNiK9wO-zo2MlqxM9qiwE,48
 janus/language/mumps/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-janus/language/mumps/_tests/test_mumps.py,sha256=
+janus/language/mumps/_tests/test_mumps.py,sha256=f3YylHS8gTJUYXBucC-d7o34yr7lPrnqw7aHy1NAYP4,992
 janus/language/mumps/mumps.py,sha256=MkF_TZB1SOIj3JQfGKYow1Hh2Bja0EglUlpd4aAY5Iw,7351
 janus/language/mumps/patterns.py,sha256=FW5T6Nt5kBO2UKgSL1KLVDbYRgMaJAzDvEmvBkxHppA,2310
 janus/language/naive/__init__.py,sha256=gsdC543qsIX8y_RxblCBIgyW0tfucljFms6v2WTrEz0,178
@@ -53,11 +53,11 @@ janus/language/node.py,sha256=-ymv--oILEYLVO2KSOrzOlzL2cZHNQpQJYwE1cKA-pY,200
 janus/language/splitter.py,sha256=4XAe0hXka7njS30UHGCngJzDgHxn3lygUjikSHuV7Xo,16924
 janus/language/treesitter/__init__.py,sha256=mUliw7ZJLZ8NkJKyUQMSoUV82hYXE0HvLHrEdGPJF4Q,43
 janus/language/treesitter/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-janus/language/treesitter/_tests/test_treesitter.py,sha256=
+janus/language/treesitter/_tests/test_treesitter.py,sha256=T3KYiXlqjn8ypm3s4vLaBf5Gu07DgNOF4UBgPdZ_iqs,2161
 janus/language/treesitter/treesitter.py,sha256=UiV4OuWTt6IwMohHSw4FHsVNA_zxr9lNk4_Du09APdo,7509
 janus/llm/__init__.py,sha256=8Pzn3Jdx867PzDc4xmwm8wvJDGzWSIhpN0NCEYFe0LQ,36
 janus/llm/model_callbacks.py,sha256=h_xlBAHRx-gxQBBjVKRpGXxdxYf6d9L6kBoXjbEAEdI,7106
-janus/llm/models_info.py,sha256=
+janus/llm/models_info.py,sha256=38DJu1_8qn3Bh_JmpMqvAj59HALIyuawIOLv4tNPWJs,8288
 janus/metrics/__init__.py,sha256=AsxtZJUzZiXJPr2ehPPltuYP-ddechjg6X85WZUO7mA,241
 janus/metrics/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 janus/metrics/_tests/reference.py,sha256=hiaJPP9CXkvFBV_wL-gOe_BzELTw0nvB6uCxhxtIiE8,13
@@ -65,7 +65,7 @@ janus/metrics/_tests/target.py,sha256=hiaJPP9CXkvFBV_wL-gOe_BzELTw0nvB6uCxhxtIiE
 janus/metrics/_tests/test_bleu.py,sha256=TcSnNGpMh00Nkkk1zq5wDfdCANMUq9eXscU_hcBRU8A,1640
 janus/metrics/_tests/test_chrf.py,sha256=O4v1Cj513H8NYffJILpSI7CuR_dnm7F8CeB3C7sZYr0,2202
 janus/metrics/_tests/test_file_pairing.py,sha256=A4Qy6JIesFXUcaig45Ze6LiViuHQS7MFSQzDHQP3j9w,1880
-janus/metrics/_tests/test_llm.py,sha256
+janus/metrics/_tests/test_llm.py,sha256=-YXGIxZXbotgl6EkbvA1d0JKKdGTfG2_MpaZsdOkD0w,2962
 janus/metrics/_tests/test_reading.py,sha256=NDLFyjmOpM5gWf1LLTjGIw3aUR8Qf22zTt9hwe7NABs,840
 janus/metrics/_tests/test_rouge_score.py,sha256=rcHmrpy55cW507PnTnGQnp9Tsn5rk7JEyXmusY7la3Q,2020
 janus/metrics/_tests/test_similarity_score.py,sha256=jc3r0lWW5Iqm6AMKc36ewz5rboKwVw29fliBHClkzIg,799
@@ -76,7 +76,7 @@ janus/metrics/cli.py,sha256=Duuw2RF47Z-t1pal0cg3L_-N_91rx29krirqtIwjYLY,157
 janus/metrics/complexity_metrics.py,sha256=1Z9n0o_CrILqayk40wRkjR1f7yvHIsJG38DxAbqj614,6560
 janus/metrics/file_pairing.py,sha256=WNHRV1D8GOJMq8Pla5SPkTDAT7yVaS4-UU0XIGKvEVs,3729
 janus/metrics/llm_metrics.py,sha256=3677S6GYcoVcokpmAN-fwvNu-lYWAKd7M5mebiE6RZc,5687
-janus/metrics/metric.py,sha256=
+janus/metrics/metric.py,sha256=od9tJSgML5CdNlL5BE9piYac9G0g4auNGFRRDnnS8Ak,16956
 janus/metrics/reading.py,sha256=srLb2MO-vZL5ccRjaHz-dA4MwAvXVNyIKnOrvJXg77E,2244
 janus/metrics/rouge_score.py,sha256=HfUJwUWI-yq5pOjML2ee4QTOMl0NQahnqEY2Mt8Dtnw,2865
 janus/metrics/similarity.py,sha256=9pjWWpLKCsk0QfFfSgQNdPXiisqi7WJYOOHaiT8S0iY,1613
@@ -98,8 +98,8 @@ janus/utils/_tests/test_progress.py,sha256=Yh5NDNq-24n2nhHHbJm39pENAH70PYnh9ymwd
 janus/utils/enums.py,sha256=AoilbdiYyMvY2Mp0AM4xlbLSELfut2XMwhIM1S_msP4,27610
 janus/utils/logger.py,sha256=KZeuaMAnlSZCsj4yL0P6N-JzZwpxXygzACWfdZFeuek,2337
 janus/utils/progress.py,sha256=pKcCzO9JOU9fSD7qTmLWcqY5smc8mujqQMXoPgqNysE,1458
-janus_llm-3.
-janus_llm-3.
-janus_llm-3.
-janus_llm-3.
-janus_llm-3.
+janus_llm-3.2.0.dist-info/LICENSE,sha256=_j0st0a-HB6MRbP3_BW3PUqpS16v54luyy-1zVyl8NU,10789
+janus_llm-3.2.0.dist-info/METADATA,sha256=2CmCNWIGp-70EWyP_mnrJF0LtMxUOhwmyrfBcv97udc,4184
+janus_llm-3.2.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+janus_llm-3.2.0.dist-info/entry_points.txt,sha256=OGhQwzj6pvXp79B0SaBD5apGekCu7Dwe9fZZT_TZ544,39
+janus_llm-3.2.0.dist-info/RECORD,,
{janus_llm-3.1.0.dist-info → janus_llm-3.2.0.dist-info}/LICENSE
File without changes
{janus_llm-3.1.0.dist-info → janus_llm-3.2.0.dist-info}/WHEEL
File without changes
{janus_llm-3.1.0.dist-info → janus_llm-3.2.0.dist-info}/entry_points.txt
File without changes