janus-llm 3.2.1__py3-none-any.whl → 3.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- janus/__init__.py +1 -1
- janus/converter/_tests/test_translate.py +3 -2
- janus/converter/converter.py +49 -7
- janus/converter/diagram.py +68 -55
- janus/llm/models_info.py +2 -0
- janus/parsers/refiner_parser.py +49 -0
- janus/refiners/refiner.py +63 -0
- {janus_llm-3.2.1.dist-info → janus_llm-3.3.1.dist-info}/METADATA +1 -1
- {janus_llm-3.2.1.dist-info → janus_llm-3.3.1.dist-info}/RECORD +12 -10
- {janus_llm-3.2.1.dist-info → janus_llm-3.3.1.dist-info}/LICENSE +0 -0
- {janus_llm-3.2.1.dist-info → janus_llm-3.3.1.dist-info}/WHEEL +0 -0
- {janus_llm-3.2.1.dist-info → janus_llm-3.3.1.dist-info}/entry_points.txt +0 -0
janus/__init__.py
CHANGED
@@ -5,7 +5,7 @@ from langchain_core._api.deprecation import LangChainDeprecationWarning
 from janus.converter.translate import Translator
 from janus.metrics import * # noqa: F403

-__version__ = "3.2.1"
+__version__ = "3.3.1"

 # Ignoring a deprecation warning from langchain_core that I can't seem to hunt down
 warnings.filterwarnings("ignore", category=LangChainDeprecationWarning)
janus/converter/_tests/test_translate.py
CHANGED
@@ -45,16 +45,17 @@ class TestTranslator(unittest.TestCase):
     def setUp(self):
         """Set up the tests."""
         self.translator = Translator(
-            model="gpt-4o",
+            model="gpt-4o-mini",
             source_language="fortran",
             target_language="python",
             target_version="3.10",
+            splitter_type="ast-flex",
         )
         self.test_file = Path("janus/language/treesitter/_tests/languages/fortran.f90")
         self.TEST_FILE_EMBEDDING_COUNT = 14

         self.req_translator = RequirementsDocumenter(
-            model="gpt-4o",
+            model="gpt-4o-mini",
             source_language="fortran",
             prompt_template="requirements",
         )
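The test now pins the "gpt-4o-mini" alias and the "ast-flex" splitter. A minimal construction sketch (not taken from the package) showing the options this release touches, assuming Translator forwards its keyword arguments to Converter.__init__, where refiner_type is introduced below with a default of "basic":

# Hypothetical usage sketch; argument values are illustrative.
from janus.converter.translate import Translator

translator = Translator(
    model="gpt-4o-mini",          # new alias, rerouted via openai_model_reroutes
    source_language="fortran",
    target_language="python",
    target_version="3.10",
    splitter_type="ast-flex",     # "file", "tag", "chunk", "ast-strict", or "ast-flex"
    refiner_type="basic",         # new in this release; "basic" is the only valid value
)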
janus/converter/converter.py
CHANGED
@@ -6,7 +6,6 @@ from pathlib import Path
 from typing import Any

 from langchain.output_parsers import RetryWithErrorOutputParser
-from langchain.output_parsers.fix import OutputFixingParser
 from langchain_core.exceptions import OutputParserException
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.output_parsers import BaseOutputParser
@@ -29,6 +28,8 @@ from janus.llm import load_model
 from janus.llm.model_callbacks import get_model_callback
 from janus.llm.models_info import MODEL_PROMPT_ENGINES
 from janus.parsers.code_parser import GenericParser
+from janus.parsers.refiner_parser import RefinerParser
+from janus.refiners.refiner import BasicRefiner, Refiner
 from janus.utils.enums import LANGUAGES
 from janus.utils.logger import create_logger

@@ -75,6 +76,7 @@ class Converter:
         protected_node_types: tuple[str, ...] = (),
         prune_node_types: tuple[str, ...] = (),
         splitter_type: str = "file",
+        refiner_type: str = "basic",
     ) -> None:
         """Initialize a Converter instance.

@@ -84,6 +86,17 @@ class Converter:
                 values are `"code"`, `"text"`, `"eval"`, and `None` (default). If `None`,
                 the `Converter` assumes you won't be parsing an output (i.e., adding to an
                 embedding DB).
+            max_prompts: The maximum number of prompts to try before giving up.
+            max_tokens: The maximum number of tokens to use in the LLM. If `None`, the
+                converter will use half the model's token limit.
+            prompt_template: The name of the prompt template to use.
+            db_path: The path to the database to use for vectorization.
+            db_config: The configuration for the database.
+            protected_node_types: A set of node types that aren't to be merged.
+            prune_node_types: A set of node types which should be pruned.
+            splitter_type: The type of splitter to use. Valid values are `"file"`,
+                `"tag"`, `"chunk"`, `"ast-strict"`, and `"ast-flex"`.
+            refiner_type: The type of refiner to use. Valid values are `"basic"`.
         """
         self._changed_attrs: set = set()

@@ -116,7 +129,11 @@ class Converter:
         self._parser: BaseOutputParser = GenericParser()
         self._combiner: Combiner = Combiner()

+        self._refiner_type: str
+        self._refiner: Refiner
+
         self.set_splitter(splitter_type=splitter_type)
+        self.set_refiner(refiner_type=refiner_type)
         self.set_model(model_name=model, **model_arguments)
         self.set_prompt(prompt_template=prompt_template)
         self.set_source_language(source_language)
@@ -142,6 +159,7 @@ class Converter:
         self._load_prompt()
         self._load_splitter()
         self._load_vectorizer()
+        self._load_refiner()
         self._changed_attrs.clear()

     def set_model(self, model_name: str, **custom_arguments: dict[str, Any]):
@@ -179,6 +197,16 @@ class Converter:
         """
         self._splitter_type = splitter_type

+    def set_refiner(self, refiner_type: str) -> None:
+        """Validate and set the refiner name
+
+        The affected objects will not be updated until translate is called
+
+        Arguments:
+            refiner_type: the name of the refiner to use
+        """
+        self._refiner_type = refiner_type
+
     def set_source_language(self, source_language: str) -> None:
         """Validate and set the source language.

@@ -249,10 +277,24 @@ class Converter:
         )

         if self._splitter_type == "tag":
-            kwargs["tag"] = "<ITMOD_ALC_SPLIT>"
+            kwargs["tag"] = "<ITMOD_ALC_SPLIT>" # Hardcoded for now

         self._splitter = CUSTOM_SPLITTERS[self._splitter_type](**kwargs)

+    @run_if_changed("_refiner_type", "_model_name")
+    def _load_refiner(self) -> None:
+        """Load the refiner according to this instance's attributes.
+
+        If the relevant fields have not been changed since the last time this method was
+        called, nothing happens.
+        """
+        if self._refiner_type == "basic":
+            self._refiner = BasicRefiner(
+                "basic_refinement", self._model_name, self._source_language
+            )
+        else:
+            raise ValueError(f"Error: unknown refiner type {self._refiner_type}")
+
     @run_if_changed("_model_name", "_custom_model_arguments")
     def _load_model(self) -> None:
         """Load the model according to this instance's attributes.
@@ -561,22 +603,22 @@ class Converter:
         # Retries with just the input
         n3 = math.ceil(self.max_prompts / (n1 * n2))

-
-            llm=self._llm,
+        refine_output = RefinerParser(
             parser=self._parser,
+            initial_prompt=self._prompt.format(**{"SOURCE_CODE": block.original.text}),
+            refiner=self._refiner,
             max_retries=n1,
+            llm=self._llm,
         )
         retry = RetryWithErrorOutputParser.from_llm(
             llm=self._llm,
-            parser=
+            parser=refine_output,
             max_retries=n2,
         )
-
         completion_chain = self._prompt | self._llm
         chain = RunnableParallel(
             completion=completion_chain, prompt_value=self._prompt
         ) | RunnableLambda(lambda x: retry.parse_with_prompt(**x))
-
         for _ in range(n3):
             try:
                 return chain.invoke({"SOURCE_CODE": block.original.text})
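The rewritten retry stack layers three budgets derived from max_prompts: the RefinerParser retries n1 times, the RetryWithErrorOutputParser wrapping it retries n2 times, and the outer loop re-invokes the chain n3 times, so the product roughly covers max_prompts. A worked example of the split (the max_prompts value here is illustrative, not from the package):

import math

max_prompts = 20
n1 = round(max_prompts ** (1 / 3))          # 3: RefinerParser retries via the refiner
n2 = round((max_prompts // n1) ** (1 / 2))  # 2: retries with the input, output, and error
n3 = math.ceil(max_prompts / (n1 * n2))     # 4: outer attempts with just the input
assert (n1, n2, n3) == (3, 2, 4)            # up to 3 * 2 * 4 = 24 parse attempts in total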
janus/converter/diagram.py
CHANGED
@@ -1,10 +1,14 @@
-import
-
+import math
+
+from langchain.output_parsers import RetryWithErrorOutputParser
+from langchain_core.exceptions import OutputParserException
+from langchain_core.runnables import RunnableLambda, RunnableParallel

 from janus.converter.converter import run_if_changed
 from janus.converter.document import Documenter
 from janus.language.block import TranslatedCodeBlock
 from janus.llm.models_info import MODEL_PROMPT_ENGINES
+from janus.parsers.refiner_parser import RefinerParser
 from janus.parsers.uml import UMLSyntaxParser
 from janus.utils.logger import create_logger

@@ -47,65 +51,74 @@ class DiagramGenerator(Documenter):
         self._diagram_prompt_template_name = "diagram"
         self._load_diagram_prompt_engine()

-    def
-        """Given an "empty" `TranslatedCodeBlock`, translate the code represented in
-        `block.original`, setting the relevant fields in the translated block. The
-        `TranslatedCodeBlock` is updated in-pace, nothing is returned. Note that this
-        translates *only* the code for this block, not its children.
-
-        Arguments:
-            block: An empty `TranslatedCodeBlock`
-        """
-        if block.translated:
-            return
-
-        if block.original.text is None:
-            block.translated = True
-            return
-
-        if self._add_documentation:
-            documentation_block = deepcopy(block)
-            super()._add_translation(documentation_block)
-            if not documentation_block.translated:
-                message = "Error: unable to produce documentation for code block"
-                log.info(message)
-                raise ValueError(message)
-            documentation = json.loads(documentation_block.text)["docstring"]
-
-        if self._llm is None:
-            message = (
-                "Model not configured correctly, cannot translate. Try setting "
-                "the model"
-            )
-            log.error(message)
-            raise ValueError(message)
-
-        log.debug(f"[{block.name}] Translating...")
-        log.debug(f"[{block.name}] Input text:\n{block.original.text}")
-
+    def _run_chain(self, block: TranslatedCodeBlock) -> str:
         self._parser.set_reference(block.original)
+        n1 = round(self.max_prompts ** (1 / 3))

-
+        # Retries with the input, output, and error
+        n2 = round((self.max_prompts // n1) ** (1 / 2))
+
+        # Retries with just the input
+        n3 = math.ceil(self.max_prompts / (n1 * n2))

         if self._add_documentation:
-
-
-
-
-
-
+            documentation_text = super()._run_chain(block)
+            refine_output = RefinerParser(
+                parser=self._diagram_parser,
+                initial_prompt=self._diagram_prompt.format(
+                    **{
+                        "SOURCE_CODE": block.original.text,
+                        "DOCUMENTATION": documentation_text,
+                        "DIAGRAM_TYPE": self._diagram_type,
+                    }
+                ),
+                refiner=self._refiner,
+                max_retries=n1,
+                llm=self._llm,
             )
         else:
-
-
-
-
-
+            refine_output = RefinerParser(
+                parser=self._diagram_parser,
+                initial_prompt=self._diagram_prompt.format(
+                    **{
+                        "SOURCE_CODE": block.original.text,
+                        "DIAGRAM_TYPE": self._diagram_type,
+                    }
+                ),
+                refiner=self._refiner,
+                max_retries=n1,
+                llm=self._llm,
             )
-
-
-
-
+        retry = RetryWithErrorOutputParser.from_llm(
+            llm=self._llm,
+            parser=refine_output,
+            max_retries=n2,
+        )
+        completion_chain = self._prompt | self._llm
+        chain = RunnableParallel(
+            completion=completion_chain, prompt_value=self._diagram_prompt
+        ) | RunnableLambda(lambda x: retry.parse_with_prompt(**x))
+        for _ in range(n3):
+            try:
+                if self._add_documentation:
+                    return chain.invoke(
+                        {
+                            "SOURCE_CODE": block.original.text,
+                            "DOCUMENTATION": documentation_text,
+                            "DIAGRAM_TYPE": self._diagram_type,
+                        }
+                    )
+                else:
+                    return chain.invoke(
+                        {
+                            "SOURCE_CODE": block.original.text,
+                            "DIAGRAM_TYPE": self._diagram_type,
+                        }
+                    )
+            except OutputParserException:
+                pass
+
+        raise OutputParserException(f"Failed to parse after {n1*n2*n3} retries")

     @run_if_changed(
         "_diagram_prompt_template_name",
@@ -123,4 +136,4 @@ class DiagramGenerator(Documenter):
             target_version=None,
             prompt_template=self._diagram_prompt_template_name,
         )
-        self.
+        self._diagram_prompt = self._diagram_prompt_engine.prompt
janus/llm/models_info.py
CHANGED
@@ -47,6 +47,7 @@ load_dotenv()

 openai_model_reroutes = {
     "gpt-4o": "gpt-4o-2024-05-13",
+    "gpt-4o-mini": "gpt-4o-mini",
     "gpt-4": "gpt-4-0613",
     "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
     "gpt-4-turbo-preview": "gpt-4-0125-preview",
@@ -56,6 +57,7 @@ openai_model_reroutes = {

 openai_models = [
     "gpt-4o",
+    "gpt-4o-mini",
     "gpt-4",
     "gpt-4-turbo",
     "gpt-4-turbo-preview",
janus/parsers/refiner_parser.py
ADDED
@@ -0,0 +1,49 @@
+from langchain_core.exceptions import OutputParserException
+from langchain_core.language_models import BaseLanguageModel
+from langchain_core.output_parsers import BaseOutputParser
+
+from janus.refiners.refiner import Refiner
+
+
+class RefinerParser(BaseOutputParser):
+    """Parser for performing refinement with a refiner
+
+    Properties:
+        llm: the language model to use
+        parser: the parser to use for parsing llm output
+        initial_prompt: initial prompt used to generate output
+        refiner: refiner that gives new subsequent prompts
+        max_retires: maximum number of times to attempt refining
+    """
+
+    class Config:
+        arbitrary_types_allowed = True
+
+    llm: BaseLanguageModel
+    parser: BaseOutputParser
+    initial_prompt: str
+    refiner: Refiner
+    max_retries: int
+
+    def parse(self, text: str) -> str:
+        """Parses the text using the refiner
+
+        Arguments:
+            text: text to parse
+
+        Returns:
+            Parsed text
+        """
+        last_prompt = self.initial_prompt
+        for _ in range(self.max_retries):
+            try:
+                return self.parser.parse(text)
+            except OutputParserException as oe:
+                err = str(oe)
+                new_prompt, prompt_arguments = self.refiner.refine(last_prompt, text, err)
+                new_chain = new_prompt | self.llm
+                text = new_chain.invoke(prompt_arguments)
+                last_prompt = new_prompt.format(**prompt_arguments)
+        raise OutputParserException(
+            f"Error: unable to correct output after {self.max_retries} attempts"
+        )
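RefinerParser wraps another parser: each time the wrapped parser rejects the text, it asks the refiner for a follow-up prompt, re-queries the LLM with it, and tries to parse the new output. A self-contained sketch of that loop using a fake LLM and a toy parser/refiner (everything other than RefinerParser and Refiner is hypothetical, and FakeListLLM comes from langchain_community, not from janus):

# Minimal sketch, assuming langchain_community is installed alongside janus.
from langchain_community.llms import FakeListLLM
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import ChatPromptTemplate

from janus.parsers.refiner_parser import RefinerParser
from janus.refiners.refiner import Refiner


class JSONOnlyParser(BaseOutputParser):
    """Toy parser: accept only strings that look like a JSON object."""

    def parse(self, text: str) -> str:
        if not text.strip().startswith("{"):
            raise OutputParserException("expected a JSON object")
        return text


class EchoRefiner(Refiner):
    """Toy refiner: re-ask, quoting the previous prompt, output, and errors."""

    def refine(self, original_prompt, original_output, errors, **kwargs):
        template = ChatPromptTemplate.from_template(
            "Previous prompt:\n{ORIGINAL_PROMPT}\n"
            "Previous output:\n{OUTPUT}\n"
            "Errors:\n{ERRORS}\n"
            "Return only the corrected JSON."
        )
        return template, {
            "ORIGINAL_PROMPT": original_prompt,
            "OUTPUT": original_output,
            "ERRORS": errors,
        }


# The fake LLM answers wrongly once, then correctly on the second refinement round.
llm = FakeListLLM(responses=["still not json", '{"ok": true}'])
parser = RefinerParser(
    llm=llm,
    parser=JSONOnlyParser(),
    initial_prompt="Summarize the source code as JSON.",
    refiner=EchoRefiner(),
    max_retries=3,
)
print(parser.parse("not json"))  # succeeds on the second refined output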
janus/refiners/refiner.py
ADDED
@@ -0,0 +1,63 @@
+from langchain_core.prompts import ChatPromptTemplate
+
+from janus.llm.models_info import MODEL_PROMPT_ENGINES
+
+
+class Refiner:
+    def refine(
+        self, original_prompt: str, original_output: str, errors: str, **kwargs
+    ) -> tuple[ChatPromptTemplate, dict[str, str]]:
+        """Creates a new prompt based on feedback from original results
+
+        Arguments:
+            original_prompt: original prompt used to produce output
+            original_output: origial output of llm
+            errors: list of errors detected by parser
+
+        Returns:
+            Tuple of new prompt and prompt arguments
+        """
+        raise NotImplementedError
+
+
+class BasicRefiner(Refiner):
+    def __init__(
+        self,
+        prompt_name: str,
+        model_name: str,
+        source_language: str,
+    ) -> None:
+        """Basic refiner, asks llm to fix output of previous prompt given errors
+
+        Arguments:
+            prompt_name: refinement prompt name to use
+            model_name: name of llm to use
+            source_language: source_langauge to use
+        """
+        self._prompt_name = prompt_name
+        self._model_name = model_name
+        self._source_language = source_language
+
+    def refine(
+        self, original_prompt: str, original_output: str, errors: str, **kwargs
+    ) -> tuple[ChatPromptTemplate, dict[str, str]]:
+        """Creates a new prompt based on feedback from original results
+
+        Arguments:
+            original_prompt: original prompt used to produce output
+            original_output: origial output of llm
+            errors: list of errors detected by parser
+
+        Returns:
+            Tuple of new prompt and prompt arguments
+        """
+        prompt_engine = MODEL_PROMPT_ENGINES[self._model_name](
+            prompt_template=self._prompt_name,
+            source_language=self._source_language,
+        )
+        prompt_arguments = {
+            "ORIGINAL_PROMPT": original_prompt,
+            "OUTPUT": original_output,
+            "ERRORS": errors,
+        }
+        return prompt_engine.prompt, prompt_arguments
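A short sketch of what BasicRefiner.refine hands back to the caller. The literal prompt, output, and error strings below are made up; it assumes "basic_refinement" is a prompt template shipped with janus (as _load_refiner in converter.py suggests) and that "gpt-4o-mini" is a key of MODEL_PROMPT_ENGINES:

# Hypothetical usage sketch, not from the package's tests or docs.
from janus.refiners.refiner import BasicRefiner

refiner = BasicRefiner(
    prompt_name="basic_refinement",
    model_name="gpt-4o-mini",
    source_language="fortran",
)
prompt, args = refiner.refine(
    original_prompt="Translate this Fortran routine to Python 3.10.",
    original_output="def f(:  # invalid",
    errors="SyntaxError: unexpected token ':'",
)
# `prompt` is the ChatPromptTemplate built by the model's prompt engine from the
# "basic_refinement" template; `args` maps ORIGINAL_PROMPT, OUTPUT, and ERRORS to
# the strings above, ready to be fed through `prompt | llm`.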
{janus_llm-3.2.1.dist-info → janus_llm-3.3.1.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-janus/__init__.py,sha256=
+janus/__init__.py,sha256=43wVstqzlnt_SPfroA9wuz3ujZOg6wniq_i991815fM,361
 janus/__main__.py,sha256=lEkpNtLVPtFo8ySDZeXJ_NXDHb0GVdZFPWB4gD4RPS8,64
 janus/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 janus/_tests/conftest.py,sha256=V7uW-oq3YbFiRPvrq15YoVVrA1n_83pjgiyTZ-IUGW8,963
@@ -6,9 +6,9 @@ janus/_tests/test_cli.py,sha256=oYJsUGWfpBJWEGRG5NGxdJedU5DU_m6fwJ7xEbJVYl0,4244
 janus/cli.py,sha256=S6IaQyUG55xSB166xARn6TQc_cOCIQ_ZMdIi7vj6BMk,31626
 janus/converter/__init__.py,sha256=U2EOMcCykiC0ZqhorNefOP_04hOF18qhYoPKrVp1Vrk,345
 janus/converter/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-janus/converter/_tests/test_translate.py,sha256=
-janus/converter/converter.py,sha256=
-janus/converter/diagram.py,sha256=
+janus/converter/_tests/test_translate.py,sha256=yzcFEGc_z8QmBBBmC9dZnfL9tT8CD1rkpc8Hz44Jp4c,5631
+janus/converter/converter.py,sha256=m0eGrybrPWw2B0nXrd1Fx6o0BO1JvY4PfC0rFUtpLxY,25633
+janus/converter/diagram.py,sha256=5mo1H3Y1uIBPYdIsWz9kxluN5DNyuUMZrtcJmGF2Uw0,5335
 janus/converter/document.py,sha256=hsW512veNjFWbdl5WriuUdNmMEqZy8ktRvqn9rRmA6E,4566
 janus/converter/evaluate.py,sha256=APWQUY3gjAXqkJkPzvj0UA4wPK3Cv9QSJLM-YK9t-ng,476
 janus/converter/requirements.py,sha256=6YvrJRVH9BuPCOPxnXmaJQFYmoLYYvCu3zTntDLHeNg,1832
@@ -57,7 +57,7 @@ janus/language/treesitter/_tests/test_treesitter.py,sha256=4S_UdH6AfJ0j6hyInZ2CF
 janus/language/treesitter/treesitter.py,sha256=FU86H8j2cfRLtwyNeEnf9A6gzZEvDwmnFCIrQymUJso,7541
 janus/llm/__init__.py,sha256=TKLYvnsWKWfxMucy-lCLQ-4bkN9ENotJZDywDEQmrKg,45
 janus/llm/model_callbacks.py,sha256=K7P5NY-rf7IYRAFHnZ3kzhrQWE6g_najx8uxlaSiz3E,7110
-janus/llm/models_info.py,sha256=
+janus/llm/models_info.py,sha256=FHyv9PKXAUxYCgO06bw4qzoniFsqYN21uD450BdpvyA,8358
 janus/metrics/__init__.py,sha256=AsxtZJUzZiXJPr2ehPPltuYP-ddechjg6X85WZUO7mA,241
 janus/metrics/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 janus/metrics/_tests/reference.py,sha256=hiaJPP9CXkvFBV_wL-gOe_BzELTw0nvB6uCxhxtIiE8,13
@@ -87,10 +87,12 @@ janus/parsers/_tests/test_code_parser.py,sha256=RVgMmLvg8_57g0uJphfX-jZZsyBqOSuG
 janus/parsers/code_parser.py,sha256=SZBsYThG4iszKlu4fHoWrs-6cbJiUFjWv4cLSr5bzDM,1790
 janus/parsers/doc_parser.py,sha256=bJiOE5M7npUZur_1MWJ14C2HZl7-yXExqRXiC5ZBJvI,5679
 janus/parsers/eval_parser.py,sha256=L1Lu2aNimcqUshe0FQee_9Zqj1rrqyZPXCgEAS05VJ4,2740
+janus/parsers/refiner_parser.py,sha256=72tOEhpHwCZqHDb2T4aS5bPsiXN3pQXUk_oOPupa3Ts,1621
 janus/parsers/reqs_parser.py,sha256=6YzpF63rjuDPqpKWfYvtjpsluWQ-UboWlsKoGrGQogA,2380
 janus/parsers/uml.py,sha256=ZRyGY8YxvYibacTd-WZEAAaW3XjmvJhPJE3o29f71t8,1825
 janus/prompts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 janus/prompts/prompt.py,sha256=3796YXIzzIec9b0iUzd8VZlq-AdQbzq8qUGXLy4KH-0,10586
+janus/refiners/refiner.py,sha256=O4i5JaPEWH_ijmHunTKP4YzX_ZwZIyOIckn4Hmf1ZOI,2084
 janus/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 janus/utils/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 janus/utils/_tests/test_logger.py,sha256=jkkvrCTKwsFCsZtmyuvc-WJ0rC7LJi2Z91sIe4IiKzA,2209
@@ -98,8 +100,8 @@ janus/utils/_tests/test_progress.py,sha256=Rs_u5PiGjP-L-o6C1fhwfE1ig8jYu9Xo9s4p8
 janus/utils/enums.py,sha256=AoilbdiYyMvY2Mp0AM4xlbLSELfut2XMwhIM1S_msP4,27610
 janus/utils/logger.py,sha256=KZeuaMAnlSZCsj4yL0P6N-JzZwpxXygzACWfdZFeuek,2337
 janus/utils/progress.py,sha256=PIpcQec7SrhsfqB25LHj2CDDkfm9umZx90d9LZnAx6k,1469
-janus_llm-3.
-janus_llm-3.
-janus_llm-3.
-janus_llm-3.
-janus_llm-3.
+janus_llm-3.3.1.dist-info/LICENSE,sha256=_j0st0a-HB6MRbP3_BW3PUqpS16v54luyy-1zVyl8NU,10789
+janus_llm-3.3.1.dist-info/METADATA,sha256=gqeMDm75fd7jLYsQHhaDgXKM1rIG0vh1N7jsOanSS80,4184
+janus_llm-3.3.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+janus_llm-3.3.1.dist-info/entry_points.txt,sha256=OGhQwzj6pvXp79B0SaBD5apGekCu7Dwe9fZZT_TZ544,39
+janus_llm-3.3.1.dist-info/RECORD,,
{janus_llm-3.2.1.dist-info → janus_llm-3.3.1.dist-info}/LICENSE
File without changes
{janus_llm-3.2.1.dist-info → janus_llm-3.3.1.dist-info}/WHEEL
File without changes
{janus_llm-3.2.1.dist-info → janus_llm-3.3.1.dist-info}/entry_points.txt
File without changes