janus-llm 4.4.5__py3-none-any.whl → 4.5.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. janus/__init__.py +1 -1
  2. janus/cli/pipeline.py +6 -3
  3. janus/cli/self_eval.py +9 -0
  4. janus/converter/__init__.py +2 -0
  5. janus/converter/_tests/test_translate.py +1 -0
  6. janus/converter/chain.py +53 -133
  7. janus/converter/converter.py +199 -77
  8. janus/converter/diagram.py +5 -3
  9. janus/converter/document.py +10 -4
  10. janus/converter/evaluate.py +148 -113
  11. janus/converter/partition.py +4 -1
  12. janus/converter/passthrough.py +29 -0
  13. janus/converter/pool.py +74 -0
  14. janus/converter/requirements.py +4 -1
  15. janus/language/_tests/test_combine.py +1 -0
  16. janus/language/block.py +84 -3
  17. janus/llm/model_callbacks.py +6 -0
  18. janus/llm/models_info.py +19 -0
  19. janus/metrics/_tests/test_reading.py +48 -4
  20. janus/metrics/_tests/test_rouge_score.py +5 -11
  21. janus/metrics/reading.py +48 -28
  22. janus/metrics/rouge_score.py +21 -34
  23. janus/parsers/_tests/test_code_parser.py +1 -1
  24. janus/parsers/code_parser.py +2 -2
  25. janus/parsers/eval_parsers/incose_parser.py +3 -3
  26. janus/prompts/templates/cyclic/human.txt +16 -0
  27. janus/prompts/templates/cyclic/system.txt +1 -0
  28. janus/prompts/templates/eval_prompts/incose/human.txt +1 -1
  29. janus/prompts/templates/extract_variables/human.txt +5 -0
  30. janus/prompts/templates/extract_variables/system.txt +1 -0
  31. {janus_llm-4.4.5.dist-info → janus_llm-4.5.4.dist-info}/METADATA +3 -4
  32. {janus_llm-4.4.5.dist-info → janus_llm-4.5.4.dist-info}/RECORD +35 -29
  33. {janus_llm-4.4.5.dist-info → janus_llm-4.5.4.dist-info}/WHEEL +1 -1
  34. {janus_llm-4.4.5.dist-info → janus_llm-4.5.4.dist-info}/LICENSE +0 -0
  35. {janus_llm-4.4.5.dist-info → janus_llm-4.5.4.dist-info}/entry_points.txt +0 -0
janus/llm/models_info.py CHANGED
@@ -96,12 +96,16 @@ claude_models = [
      "bedrock-claude-haiku",
      "bedrock-claude-sonnet",
      "bedrock-claude-sonnet-3.5",
+     "bedrock-claude-sonnet-3.5-v2",
  ]
  llama2_models = [
      "bedrock-llama2-70b",
      "bedrock-llama2-70b-chat",
      "bedrock-llama2-13b",
      "bedrock-llama2-13b-chat",
+     "bedrock-llama3-8b-instruct",
+     "bedrock-llama3-70b-instruct",
+     "bedrock-llama3-3-70b-instruct",
  ]
  llama3_models = [
      "bedrock-llama3-8b-instruct",
@@ -113,6 +117,11 @@ titan_models = [
      "bedrock-jurassic-2-mid",
      "bedrock-jurassic-2-ultra",
  ]
+ nova_models = [
+     "bedrock-nova-lite",
+     "bedrock-nova-micro",
+     "bedrock-nova-pro",
+ ]
  cohere_models = [
      "bedrock-command-r-plus",
  ]
@@ -160,12 +169,17 @@ MODEL_ID_TO_LONG_ID = {
      "bedrock-claude-haiku": "anthropic.claude-3-haiku-20240307-v1:0",
      "bedrock-claude-sonnet": "anthropic.claude-3-sonnet-20240229-v1:0",
      "bedrock-claude-sonnet-3.5": "anthropic.claude-3-5-sonnet-20240620-v1:0",
+     "bedrock-claude-sonnet-3.5-v2": "anthropic.claude-3-5-sonnet-20241022-v2:0",
      "bedrock-llama2-70b": "meta.llama2-70b-v1",
      "bedrock-llama2-70b-chat": "meta.llama2-70b-chat-v1",
      "bedrock-llama2-13b": "meta.llama2-13b-chat-v1",
      "bedrock-llama2-13b-chat": "meta.llama2-13b-v1",
      "bedrock-llama3-8b-instruct": "meta.llama3-8b-instruct-v1:0",
      "bedrock-llama3-70b-instruct": "meta.llama3-70b-instruct-v1:0",
+     "bedrock-llama3-3-70b-instruct": "meta.llama3-3-70b-instruct-v1:0",
+     "bedrock-nova-lite": "amazon.nova-lite-v1:0",
+     "bedrock-nova-micro": "amazon.nova-micro-v1:0",
+     "bedrock-nova-pro": "amazon.nova-pro-v1:0",
      "bedrock-titan-text-lite": "amazon.titan-text-lite-v1",
      "bedrock-titan-text-express": "amazon.titan-text-express-v1",
      "bedrock-jurassic-2-mid": "ai21.j2-mid-v1",
@@ -208,12 +222,17 @@ TOKEN_LIMITS: dict[str, int] = {
      "anthropic.claude-3-haiku-20240307-v1:0": 248_000,
      "anthropic.claude-3-sonnet-20240229-v1:0": 248_000,
      "anthropic.claude-3-5-sonnet-20240620-v1:0": 200_000,
+     "anthropic.claude-3-5-sonnet-20241022-v2:0": 200_000,
      "meta.llama2-70b-v1": 4096,
      "meta.llama2-70b-chat-v1": 4096,
      "meta.llama2-13b-chat-v1": 4096,
      "meta.llama2-13b-v1": 4096,
      "meta.llama3-8b-instruct-v1:0": 8000,
      "meta.llama3-70b-instruct-v1:0": 8000,
+     "meta.llama3-3-70b-instruct-v1:0": 128_000,
+     "amazon.nova-lite-v1:0": 300_000,
+     "amazon.nova-micro-v1:0": 128_000,
+     "amazon.nova-pro-v1:0": 300_000,
      "amazon.titan-text-lite-v1": 4096,
      "amazon.titan-text-express-v1": 8192,
      "ai21.j2-mid-v1": 8192,
janus/metrics/_tests/test_reading.py CHANGED
@@ -1,11 +1,25 @@
  import unittest

- from janus.metrics.reading import _repeat_text, flesch, gunning_fog
+ from janus.metrics.reading import (
+     _repeat_text,
+     automated_readability,
+     coleman_liau,
+     dale_chall,
+     flesch,
+     flesch_grade,
+     gunning_fog,
+     word_count,
+ )


  class TestReading(unittest.TestCase):
      def setUp(self):
-         self.text = "This is a sample text for testing readability metrics"
+         self.text = "This is a sample text for testing readability metrics."
+
+     def test_word_count(self):
+         """Test the word_count function."""
+         count = word_count(self.text)
+         self.assertEqual(count, 9)

      def test_repeat_text(self):
          """Test the _repeat_text function."""
@@ -16,12 +30,42 @@ class TestReading(unittest.TestCase):
      def test_flesch(self):
          """Test the Flesch readability score."""
          score = flesch(self.text)
-         self.assertAlmostEqual(score, 47.3, places=2)
+         self.assertAlmostEqual(score, 45.42, places=2)
+
+     def test_flesch_grade(self):
+         """Test the Flesch Grade Level readability score."""
+         score = flesch_grade(self.text)
+         self.assertAlmostEqual(score, 9.2, places=2)

      def test_gunning_fog(self):
          """Test the Gunning-Fog readability score."""
          score = gunning_fog(self.text)
-         self.assertAlmostEqual(score, 8.04, places=2)
+         self.assertAlmostEqual(score, 3.97, places=2)
+
+     def test_dale_chall(self):
+         """Test the Dale-Chall readability score."""
+         score = dale_chall(self.text)
+         self.assertAlmostEqual(score, 4.67, places=2)
+
+     def test_automated_readability(self):
+         """Test the Automated Readability Index score."""
+         score = automated_readability(self.text)
+         self.assertAlmostEqual(score, 7.1, places=2)
+
+     def test_coleman_liau(self):
+         """Test the Coleman-Liau Index."""
+         score = coleman_liau(self.text)
+         self.assertAlmostEqual(score, 9.94, places=2)
+
+     def test_blank_target(self):
+         """Test that blank targets return None for all metric functions."""
+         blank = " "  # blank string with whitespaces
+         self.assertIsNone(flesch(blank))
+         self.assertIsNone(flesch_grade(blank))
+         self.assertIsNone(gunning_fog(blank))
+         self.assertIsNone(dale_chall(blank))
+         self.assertIsNone(automated_readability(blank))
+         self.assertIsNone(coleman_liau(blank))


  if __name__ == "__main__":
janus/metrics/_tests/test_rouge_score.py CHANGED
@@ -12,19 +12,13 @@ class TestRouge(unittest.TestCase):
          score = rouge(
              self.target, self.reference, granularity="n", n_gram=2, score_type="f"
          )
-         self.assertIsInstance(score, float)
+         self.assertEqual(score, 0.5)

      def test_rouge_with_granularity_l(self):
          score = rouge(
              self.target, self.reference, granularity="l", n_gram=2, score_type="f"
          )
-         self.assertIsInstance(score, float)
-
-     def test_rouge_with_granularity_w(self):
-         score = rouge(
-             self.target, self.reference, granularity="w", n_gram=2, score_type="f"
-         )
-         self.assertIsInstance(score, float)
+         self.assertAlmostEqual(score, 0.8, places=2)

      def test_rouge_with_invalid_granularity(self):
          with self.assertRaises(ValueError):
@@ -40,19 +34,19 @@ class TestRouge(unittest.TestCase):
          score = rouge(
              self.target, self.reference, granularity="n", n_gram=2, score_type="f"
          )
-         self.assertIsInstance(score, float)
+         self.assertAlmostEqual(score, 0.5, places=2)

      def test_rouge_with_score_type_p(self):
          score = rouge(
              self.target, self.reference, granularity="n", n_gram=2, score_type="p"
          )
-         self.assertIsInstance(score, float)
+         self.assertAlmostEqual(score, 0.5, places=2)

      def test_rouge_with_score_type_r(self):
          score = rouge(
              self.target, self.reference, granularity="n", n_gram=2, score_type="r"
          )
-         self.assertIsInstance(score, float)
+         self.assertAlmostEqual(score, 0.5, places=2)

      def test_rouge_with_invalid_score_type(self):
          with self.assertRaises(ValueError):
janus/metrics/reading.py CHANGED
@@ -1,8 +1,7 @@
  import re

- import nltk
- import readability
  from nltk.tokenize import TweetTokenizer
+ from textstat import textstat

  from janus.metrics.metric import metric

@@ -48,23 +47,9 @@ def _repeat_text(text):
      return repeated_text


- def get_readability(target: str) -> readability.Readability:
-     """Create a Readability object from an input string
-
-     Arguments:
-         target: The target text.
-
-     Returns:
-         py-readability-metrics Readability object for that text
-     """
-     nltk.download("punkt", quiet=True)
-     target = _repeat_text(target)
-     return readability.Readability(target)
-
-
  @metric(use_reference=False, help="The Flesch Readability score")
  def flesch(target: str, **kwargs) -> float:
-     """Calculate the Flesch Score using py-readability-metrics.
+     """Calculate the Flesch Score using textstat.

      Arguments:
          target: The target text.
@@ -74,12 +59,13 @@ def flesch(target: str, **kwargs) -> float:
      """
      if not target.strip():  # Check if the target text is blank
          return None
-     return get_readability(target).flesch().score
+     target = _repeat_text(target)
+     return textstat.flesch_reading_ease(target)


  @metric(use_reference=False, help="The Flesch Grade Level Readability score")
  def flesch_grade(target: str, **kwargs) -> float:
-     """Calculate the Flesch Score using py-readability-metrics.
+     """Calculate the Flesch Score using textstat.

      Arguments:
          target: The target text.
@@ -89,12 +75,13 @@ def flesch_grade(target: str, **kwargs) -> float:
      """
      if not target.strip():  # Check if the target text is blank
          return None
-     return get_readability(target).flesch_kincaid().score
+     target = _repeat_text(target)
+     return textstat.flesch_kincaid_grade(target)


  @metric(use_reference=False, help="The Gunning-Fog Readability score")
  def gunning_fog(target: str, **kwargs) -> float:
-     """Calculate the Gunning-Fog Score using py-readability-metrics.
+     """Calculate the Gunning-Fog Score using textstat.

      Arguments:
          target: The target text.
@@ -104,20 +91,53 @@ def gunning_fog(target: str, **kwargs) -> float:
      """
      if not target.strip():  # Check if the target text is blank
          return None
-     return get_readability(target).gunning_fog().score
+     target = _repeat_text(target)
+     return textstat.gunning_fog(target)


- @metric(use_reference=False, help="The Gunning-Fog Grade Level Readability score")
- def gunning_fog_grade(target: str, **kwargs) -> float:
-     """Calculate the Gunning-Fog Grade Level Score using py-readability-metrics.
+ @metric(use_reference=False, help="The Dale-Chall Readability score")
+ def dale_chall(target: str, **kwargs) -> float:
+     """Calculate the Dale-Chall Readability Score using textstat.

      Arguments:
          target: The target text.

      Returns:
-         The Gunning-Fog Grade Level score.
+         The Dale-Chall score.
      """
      if not target.strip():  # Check if the target text is blank
          return None
-     grade_level = get_readability(target).gunning_fog().grade_level
-     return None if grade_level == "na" else grade_level
+     target = _repeat_text(target)
+     return textstat.dale_chall_readability_score_v2(target)
+
+
+ @metric(use_reference=False, help="The Automated Readability Index")
+ def automated_readability(target: str, **kwargs) -> float:
+     """Calculate the Automated Readability Index using textstat.
+
+     Arguments:
+         target: The target text.
+
+     Returns:
+         The Automated Readability score.
+     """
+     if not target.strip():  # Check if the target text is blank
+         return None
+     target = _repeat_text(target)
+     return textstat.automated_readability_index(target)
+
+
+ @metric(use_reference=False, help="The Coleman-Liau Index")
+ def coleman_liau(target: str, **kwargs) -> float:
+     """Calculate the Coleman-Liau Index using textstat.
+
+     Arguments:
+         target: The target text.
+
+     Returns:
+         The Coleman-Liau Index.
+     """
+     if not target.strip():  # Check if the target text is blank
+         return None
+     target = _repeat_text(target)
+     return textstat.coleman_liau_index(target)
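Note: the reading metrics now delegate to textstat rather than py-readability-metrics, each one padding short inputs via `_repeat_text` before scoring. A minimal standalone sketch of the same flow (the repeat count of 20 is an illustrative assumption, not janus's constant; the sample sentence is the one used in the tests above):

```python
# Standalone sketch of the textstat-based flow the diff introduces; repeating a
# short passage, as janus's _repeat_text does, keeps the statistics stable.
from textstat import textstat

text = "This is a sample text for testing readability metrics."
padded = " ".join([text] * 20)  # illustrative padding, not janus's exact rule

print("Flesch reading ease:  ", textstat.flesch_reading_ease(padded))
print("Flesch-Kincaid grade: ", textstat.flesch_kincaid_grade(padded))
print("Gunning fog:          ", textstat.gunning_fog(padded))
print("Dale-Chall (v2):      ", textstat.dale_chall_readability_score_v2(padded))
print("Automated readability:", textstat.automated_readability_index(padded))
print("Coleman-Liau:         ", textstat.coleman_liau_index(padded))
```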
janus/metrics/rouge_score.py CHANGED
@@ -1,7 +1,6 @@
  import click
- import nltk
  import typer
- from rouge import Rouge
+ from rouge_score import rouge_scorer
  from typing_extensions import Annotated

  from janus.metrics.metric import metric
@@ -18,9 +17,9 @@ def rouge(
              "-g",
              help=(
                  "The granularity of the ROUGE score. `n` refers to "
-                 "ROUGE-N, `l` refers to ROUGE-L, and `w` refers to ROUGE-W."
+                 "ROUGE-N, `l` refers to ROUGE-L."
              ),
-             click_type=click.Choice(["n", "l", "w"]),
+             click_type=click.Choice(["n", "l"]),
          ),
      ] = "n",
      n_gram: Annotated[
@@ -52,7 +51,7 @@ def rouge(
          target: The target text.
          reference: The reference text.
          granularity: The granularity of the ROUGE score. `n` refers to ROUGE-N, `l`
-             refers to ROUGE-L, and `w` refers to ROUGE-W.
+             refers to ROUGE-L.
          n_gram: The n-gram overlap calculated for ROUGE-N. Can be an integer.
          score_type: Whether to use the F-score, precision, or recall. For example, `f`
              refers to the F-score, `p` refers to precision, and `r` refers to recall.
@@ -60,37 +59,25 @@ def rouge(
      Returns:
          The ROUGE score.
      """
-     nltk.download("punkt", quiet=True)
-
      if granularity.lower() == "n":
-         metric_name = "rouge-n"
-         metric_name_output = f"rouge-{n_gram}"
-         max_n = n_gram
+         metric_name = f"rouge{n_gram}"
      elif granularity.lower() == "l":
-         metric_name = "rouge-l"
-         metric_name_output = "rouge-l"
-         max_n = 4
-     elif granularity.lower() == "w":
-         metric_name = "rouge-w"
-         metric_name_output = "rouge-w"
-         max_n = 4
+         metric_name = "rougeL"
      else:
-         raise ValueError("Invalid granularity. Must be one of `n`, `l`, or `w`.")
-
-     if score_type.lower() not in ["f", "p", "r"]:
-         raise ValueError("Invalid score type. Must be one of `f`, `p`, or `r`.")
+         raise ValueError("Invalid granularity. Must be one of `n` or `l`")

-     evaluator = Rouge(
-         metrics=[metric_name],
-         max_n=max_n,
-         limit_length=False,
-         length_limit=1_000,
-         length_limit_type="words",
-         apply_avg=False,
-         apply_best=False,
-         alpha=0.5,  # Default F1_score
-         weight_factor=1.2,
-         stemming=True,
+     evaluator = rouge_scorer.RougeScorer(
+         [metric_name],
+         use_stemmer=True,
      )
-     scores = evaluator.get_scores(target, reference)
-     return scores[metric_name_output][0][score_type.lower()][0]
+     scores = evaluator.score(target, reference)
+     scores_fpr = scores[metric_name]
+     if score_type.lower() == "f":
+         score = scores_fpr.fmeasure
+     elif score_type.lower() == "p":
+         score = scores_fpr.precision
+     elif score_type.lower() == "r":
+         score = scores_fpr.recall
+     else:
+         raise ValueError("Invalid score type. Must be one of `f`, `p`, or `r`.")
+     return score
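Note: the rewritten metric relies on Google's rouge-score package instead of py-rouge. A minimal sketch of the API used above (the sentences are invented; `score()` takes the reference text first and the candidate second, and returns Score tuples with precision, recall, and fmeasure fields):

```python
# Minimal sketch of the rouge-score call pattern; example strings are made up.
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rouge2", "rougeL"], use_stemmer=True)
result = scorer.score(
    "the quick brown fox jumps over the lazy dog",  # reference text
    "the quick brown dog jumps over the lazy fox",  # candidate text
)

# Each entry is a Score namedtuple with precision, recall, and fmeasure.
for name, score in result.items():
    print(name, round(score.precision, 3), round(score.recall, 3), round(score.fmeasure, 3))
```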
janus/parsers/_tests/test_code_parser.py CHANGED
@@ -25,7 +25,7 @@ class TestCodeParser(unittest.TestCase):
      def test_get_format_instructions(self):
          self.assertEqual(
              self.parser.get_format_instructions(),
-             "Output must contain text contained within triple square brackets (```)",
+             "Output must contain text contained within triple backticks (```)",
          )


janus/parsers/code_parser.py CHANGED
@@ -19,9 +19,9 @@ class CodeParser(JanusParser):
          if code is None:
              raise JanusParserException(
                  text,
-                 "Code not find code between triple square brackets",
+                 "Code not find code between triple backticks",
              )
          return str(code.group(1))

      def get_format_instructions(self) -> str:
-         return "Output must contain text contained within triple square brackets (```)"
+         return "Output must contain text contained within triple backticks (```)"
janus/parsers/eval_parsers/incose_parser.py CHANGED
@@ -70,7 +70,6 @@ class IncoseParser(JanusParser, PydanticOutputParser):

          obj = json.loads(text)

-         # For some reason requirements objects are in a double list?
          reqs = obj["requirements"]

          # Generate a unique ID for each requirement (ensure they are unique)
@@ -91,10 +90,11 @@ class IncoseParser(JanusParser, PydanticOutputParser):

          # Strip everything outside the JSON object
          begin, end = text.find("["), text.rfind("]")
-         text = text[begin : end + 1]
+         end += 1 if end != -1 else 0
+         text = text[begin:end]

          try:
-             out: RequirementList = super().parse(text)
+             out: RequirementList = super(IncoseParser, self).parse(text)
          except json.JSONDecodeError as e:
              log.debug(f"Invalid JSON object. Output:\n{text}")
              raise OutputParserException(f"Got invalid JSON object. Error: {e}")
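Note: the slicing change above guards against `rfind` returning -1 when the model output has no closing bracket. A small illustration of the difference (the input string is invented for the example):

```python
# When there is no "]", rfind returns -1 and the old `text[begin : end + 1]`
# collapsed to text[0:0], an empty string; the guarded version leaves end at -1.
text = '[{"requirement": "incomplete JSON with no closing bracket"'

begin, end = text.find("["), text.rfind("]")
old_slice = text[begin : end + 1]   # end == -1, so this is text[0:0] == ""
end += 1 if end != -1 else 0        # only advance past "]" if one was found
new_slice = text[begin:end]         # end is still -1, so this keeps text[0:-1]

print(repr(old_slice))  # ''
print(repr(new_slice))  # the (still malformed) text, minus its last character
```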
janus/prompts/templates/cyclic/human.txt ADDED
@@ -0,0 +1,16 @@
+ You are tasked with generating code in the {TARGET_LANGUAGE} language given a list of requirements.
+
+
+ 1. Read all requirements.
+ 2. Write code that addresses all requirments, ensuring that all mentioned conditions are met.
+ 3. Adhere to the coding conventions and best practices of the {TARGET_LANGUAGE} language.
+ 4. Ensure the code is correct, well-structured, and includes comments for readability.
+ 5. The code you provide should be succienct, concise, and runable.
+
+ Here are the requirements for the code:
+
+ ```
+ {SOURCE_CODE}
+ ```
+
+ Don't forget to include your final code between triple backticks!
janus/prompts/templates/cyclic/system.txt ADDED
@@ -0,0 +1 @@
+ Your task is to generate code based on the provided requirements. The code should be written in the {TARGET_LANGUAGE} language. Make sure the code adheres to best practices, is efficient, and well-documented.
janus/prompts/templates/eval_prompts/incose/human.txt CHANGED
@@ -17,7 +17,7 @@ C9 - Conforming: Individual needs and requirements should conform to an approved

  For each and every requirement below, you must indicate whether they "pass" or "fail" each of the above criteria. Briefly explain your reasoning before providing each pass/fail.

- Your response should be formatted as a list of JSON objects, with each object corresponding to one requirement. Each object should include 10 keys: `requirement_id`, `C1`, `C2`, ..., `C9`. `requirement_id` should have a string value that holds the 8-character UUID associated with the requirement. The other four values should each be a JSON object with two keys: `reasoning` (a clear explanation of why the criterion is passed or failed) and a `score` (the literal string "pass" or "fail").
+ Your response should be formatted as a list of JSON objects, with each object corresponding to one requirement. Each object should include 10 keys: `requirement_id`, `C1`, `C2`, ..., `C9`. `requirement_id` should have a string value that holds the 8-character UUID associated with the requirement. The other four values should each be a JSON object with two keys: `reasoning` (a clear explanation of why the criterion is passed or failed) and a `score` (the literal string "pass" or "fail"). You should also include the requirement itself as a string value for the key `requirement`.

  Be discerning in your evaluation; only very high-quality requirements should pass all criteria. Be a hard grader. If a requirement fails a criterion, be thorough and detailed in your explanation of why.

janus/prompts/templates/extract_variables/human.txt ADDED
@@ -0,0 +1,5 @@
+ Extract all global variables and functions from the {SOURCE_LANGUAGE} code below.
+ Here is the source code:
+ ```
+ {SOURCE_CODE}
+ ```
janus/prompts/templates/extract_variables/system.txt ADDED
@@ -0,0 +1 @@
+ You are a senior software engineer named John and tasked with creating intermediate products of {SOURCE_LANGUAGE} code.
{janus_llm-4.4.5.dist-info → janus_llm-4.5.4.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: janus-llm
- Version: 4.4.5
+ Version: 4.5.4
  Summary: A transcoding library using LLMs.
  License: Apache 2.0
  Author: Michael Doyle
@@ -24,22 +24,21 @@ Requires-Dist: langchain-community (>=0.2.0,<0.3.0)
  Requires-Dist: langchain-core (>=0.2.0,<0.3.0)
  Requires-Dist: langchain-openai (>=0.1.8,<0.2.0)
  Requires-Dist: langchain-unstructured (>=0.1.2,<0.2.0)
- Requires-Dist: nltk (>=3.8.1,<4.0.0)
  Requires-Dist: numpy (>=1.24.3,<2.0.0)
  Requires-Dist: openai (>=1.14.0,<2.0.0)
  Requires-Dist: pi-heif (>=0.20.0,<0.21.0)
- Requires-Dist: py-readability-metrics (>=1.4.5,<2.0.0)
- Requires-Dist: py-rouge (>=1.1,<2.0)
  Requires-Dist: pybind11 (>=2.13.6,<3.0.0)
  Requires-Dist: pytesseract (>=0.3.13,<0.4.0)
  Requires-Dist: python-dotenv (>=1.0.0,<2.0.0)
  Requires-Dist: rich (>=13.7.1,<14.0.0)
+ Requires-Dist: rouge-score (>=0.1.2,<0.2.0)
  Requires-Dist: sacrebleu (>=2.4.1,<3.0.0)
  Requires-Dist: scikit-learn (>=1.5.2,<2.0.0)
  Requires-Dist: sentence-transformers (>=2.6.1,<3.0.0) ; extra == "hf-local" or extra == "all"
  Requires-Dist: setuptools (>=75.6.0,<76.0.0)
  Requires-Dist: tesseract (>=0.1.3,<0.2.0)
  Requires-Dist: text-generation (>=0.6.0,<0.7.0)
+ Requires-Dist: textstat (>=0.7.5,<0.8.0)
  Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
  Requires-Dist: transformers (>=4.31.0,<5.0.0)
  Requires-Dist: tree-sitter (>=0.21.0,<0.22.0)