janus-llm 4.0.0__py3-none-any.whl → 4.1.0__py3-none-any.whl
- janus/__init__.py +1 -1
- janus/cli.py +25 -1
- janus/converter/_tests/test_translate.py +2 -2
- janus/llm/model_callbacks.py +9 -0
- janus/llm/models_info.py +38 -17
- {janus_llm-4.0.0.dist-info → janus_llm-4.1.0.dist-info}/METADATA +1 -1
- {janus_llm-4.0.0.dist-info → janus_llm-4.1.0.dist-info}/RECORD +10 -10
- {janus_llm-4.0.0.dist-info → janus_llm-4.1.0.dist-info}/LICENSE +0 -0
- {janus_llm-4.0.0.dist-info → janus_llm-4.1.0.dist-info}/WHEEL +0 -0
- {janus_llm-4.0.0.dist-info → janus_llm-4.1.0.dist-info}/entry_points.txt +0 -0
janus/__init__.py
CHANGED
@@ -5,7 +5,7 @@ from langchain_core._api.deprecation import LangChainDeprecationWarning
 from janus.converter.translate import Translator
 from janus.metrics import *  # noqa: F403
 
-__version__ = "4.0.0"
+__version__ = "4.1.0"
 
 # Ignoring a deprecation warning from langchain_core that I can't seem to hunt down
 warnings.filterwarnings("ignore", category=LangChainDeprecationWarning)
janus/cli.py
CHANGED
@@ -39,6 +39,7 @@ from janus.llm.models_info import (
     MODEL_TYPE_CONSTRUCTORS,
     MODEL_TYPES,
     TOKEN_LIMITS,
+    azure_models,
     bedrock_models,
     openai_models,
 )
@@ -952,7 +953,7 @@ def llm_add(
             help="The type of the model",
             click_type=click.Choice(sorted(list(MODEL_TYPE_CONSTRUCTORS.keys()))),
         ),
-    ] = "OpenAI",
+    ] = "Azure",
 ):
     if not MODEL_CONFIG_DIR.exists():
         MODEL_CONFIG_DIR.mkdir(parents=True)
@@ -996,6 +997,7 @@ def llm_add(
             "model_cost": {"input": in_cost, "output": out_cost},
         }
     elif model_type == "OpenAI":
+        print("DEPRECATED: Use 'Azure' instead. CTRL+C to exit.")
         model_id = typer.prompt(
             "Enter the model ID (list model IDs with `janus llm ls -a`)",
             default="gpt-4o",
@@ -1017,6 +1019,28 @@ def llm_add(
             "token_limit": max_tokens,
             "model_cost": model_cost,
         }
+    elif model_type == "Azure":
+        model_id = typer.prompt(
+            "Enter the model ID (list model IDs with `janus llm ls -a`)",
+            default="gpt-4o",
+            type=click.Choice(azure_models),
+            show_choices=False,
+        )
+        params = dict(
+            # Azure uses the "azure_deployment" key for what we're calling "long_model_id"
+            azure_deployment=MODEL_ID_TO_LONG_ID[model_id],
+            temperature=0.7,
+            n=1,
+        )
+        max_tokens = TOKEN_LIMITS[MODEL_ID_TO_LONG_ID[model_id]]
+        model_cost = COST_PER_1K_TOKENS[MODEL_ID_TO_LONG_ID[model_id]]
+        cfg = {
+            "model_type": model_type,
+            "model_id": model_id,
+            "model_args": params,
+            "token_limit": max_tokens,
+            "model_cost": model_cost,
+        }
     elif model_type == "BedrockChat" or model_type == "Bedrock":
         model_id = typer.prompt(
             "Enter the model ID (list model IDs with `janus llm ls -a`)",
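
Note on the new "Azure" branch above: the sketch below shows the kind of config dict it builds, with the concrete values taken from the tables added in janus/llm/model_callbacks.py and janus/llm/models_info.py later in this diff. The assumption that the dict is serialized to a JSON file named after the model under ~/.janus/llm (MODEL_CONFIG_DIR) is an illustration, not something shown in this diff.

import json
from pathlib import Path

# Hypothetical result of registering an Azure model named "my-azure-gpt4o"
# with the default "gpt-4o" model ID.
cfg = {
    "model_type": "Azure",
    "model_id": "gpt-4o",
    "model_args": {
        # MODEL_ID_TO_LONG_ID["gpt-4o"] resolves to the Azure deployment name
        "azure_deployment": "gpt-4o-2024-08-06",
        "temperature": 0.7,
        "n": 1,
    },
    "token_limit": 128_000,                             # TOKEN_LIMITS["gpt-4o-2024-08-06"]
    "model_cost": {"input": 0.00275, "output": 0.011},  # COST_PER_1K_TOKENS["gpt-4o-2024-08-06"]
}

# Assumed persistence location (MODEL_CONFIG_DIR); the file name is illustrative.
config_path = Path.home() / ".janus" / "llm" / "my-azure-gpt4o.json"
config_path.parent.mkdir(parents=True, exist_ok=True)
config_path.write_text(json.dumps(cfg, indent=2))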
janus/converter/_tests/test_translate.py
CHANGED
@@ -90,14 +90,14 @@ class TestDiagramGenerator(unittest.TestCase):
     def setUp(self):
         """Set up the tests."""
         self.diagram_generator = DiagramGenerator(
-            model="gpt-4o",
+            model="gpt-4o-mini",
             source_language="fortran",
             diagram_type="Activity",
         )
 
     def test_init(self):
         """Test __init__ method."""
-        self.assertEqual(self.diagram_generator._model_name, "gpt-4o")
+        self.assertEqual(self.diagram_generator._model_name, "gpt-4o-mini")
         self.assertEqual(self.diagram_generator._source_language, "fortran")
         self.assertEqual(self.diagram_generator._diagram_type, "Activity")
 
janus/llm/model_callbacks.py
CHANGED
@@ -23,6 +23,11 @@ openai_model_reroutes = {
     "gpt-3.5-turbo-16k-0613": "gpt-3.5-turbo-0125",
 }
 
+azure_model_reroutes = {
+    "gpt-4o": "gpt-4o-2024-08-06",
+    "gpt-4o-mini": "gpt-4o-mini",
+    "gpt-3.5-turbo-16k": "gpt35-turbo-16k",
+}
 
 # Updated 2024-06-21
 COST_PER_1K_TOKENS: dict[str, dict[str, float]] = {
@@ -31,6 +36,10 @@ COST_PER_1K_TOKENS: dict[str, dict[str, float]] = {
     "gpt-4-0125-preview": {"input": 0.01, "output": 0.03},
     "gpt-4-0613": {"input": 0.03, "output": 0.06},
     "gpt-4o-2024-05-13": {"input": 0.005, "output": 0.015},
+    "gpt-4o-2024-08-06": {"input": 0.00275, "output": 0.011},
+    "gpt-4o-mini": {"input": 0.00015, "output": 0.0006},
+    "gpt35-turbo-16k": {"input": 0.003, "output": 0.004},
+    "gpt-35-turbo-16k": {"input": 0.003, "output": 0.004},
     "anthropic.claude-v2": {"input": 0.008, "output": 0.024},
     "anthropic.claude-instant-v1": {"input": 0.0008, "output": 0.0024},
     "anthropic.claude-3-haiku-20240307-v1:0": {"input": 0.00025, "output": 0.00125},
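
The new azure_model_reroutes table maps the short, user-facing model IDs onto the long IDs that the pricing (and token-limit) tables are keyed on. A small illustrative helper, not part of the package, showing how the two additions combine to estimate the dollar cost of a request:

from janus.llm.model_callbacks import COST_PER_1K_TOKENS, azure_model_reroutes


def estimate_cost_usd(short_id: str, input_tokens: int, output_tokens: int) -> float:
    """Rough request cost: per-1K rates from COST_PER_1K_TOKENS, resolved via the reroute table."""
    rates = COST_PER_1K_TOKENS[azure_model_reroutes[short_id]]
    return (input_tokens / 1000) * rates["input"] + (output_tokens / 1000) * rates["output"]


# 12k prompt tokens + 3k completion tokens on "gpt-4o" (-> "gpt-4o-2024-08-06"):
# 12 * 0.00275 + 3 * 0.011 = 0.066
print(estimate_cost_usd("gpt-4o", 12_000, 3_000))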
janus/llm/models_info.py
CHANGED
@@ -1,15 +1,14 @@
 import json
 import os
-import time
 from pathlib import Path
-from typing import Protocol, TypeVar
+from typing import Callable, Protocol, TypeVar
 
 from dotenv import load_dotenv
 from langchain_community.llms import HuggingFaceTextGenInference
 from langchain_core.runnables import Runnable
-from langchain_openai import ChatOpenAI
+from langchain_openai import AzureChatOpenAI
 
-from janus.llm.model_callbacks import COST_PER_1K_TOKENS, openai_model_reroutes
+from janus.llm.model_callbacks import COST_PER_1K_TOKENS, azure_model_reroutes
 from janus.prompts.prompt import (
     ChatGptPromptEngine,
     ClaudePromptEngine,
@@ -46,7 +45,7 @@ except ImportError:
 
 ModelType = TypeVar(
     "ModelType",
-    ChatOpenAI,
+    AzureChatOpenAI,
     HuggingFaceTextGenInference,
     Bedrock,
     BedrockChat,
@@ -72,7 +71,6 @@ class JanusModel(Runnable, JanusModelProtocol):
 
 load_dotenv()
 
-
 openai_models = [
     "gpt-4o",
     "gpt-4o-mini",
@@ -82,6 +80,11 @@ openai_models = [
     "gpt-3.5-turbo",
     "gpt-3.5-turbo-16k",
 ]
+azure_models = [
+    "gpt-4o",
+    "gpt-4o-mini",
+    "gpt-3.5-turbo-16k",
+]
 claude_models = [
     "bedrock-claude-v2",
     "bedrock-claude-instant-v1",
@@ -120,18 +123,21 @@ bedrock_models = [
     *cohere_models,
     *mistral_models,
 ]
-all_models = [*openai_models, *bedrock_models]
+all_models = [*azure_models, *bedrock_models]
 
 MODEL_TYPE_CONSTRUCTORS: dict[str, ModelType] = {
-    "OpenAI": ChatOpenAI,
+    # "OpenAI": ChatOpenAI,
     "HuggingFace": HuggingFaceTextGenInference,
+    "Azure": AzureChatOpenAI,
     "Bedrock": Bedrock,
     "BedrockChat": BedrockChat,
     "HuggingFaceLocal": HuggingFacePipeline,
 }
 
-MODEL_PROMPT_ENGINES: dict[str, type[PromptEngine]] = {
-    **{m: ChatGptPromptEngine for m in openai_models},
+
+MODEL_PROMPT_ENGINES: dict[str, Callable[..., PromptEngine]] = {
+    # **{m: ChatGptPromptEngine for m in openai_models},
+    **{m: ChatGptPromptEngine for m in azure_models},
     **{m: ClaudePromptEngine for m in claude_models},
     **{m: Llama2PromptEngine for m in llama2_models},
     **{m: Llama3PromptEngine for m in llama3_models},
@@ -141,7 +147,8 @@ MODEL_PROMPT_ENGINES: dict[str, type[PromptEngine]] = {
 }
 
 MODEL_ID_TO_LONG_ID = {
-    **{m: mr for m, mr in openai_model_reroutes.items()},
+    # **{m: mr for m, mr in openai_model_reroutes.items()},
+    **{m: mr for m, mr in azure_model_reroutes.items()},
     "bedrock-claude-v2": "anthropic.claude-v2",
     "bedrock-claude-instant-v1": "anthropic.claude-instant-v1",
     "bedrock-claude-haiku": "anthropic.claude-3-haiku-20240307-v1:0",
@@ -171,8 +178,9 @@ DEFAULT_MODELS = list(MODEL_DEFAULT_ARGUMENTS.keys())
 
 MODEL_CONFIG_DIR = Path.home().expanduser() / ".janus" / "llm"
 
-MODEL_TYPES: dict[str,
-    **{m: "OpenAI" for m in openai_models},
+MODEL_TYPES: dict[str, PromptEngine] = {
+    # **{m: "OpenAI" for m in openai_models},
+    **{m: "Azure" for m in azure_models},
     **{m: "BedrockChat" for m in bedrock_models},
 }
 
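
Taken together, the registries above now route the short Azure model IDs end to end. A minimal sketch of the lookups involved (mirroring what load_model() does further down in this file's diff; nothing here beyond plain dict access):

from janus.llm.models_info import (
    MODEL_ID_TO_LONG_ID,
    MODEL_PROMPT_ENGINES,
    MODEL_TYPE_CONSTRUCTORS,
    MODEL_TYPES,
)

model_id = "gpt-4o"
model_type_name = MODEL_TYPES[model_id]                 # "Azure"
constructor = MODEL_TYPE_CONSTRUCTORS[model_type_name]  # AzureChatOpenAI
prompt_engine_factory = MODEL_PROMPT_ENGINES[model_id]  # ChatGptPromptEngine
long_id = MODEL_ID_TO_LONG_ID[model_id]                 # "gpt-4o-2024-08-06" (the Azure deployment)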
@@ -182,7 +190,10 @@ TOKEN_LIMITS: dict[str, int] = {
     "gpt-4-1106-preview": 128_000,
     "gpt-4-0125-preview": 128_000,
     "gpt-4o-2024-05-13": 128_000,
+    "gpt-4o-2024-08-06": 128_000,
+    "gpt-4o-mini": 128_000,
     "gpt-3.5-turbo-0125": 16_384,
+    "gpt35-turbo-16k": 16_384,
     "text-embedding-ada-002": 8191,
     "gpt4all": 16_384,
     "anthropic.claude-v2": 100_000,
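
The new TOKEN_LIMITS entries are likewise keyed on the long model IDs. An illustrative guard (an assumption about usage, not code from this diff) for checking a request against the context window:

from janus.llm.models_info import TOKEN_LIMITS


def fits_context(long_model_id: str, prompt_tokens: int, max_new_tokens: int) -> bool:
    """True if the prompt plus the requested completion stays within the model's context window."""
    return prompt_tokens + max_new_tokens <= TOKEN_LIMITS[long_model_id]


print(fits_context("gpt-4o-2024-08-06", 100_000, 20_000))  # True: 120_000 <= 128_000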
@@ -270,11 +281,21 @@ def load_model(model_id) -> JanusModel:
             openai_api_key=str(os.getenv("OPENAI_API_KEY")),
             openai_organization=str(os.getenv("OPENAI_ORG_ID")),
         )
-        log.warning("Do NOT use this model in sensitive environments!")
-        log.warning("If you would like to cancel, please press Ctrl+C.")
-        log.warning("Waiting 10 seconds...")
+        # log.warning("Do NOT use this model in sensitive environments!")
+        # log.warning("If you would like to cancel, please press Ctrl+C.")
+        # log.warning("Waiting 10 seconds...")
         # Give enough time for the user to read the warnings and cancel
-        time.sleep(10)
+        # time.sleep(10)
+        raise DeprecationWarning("OpenAI models are no longer supported.")
+
+    elif model_type_name == "Azure":
+        model_args.update(
+            {
+                "api_key": os.getenv("AZURE_OPENAI_API_KEY"),
+                "azure_endpoint": os.getenv("AZURE_OPENAI_ENDPOINT"),
+                "api_version": os.getenv("OPENAI_API_VERSION", "2024-02-01"),
+            }
+        )
 
     model_type = MODEL_TYPE_CONSTRUCTORS[model_type_name]
     prompt_engine = MODEL_PROMPT_ENGINES[model_id]
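
For reference, a minimal sketch of the client the Azure branch of load_model() ends up constructing, assuming AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, and optionally OPENAI_API_VERSION are set in the environment or a .env file. The deployment name and temperature here are illustrative values, not defaults mandated by the package:

import os

from langchain_openai import AzureChatOpenAI

llm = AzureChatOpenAI(
    # The "long" model ID doubles as the Azure deployment name (see MODEL_ID_TO_LONG_ID)
    azure_deployment="gpt-4o-2024-08-06",
    api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    api_version=os.getenv("OPENAI_API_VERSION", "2024-02-01"),
    temperature=0.7,
)
# llm.invoke("Explain what this Fortran subroutine does.")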
{janus_llm-4.0.0.dist-info → janus_llm-4.1.0.dist-info}/RECORD
CHANGED
@@ -1,12 +1,12 @@
-janus/__init__.py,sha256=
+janus/__init__.py,sha256=DBaGeUwOFIZW4QsQV06WFYM6RAz6pNdOZxJjTj3Uf7I,361
 janus/__main__.py,sha256=lEkpNtLVPtFo8ySDZeXJ_NXDHb0GVdZFPWB4gD4RPS8,64
 janus/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 janus/_tests/conftest.py,sha256=V7uW-oq3YbFiRPvrq15YoVVrA1n_83pjgiyTZ-IUGW8,963
 janus/_tests/test_cli.py,sha256=6ef7h11bg4i7Q6L1-r0ZdcY7YrH4n472kvDiA03T4c8,4275
-janus/cli.py,sha256=
+janus/cli.py,sha256=MkeV0FiYXmEcMnERne6pSrLSyc5xhny2bORLY-19A6o,39091
 janus/converter/__init__.py,sha256=U2EOMcCykiC0ZqhorNefOP_04hOF18qhYoPKrVp1Vrk,345
 janus/converter/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-janus/converter/_tests/test_translate.py,sha256=
+janus/converter/_tests/test_translate.py,sha256=T5CzNrwHqJWfb39Izq84R9WvM3toSlJq31SeA_U7d_4,5641
 janus/converter/aggregator.py,sha256=MuAXMKmq6PuUo_w6ljyiuDn81Gk2dN-Ci7FVeLc6vhs,1966
 janus/converter/converter.py,sha256=HWw-a4j-Qq0AWDyyk6cMVOwYjJeXxa3bvs9OOlKo_KI,25965
 janus/converter/diagram.py,sha256=-wktVBPrSBgNIQfHIfa2bJNg6L9CYJQgrr9-xU8DFPw,1646
@@ -57,8 +57,8 @@ janus/language/treesitter/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRk
 janus/language/treesitter/_tests/test_treesitter.py,sha256=fmr_mFSja7vaCVu0TVyLDua3A94jMjY4AqSC5NqnOdQ,2179
 janus/language/treesitter/treesitter.py,sha256=q7fqfFxt7QsqM6tE39uqutRMsOfEgBd3omv7zVZSEOc,7517
 janus/llm/__init__.py,sha256=TKLYvnsWKWfxMucy-lCLQ-4bkN9ENotJZDywDEQmrKg,45
-janus/llm/model_callbacks.py,sha256=
-janus/llm/models_info.py,sha256=
+janus/llm/model_callbacks.py,sha256=cHRZBpYgAwiYbA2k0GQ7DBwBFQZJpEGMUBV3Q_5GTpU,7940
+janus/llm/models_info.py,sha256=KWOQiWU1oAJB9nspI6N6Q32vHRH8loO8xM-Ys497fxc,10443
 janus/metrics/__init__.py,sha256=AsxtZJUzZiXJPr2ehPPltuYP-ddechjg6X85WZUO7mA,241
 janus/metrics/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 janus/metrics/_tests/reference.py,sha256=hiaJPP9CXkvFBV_wL-gOe_BzELTw0nvB6uCxhxtIiE8,13
@@ -102,8 +102,8 @@ janus/utils/_tests/test_progress.py,sha256=Rs_u5PiGjP-L-o6C1fhwfE1ig8jYu9Xo9s4p8
 janus/utils/enums.py,sha256=AoilbdiYyMvY2Mp0AM4xlbLSELfut2XMwhIM1S_msP4,27610
 janus/utils/logger.py,sha256=KZeuaMAnlSZCsj4yL0P6N-JzZwpxXygzACWfdZFeuek,2337
 janus/utils/progress.py,sha256=PIpcQec7SrhsfqB25LHj2CDDkfm9umZx90d9LZnAx6k,1469
-janus_llm-4.
-janus_llm-4.
-janus_llm-4.
-janus_llm-4.
-janus_llm-4.
+janus_llm-4.1.0.dist-info/LICENSE,sha256=_j0st0a-HB6MRbP3_BW3PUqpS16v54luyy-1zVyl8NU,10789
+janus_llm-4.1.0.dist-info/METADATA,sha256=RPA9U5cyO_-ulE45F1gBbTOnkRcjdBTOARyJs9XAvgg,4184
+janus_llm-4.1.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+janus_llm-4.1.0.dist-info/entry_points.txt,sha256=OGhQwzj6pvXp79B0SaBD5apGekCu7Dwe9fZZT_TZ544,39
+janus_llm-4.1.0.dist-info/RECORD,,