lm-deluge 0.0.34__py3-none-any.whl → 0.0.36__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -0,0 +1,153 @@
+ GOOGLE_MODELS = {
+     # Google AI
+     # these are through AI Studio rather than Vertex, and using the OpenAI-compatible endpoints
+     "gemini-2.0-flash-compat": {
+         "id": "gemini-2.0-flash-compat",
+         "name": "gemini-2.0-flash",
+         "api_base": "https://generativelanguage.googleapis.com/v1beta/openai",
+         "api_key_env_var": "GEMINI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": False,
+         "api_spec": "openai",
+         "input_cost": 0.1,
+         "output_cost": 0.4,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": False,
+     },
+     "gemini-2.0-flash-lite-compat": {
+         "id": "gemini-2.0-flash-lite-compat",
+         "name": "gemini-2.0-flash-lite",
+         "api_base": "https://generativelanguage.googleapis.com/v1beta/openai",
+         "api_key_env_var": "GEMINI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": False,
+         "api_spec": "openai",
+         "input_cost": 0.1,
+         "output_cost": 0.4,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": False,
+     },
+     "gemini-2.5-pro-compat": {
+         "id": "gemini-2.5-pro-compat",
+         "name": "gemini-2.5-pro",
+         "api_base": "https://generativelanguage.googleapis.com/v1beta/openai",
+         "api_key_env_var": "GEMINI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": False,
+         "api_spec": "openai",
+         "input_cost": 0.1,
+         "output_cost": 0.4,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": True,
+     },
+     "gemini-2.5-flash-compat": {
+         "id": "gemini-2.5-flash-compat",
+         "name": "gemini-2.5-flash",
+         "api_base": "https://generativelanguage.googleapis.com/v1beta/openai",
+         "api_key_env_var": "GEMINI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": False,
+         "api_spec": "openai",
+         "input_cost": 0.1,
+         "output_cost": 0.4,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": True,
+     },
+     "gemini-2.5-flash-lite-compat": {
+         "id": "gemini-2.5-flash-lite-compat",
+         "name": "gemini-2.5-flash-lite",
+         "api_base": "https://generativelanguage.googleapis.com/v1beta/openai",
+         "api_key_env_var": "GEMINI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": False,
+         "api_spec": "openai",
+         "input_cost": 0.1,
+         "output_cost": 0.4,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": True,
+     },
+     # Native Gemini API versions with file support
+     "gemini-2.0-flash": {
+         "id": "gemini-2.0-flash",
+         "name": "gemini-2.0-flash",
+         "api_base": "https://generativelanguage.googleapis.com/v1beta",
+         "api_key_env_var": "GEMINI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": False,
+         "api_spec": "gemini",
+         "input_cost": 0.1,
+         "output_cost": 0.4,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": False,
+     },
+     "gemini-2.0-flash-lite": {
+         "id": "gemini-2.0-flash-lite",
+         "name": "gemini-2.0-flash-lite",
+         "api_base": "https://generativelanguage.googleapis.com/v1beta",
+         "api_key_env_var": "GEMINI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": False,
+         "api_spec": "gemini",
+         "input_cost": 0.1,
+         "output_cost": 0.4,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": False,
+     },
+     "gemini-2.5-pro": {
+         "id": "gemini-2.5-pro",
+         "name": "gemini-2.5-pro",
+         "api_base": "https://generativelanguage.googleapis.com/v1beta",
+         "api_key_env_var": "GEMINI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": False,
+         "api_spec": "gemini",
+         "input_cost": 0.1,
+         "output_cost": 0.4,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": True,
+     },
+     "gemini-2.5-flash": {
+         "id": "gemini-2.5-flash",
+         "name": "gemini-2.5-flash",
+         "api_base": "https://generativelanguage.googleapis.com/v1beta",
+         "api_key_env_var": "GEMINI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": False,
+         "api_spec": "gemini",
+         "input_cost": 0.1,
+         "output_cost": 0.4,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": True,
+     },
+     "gemini-2.5-flash-lite": {
+         "id": "gemini-2.5-flash-lite",
+         "name": "gemini-2.5-flash-lite",
+         "api_base": "https://generativelanguage.googleapis.com/v1beta",
+         "api_key_env_var": "GEMINI_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": False,
+         "api_spec": "gemini",
+         "input_cost": 0.1,
+         "output_cost": 0.4,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": True,
+     },
+ }
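
The -compat entries point at Google's OpenAI-compatible endpoint, while the native entries use the Gemini API proper, so a caller has to build the request URL and auth header differently depending on api_spec. A minimal sketch of that dispatch follows; the build_request_info helper is hypothetical, not lm-deluge's actual API.

import os

# Hypothetical helper: look up a registry entry and derive the request
# URL plus auth header from its api_spec. Not part of lm-deluge itself.
def build_request_info(models: dict, model_id: str) -> tuple[str, dict]:
    cfg = models[model_id]
    api_key = os.environ[cfg["api_key_env_var"]]  # e.g. GEMINI_API_KEY
    if cfg["api_spec"] == "openai":
        # OpenAI-compatible endpoints take a Bearer token
        url = f"{cfg['api_base']}/chat/completions"
        headers = {"Authorization": f"Bearer {api_key}"}
    else:
        # native Gemini API accepts the key via the x-goog-api-key header
        url = f"{cfg['api_base']}/models/{cfg['name']}:generateContent"
        headers = {"x-goog-api-key": api_key}
    return url, headers

url, headers = build_request_info(GOOGLE_MODELS, "gemini-2.5-flash")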
@@ -0,0 +1,38 @@
+ XAI_MODELS = {
+     # Grok
+     "grok-3": {
+         "id": "grok-3",
+         "name": "grok-3-latest",
+         "api_base": "https://api.x.ai/v1",
+         "api_key_env_var": "GROK_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "api_spec": "openai",
+         "input_cost": 2.0,
+         "output_cost": 8.0,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": False,
+     },
+     "grok-3-mini": {
+         "id": "grok-3-mini",
+         "name": "grok-3-mini-latest",
+         "api_base": "https://api.x.ai/v1",
+         "api_key_env_var": "GROK_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "api_spec": "openai",
+         "input_cost": 2.0,
+         "output_cost": 8.0,
+         "requests_per_minute": 20,
+         "tokens_per_minute": 100_000,
+         "reasoning_model": True,
+     },
+ }
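
Each entry carries an input_cost and output_cost. The registry does not state the units, but the values line up with USD per million tokens, so under that assumption a per-request cost estimate would look like the sketch below (estimate_cost_usd is an illustrative helper, not part of the package).

# Assuming input_cost/output_cost are USD per 1M tokens (not documented
# in the registry itself), price a single completed request:
def estimate_cost_usd(cfg: dict, input_tokens: int, output_tokens: int) -> float:
    return (
        input_tokens * cfg["input_cost"] / 1_000_000
        + output_tokens * cfg["output_cost"] / 1_000_000
    )

# grok-3 at $2.00 in / $8.00 out per 1M tokens:
# 10_000 * 2 / 1e6 + 2_000 * 8 / 1e6 = 0.02 + 0.016 = $0.036
cost = estimate_cost_usd(XAI_MODELS["grok-3"], 10_000, 2_000)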
@@ -0,0 +1,74 @@
+ GROQ_MODELS = {
+     "llama-3.1-8b-groq": {
+         "id": "llama-3.1-8b-groq",
+         "name": "llama-3.1-8b-instant",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "llama-3.3-70b-groq": {
+         "id": "llama-3.3-70b-groq",
+         "name": "llama-3.3-70b-versatile",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "r1-llama-70b-groq": {
+         "id": "r1-llama-70b-groq",
+         "name": "deepseek-r1-distill-llama-70b",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "llama-4-maverick-groq": {
+         "id": "llama-4-maverick-groq",
+         "name": "meta-llama/llama-4-maverick-17b-128e-instruct",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "llama-4-scout-groq": {
+         "id": "llama-4-scout-groq",
+         "name": "meta-llama/llama-4-scout-17b-16e-instruct",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "kimi-k2-groq": {
+         "id": "kimi-k2-groq",
+         "name": "moonshotai/kimi-k2-instruct",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "gpt-oss-120b-groq": {
+         "id": "gpt-oss-120b-groq",
+         "name": "openai/gpt-oss-120b",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "gpt-oss-20b-groq": {
+         "id": "gpt-oss-20b-groq",
+         "name": "openai/gpt-oss-20b",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+     "qwen-3-32b-groq": {
+         "id": "qwen-3-32b-groq",
+         "name": "qwen/qwen3-32b",
+         "api_base": "https://api.groq.com/openai/v1",
+         "api_key_env_var": "GROQ_API_KEY",
+         "supports_json": False,
+         "api_spec": "openai",
+     },
+ }
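
Unlike the Google and xAI tables, the Groq entries omit supports_logprobs, the cost fields, the rate-limit fields, and reasoning_model. Any consumer that treats registry entries uniformly therefore needs defaults for the missing keys. One hedged way to paper over the gap (the DEFAULTS values below are guesses for illustration, not documented package behavior):

# Hypothetical defaulting shim so every entry exposes the same keys.
DEFAULTS = {
    "supports_logprobs": False,
    "input_cost": 0.0,
    "output_cost": 0.0,
    "requests_per_minute": 20,
    "tokens_per_minute": 100_000,
    "reasoning_model": False,
}

def with_defaults(cfg: dict) -> dict:
    # entry-specific values win over the defaults
    return {**DEFAULTS, **cfg}

cfg = with_defaults(GROQ_MODELS["kimi-k2-groq"])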
@@ -0,0 +1,65 @@
+ META_MODELS = {
+     # Meta
+     "llama-4-scout": {
+         "id": "llama-4-scout",
+         "name": "Llama-4-Scout-17B-16E-Instruct-FP8",
+         "api_base": "https://api.llama.com/compat/v1",
+         "api_key_env_var": "META_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "api_spec": "openai",
+         "input_cost": 0.0,
+         "output_cost": 0.0,
+         "requests_per_minute": 3_000,
+         "tokens_per_minute": 1_000_000,
+         "reasoning_model": False,
+     },
+     "llama-4-maverick": {
+         "id": "llama-4-maverick",
+         "name": "Llama-4-Maverick-17B-128E-Instruct-FP8",
+         "api_base": "https://api.llama.com/compat/v1",
+         "api_key_env_var": "META_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "api_spec": "openai",
+         "input_cost": 0.0,
+         "output_cost": 0.0,
+         "requests_per_minute": 3_000,
+         "tokens_per_minute": 1_000_000,
+         "reasoning_model": False,
+     },
+     "llama-3.3-70b": {
+         "id": "llama-3.3-70b",
+         "name": "Llama-3.3-70B-Instruct",
+         "api_base": "https://api.llama.com/compat/v1",
+         "api_key_env_var": "META_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "api_spec": "openai",
+         "input_cost": 0.0,
+         "output_cost": 0.0,
+         "requests_per_minute": 3_000,
+         "tokens_per_minute": 1_000_000,
+         "reasoning_model": False,
+     },
+     "llama-3.3-8b": {
+         "id": "llama-3.3-8b",
+         "name": "Llama-3.3-8B-Instruct",
+         "api_base": "https://api.llama.com/compat/v1",
+         "api_key_env_var": "META_API_KEY",
+         "supports_json": True,
+         "supports_logprobs": True,
+         "api_spec": "openai",
+         "input_cost": 0.0,
+         "output_cost": 0.0,
+         "requests_per_minute": 3_000,
+         "tokens_per_minute": 1_000_000,
+         "reasoning_model": False,
+     },
+ }
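
The Meta entries advertise far higher limits (3,000 requests and 1,000,000 tokens per minute) than the other providers' 20 rpm, which is exactly what client-side pacing would key off. A minimal pacing sketch driven by requests_per_minute, purely illustrative (lm-deluge's real scheduler is not shown in this diff):

import asyncio
import time

async def paced_requests(cfg: dict, jobs) -> None:
    # Space calls evenly so we stay under cfg["requests_per_minute"];
    # e.g. 3_000 rpm -> one request every 0.02 s.
    interval = 60.0 / cfg["requests_per_minute"]
    for job in jobs:
        start = time.monotonic()
        await job()  # each job is an async callable making one API call
        elapsed = time.monotonic() - start
        await asyncio.sleep(max(0.0, interval - elapsed))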
@@ -0,0 +1,110 @@
+ # Mistral
+ MISTRAL_MODELS = {
+     "mistral-medium": {
+         "id": "mistral-medium",
+         "name": "mistral-medium-latest",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 0.4,
+         "output_cost": 2.0,
+     },
+     "mistral-large": {
+         "id": "mistral-large",
+         "name": "mistral-large-latest",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 2.0,
+         "output_cost": 6.0,
+     },
+     "pixtral-large": {
+         "id": "pixtral-large",
+         "name": "pixtral-large-latest",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 2.0,
+         "output_cost": 6.0,
+     },
+     "mistral-small": {
+         "id": "mistral-small",
+         "name": "mistral-small-latest",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 0.1,
+         "output_cost": 0.3,
+     },
+     "devstral-small": {
+         "id": "devstral-small",
+         "name": "devstral-small-2505",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 0.1,
+         "output_cost": 0.3,
+     },
+     "codestral": {
+         "id": "codestral",
+         "name": "codestral-latest",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 0.2,
+         "output_cost": 0.6,
+     },
+     "pixtral-12b": {
+         "id": "pixtral-12b",
+         "name": "pixtral-12b",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 0.1,
+         "output_cost": 0.3,
+     },
+     "mistral-nemo": {
+         "id": "mistral-nemo",
+         "name": "open-mistral-nemo",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 0.1,
+         "output_cost": 0.3,
+     },
+     "ministral-8b": {
+         "id": "ministral-8b",
+         "name": "ministral-8b-latest",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 0.7,
+         "output_cost": 0.7,
+     },
+     "mixtral-8x22b": {
+         "id": "mixtral-8x22b",
+         "name": "open-mixtral-8x22b",
+         "api_base": "https://api.mistral.ai/v1",
+         "api_key_env_var": "MISTRAL_API_KEY",
+         "supports_json": True,
+         "api_spec": "mistral",
+         "input_cost": 2.0,
+         "output_cost": 6.0,
+     },
+ }
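
The Mistral entries, like Groq's, omit supports_logprobs, the rate-limit fields, and reasoning_model, so the defaulting shim sketched earlier would apply here as well. Assuming the package resolves model ids against a single merged table (a guess; this diff does not show how the per-provider dicts are actually combined), the lookup is just a dict union:

# Hypothetical merged registry; key collisions are avoided because each
# provider's ids are distinct (e.g. the "-groq" and "-compat" suffixes).
ALL_MODELS: dict[str, dict] = {
    **GOOGLE_MODELS,
    **XAI_MODELS,
    **GROQ_MODELS,
    **META_MODELS,
    **MISTRAL_MODELS,
}

def get_model(model_id: str) -> dict:
    try:
        return ALL_MODELS[model_id]
    except KeyError:
        raise ValueError(f"unknown model id: {model_id!r}") from None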