gllm-inference-binary 0.4.58__cp313-cp313-win_amd64.whl → 0.4.59__cp313-cp313-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of gllm-inference-binary might be problematic.
- gllm_inference/builder/build_lm_invoker.pyi +2 -1
- gllm_inference/model/__init__.pyi +9 -0
- gllm_inference/model/em/__init__.pyi +0 -0
- gllm_inference/model/em/google_em.pyi +16 -0
- gllm_inference/model/em/openai_em.pyi +15 -0
- gllm_inference/model/em/twelvelabs_em.pyi +13 -0
- gllm_inference/model/em/voyage_em.pyi +20 -0
- gllm_inference/model/lm/__init__.pyi +0 -0
- gllm_inference/model/lm/anthropic_lm.pyi +20 -0
- gllm_inference/model/lm/google_lm.pyi +17 -0
- gllm_inference/model/lm/openai_lm.pyi +27 -0
- gllm_inference.cp313-win_amd64.pyd +0 -0
- {gllm_inference_binary-0.4.58.dist-info → gllm_inference_binary-0.4.59.dist-info}/METADATA +1 -1
- {gllm_inference_binary-0.4.58.dist-info → gllm_inference_binary-0.4.59.dist-info}/RECORD +15 -5
- {gllm_inference_binary-0.4.58.dist-info → gllm_inference_binary-0.4.59.dist-info}/WHEEL +0 -0
gllm_inference/builder/build_lm_invoker.pyi

@@ -65,7 +65,8 @@ def build_lm_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
 },
 )
 ```
-
+The credentials can also be provided through the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+environment variables.
 
 # Using Datasaur LLM Projects Deployment API
 ```python
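The two added docstring lines document that AWS credentials can be supplied through environment variables instead of the `credentials` argument. A minimal sketch of that pattern, assuming `build_lm_invoker` is importable from `gllm_inference.builder`; the Bedrock-style model id string and the omission of `credentials` are illustrative assumptions, not confirmed by this diff:

```python
import os

from gllm_inference.builder import build_lm_invoker

# Assumption: with these environment variables set, the explicit `credentials`
# argument can be omitted.
os.environ["AWS_ACCESS_KEY_ID"] = "<your-access-key-id>"
os.environ["AWS_SECRET_ACCESS_KEY"] = "<your-secret-access-key>"

# Hypothetical model id string; the exact provider prefix is not shown in this diff.
lm_invoker = build_lm_invoker("bedrock/anthropic.claude-3-5-sonnet")
```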
gllm_inference/model/__init__.pyi

@@ -0,0 +1,9 @@
+from gllm_inference.model.em.google_em import GoogleEM as GoogleEM
+from gllm_inference.model.em.openai_em import OpenAIEM as OpenAIEM
+from gllm_inference.model.em.twelvelabs_em import TwelveLabsEM as TwelveLabsEM
+from gllm_inference.model.em.voyage_em import VoyageEM as VoyageEM
+from gllm_inference.model.lm.anthropic_lm import AnthropicLM as AnthropicLM
+from gllm_inference.model.lm.google_lm import GoogleLM as GoogleLM
+from gllm_inference.model.lm.openai_lm import OpenAILM as OpenAILM
+
+__all__ = ['AnthropicLM', 'GoogleEM', 'GoogleLM', 'OpenAIEM', 'OpenAILM', 'TwelveLabsEM', 'VoyageEM']
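The new `gllm_inference/model/__init__.pyi` re-exports every model-name constants class from a single namespace. A minimal sketch of how those re-exports are meant to be consumed, based on the `__all__` list above and the docstrings in the stub files below; the attributes are typed as plain `str`, so they should be usable wherever a model name string is expected:

```python
from gllm_inference.model import AnthropicLM, GoogleEM, OpenAILM

# Each constant is annotated as `str` in the stubs; at runtime these should
# resolve to provider-specific model name strings.
print(AnthropicLM.CLAUDE_SONNET_4)
print(GoogleEM.GEMINI_EMBEDDING_001)
print(OpenAILM.GPT_5_NANO)
```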
gllm_inference/model/em/__init__.pyi: file without changes
gllm_inference/model/em/google_em.pyi

@@ -0,0 +1,16 @@
+class GoogleEM:
+    '''Defines Google embedding model names constants.
+
+    Usage example:
+    ```python
+    from gllm_inference.model import GoogleEM
+    from gllm_inference.em_invoker import GoogleEMInvoker
+
+    em_invoker = GoogleEMInvoker(GoogleEM.GEMINI_EMBEDDING_001)
+    result = await em_invoker.invoke("Hello, world!")
+    ```
+    '''
+    GEMINI_EMBEDDING_001: str
+    TEXT_EMBEDDING_004: str
+    TEXT_EMBEDDING_005: str
+    TEXT_MULTILINGUAL_EMBEDDING_002: str
gllm_inference/model/em/openai_em.pyi

@@ -0,0 +1,15 @@
+class OpenAIEM:
+    '''Defines OpenAI embedding model names constants.
+
+    Usage example:
+    ```python
+    from gllm_inference.model import OpenAIEM
+    from gllm_inference.em_invoker import OpenAIEMInvoker
+
+    em_invoker = OpenAIEMInvoker(OpenAIEM.TEXT_EMBEDDING_3_SMALL)
+    result = await em_invoker.invoke("Hello, world!")
+    ```
+    '''
+    TEXT_EMBEDDING_3_SMALL: str
+    TEXT_EMBEDDING_3_LARGE: str
+    TEXT_EMBEDDING_ADA_002: str
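The usage examples in these docstrings call `await ... .invoke(...)` directly, so they assume an async context. A minimal sketch of running the OpenAI embedding example above under `asyncio.run`; the `main` wrapper is an addition here, and any API-key setup the invoker needs is omitted:

```python
import asyncio

from gllm_inference.em_invoker import OpenAIEMInvoker
from gllm_inference.model import OpenAIEM


async def main() -> None:
    # Mirrors the docstring example; credentials/API-key configuration is assumed
    # to be handled elsewhere (e.g. via the invoker's constructor or environment).
    em_invoker = OpenAIEMInvoker(OpenAIEM.TEXT_EMBEDDING_3_SMALL)
    result = await em_invoker.invoke("Hello, world!")
    print(result)


asyncio.run(main())
```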
gllm_inference/model/em/twelvelabs_em.pyi

@@ -0,0 +1,13 @@
+class TwelveLabsEM:
+    '''Defines TwelveLabs embedding model names constants.
+
+    Usage example:
+    ```python
+    from gllm_inference.model import TwelveLabsEM
+    from gllm_inference.em_invoker import TwelveLabsEMInvoker
+
+    em_invoker = TwelveLabsEMInvoker(TwelveLabsEM.MARENGO_RETRIEVAL_2_7)
+    result = await em_invoker.invoke("Hello, world!")
+    ```
+    '''
+    MARENGO_RETRIEVAL_2_7: str
gllm_inference/model/em/voyage_em.pyi

@@ -0,0 +1,20 @@
+class VoyageEM:
+    '''Defines Voyage embedding model names constants.
+
+    Usage example:
+    ```python
+    from gllm_inference.model import VoyageEM
+    from gllm_inference.em_invoker import VoyageEMInvoker
+
+    em_invoker = VoyageEMInvoker(VoyageEM.VOYAGE_3_5_LITE)
+    result = await em_invoker.invoke("Hello, world!")
+    ```
+    '''
+    VOYAGE_3_5: str
+    VOYAGE_3_5_LITE: str
+    VOYAGE_3_LARGE: str
+    VOYAGE_CODE_3: str
+    VOYAGE_FINANCE_2: str
+    VOYAGE_LAW_2: str
+    VOYAGE_CODE_2: str
+    VOYAGE_MULTIMODAL_3: str
gllm_inference/model/lm/__init__.pyi: file without changes
gllm_inference/model/lm/anthropic_lm.pyi

@@ -0,0 +1,20 @@
+class AnthropicLM:
+    '''Defines Anthropic language model names constants.
+
+    Usage example:
+    ```python
+    from gllm_inference.model import AnthropicLM
+    from gllm_inference.lm_invoker import AnthropicLMInvoker
+
+    lm_invoker = AnthropicLMInvoker(AnthropicLM.CLAUDE_SONNET_4)
+    response = await lm_invoker.invoke("Hello, world!")
+    ```
+    '''
+    CLAUDE_OPUS_4_1: str
+    CLAUDE_OPUS_4: str
+    CLAUDE_SONNET_4: str
+    CLAUDE_SONNET_3_7: str
+    CLAUDE_SONNET_3_5: str
+    CLAUDE_HAIKU_3_5: str
+    CLAUDE_OPUS_3: str
+    CLAUDE_HAIKU_3: str
gllm_inference/model/lm/google_lm.pyi

@@ -0,0 +1,17 @@
+class GoogleLM:
+    '''Defines Google language model names constants.
+
+    Usage example:
+    ```python
+    from gllm_inference.model import GoogleLM
+    from gllm_inference.lm_invoker import GoogleLMInvoker
+
+    lm_invoker = GoogleLMInvoker(GoogleLM.GEMINI_2_5_FLASH)
+    response = await lm_invoker.invoke("Hello, world!")
+    ```
+    '''
+    GEMINI_2_5_PRO: str
+    GEMINI_2_5_FLASH: str
+    GEMINI_2_5_FLASH_LITE: str
+    GEMINI_2_0_FLASH: str
+    GEMINI_2_0_FLASH_LITE: str
gllm_inference/model/lm/openai_lm.pyi

@@ -0,0 +1,27 @@
+class OpenAILM:
+    '''Defines OpenAI language model names constants.
+
+    Usage example:
+    ```python
+    from gllm_inference.model import OpenAILM
+    from gllm_inference.lm_invoker import OpenAILMInvoker
+
+    lm_invoker = OpenAILMInvoker(OpenAILM.GPT_5_NANO)
+    response = await lm_invoker.invoke("Hello, world!")
+    ```
+    '''
+    GPT_5: str
+    GPT_5_MINI: str
+    GPT_5_NANO: str
+    GPT_4_1: str
+    GPT_4_1_MINI: str
+    GPT_4_1_NANO: str
+    GPT_4O: str
+    GPT_4O_MINI: str
+    O4_MINI: str
+    O4_MINI_DEEP_RESEARCH: str
+    O3: str
+    O3_PRO: str
+    O3_DEEP_RESEARCH: str
+    O1: str
+    O1_PRO: str
gllm_inference.cp313-win_amd64.pyd: binary file
{gllm_inference_binary-0.4.58.dist-info → gllm_inference_binary-0.4.59.dist-info}/RECORD

@@ -1,7 +1,7 @@
 gllm_inference/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 gllm_inference/builder/__init__.pyi,sha256=-bw1uDx7CAM7pkvjvb1ZXku9zXlQ7aEAyC83KIn3bz8,506
 gllm_inference/builder/build_em_invoker.pyi,sha256=PGRHlmiQ-GUTDC51PwYFjVkXRxeN0immnaSBOI06Uno,5474
-gllm_inference/builder/build_lm_invoker.pyi,sha256=
+gllm_inference/builder/build_lm_invoker.pyi,sha256=6dQha47M19hllF5ID5xUeiNPmbWUpKyNbG9D78qFGck,6618
 gllm_inference/builder/build_lm_request_processor.pyi,sha256=Xp1v9u20UYzDgmccL24bWVbVHaZVyqU4VVtPyeTKMGs,4488
 gllm_inference/builder/build_output_parser.pyi,sha256=sgSTrzUmSRxPzUUum0fDU7A3NXYoYhpi6bEx4Q2XMnA,965
 gllm_inference/builder/model_id.pyi,sha256=99Upl0mLOQT8pA7XlhUjPuFhLAW0KCYw4C4a00M1tic,429
@@ -54,6 +54,16 @@ gllm_inference/lm_invoker/schema/langchain.pyi,sha256=qYiQvzUw0xZa4ii-qyRCFTuIY7
 gllm_inference/lm_invoker/schema/openai.pyi,sha256=CNkIGljwRyQYx0krONX1ik9hwBiN45t9vBk-ZY45rP4,1989
 gllm_inference/lm_invoker/schema/openai_compatible.pyi,sha256=WiWEFoPQ0PEAx6EW-P8Nk6O7RF5I9i_hItEHtOl_F4A,1074
 gllm_inference/lm_invoker/tgi_lm_invoker.pyi,sha256=X8drUmvFDEdjEAga4xeIj7QlA8HEppXCsmb2Fu23xsw,2207
+gllm_inference/model/__init__.pyi,sha256=JKQB0wVSVYD-_tdRkG7N_oEVAKGCcoBw0BUOUMLieFo,602
+gllm_inference/model/em/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+gllm_inference/model/em/google_em.pyi,sha256=c53H-KNdNOK9ppPLyOSkmCA890eF5FsMd05upkPIzF0,487
+gllm_inference/model/em/openai_em.pyi,sha256=b6ID1JsLZH9OAo9E37CkbgWNR_eI65eKXK6TYi_0ndA,457
+gllm_inference/model/em/twelvelabs_em.pyi,sha256=5R2zkKDiEatdATFzF8TOoKW9XRkOsOoNGY5lORimueo,413
+gllm_inference/model/em/voyage_em.pyi,sha256=kTInLttWfPqCNfBX-TK5VMMaFfPxwqqudBw1kz4hnxk,551
+gllm_inference/model/lm/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+gllm_inference/model/lm/anthropic_lm.pyi,sha256=3rppksDF4nVAR3Konoj6nRi_T8vSaFPxLub1CzJh7Us,578
+gllm_inference/model/lm/google_lm.pyi,sha256=yv5nXnLxuCGDUsh7QP9furSx-6sZj6FQi-pJ9lZbHAk,496
+gllm_inference/model/lm/openai_lm.pyi,sha256=yj3AJj1xDYRkNIPHX2enw46AJ9wArPZruKsxg1ME9Rg,645
 gllm_inference/multimodal_em_invoker/__init__.pyi,sha256=fFgxDCJLa_h3y0x6B_mjkcuQEl32zyNB333Q1_Zmvq0,389
 gllm_inference/multimodal_em_invoker/google_vertexai_multimodal_em_invoker.pyi,sha256=k_E81BgnLk94Zko6_F_eLvJnbakSyVXkO5RaROFq1Ms,3125
 gllm_inference/multimodal_em_invoker/multimodal_em_invoker.pyi,sha256=xdGGbSuq2ptLnjbZ8RyrZOWya1HfL0mLcKUJVs_wtRE,1544
@@ -98,8 +108,8 @@ gllm_inference/utils/openai_multimodal_lm_helper.pyi,sha256=oolyuXA5S9Njft6E15Th
 gllm_inference/utils/retry.pyi,sha256=gEHkFUmzX8CCkvFrXPYhFuoZ_iq0a210TBiRU88ZHbA,80
 gllm_inference/utils/utils.pyi,sha256=N1fum4TLEsIYsdnK8y6fVxDDF5WT_MnLP9FSJUsjcGQ,6159
 gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
-gllm_inference.cp313-win_amd64.pyd,sha256=
+gllm_inference.cp313-win_amd64.pyd,sha256=Rb27-GxrfovpfRFpFp9R0W_FNIIZdLmqK5AJG_K2T1A,3442176
 gllm_inference.pyi,sha256=9X7vzatJlBsyNRLVKmsyzwELb-r27h82-anXEQWfkUU,5005
-gllm_inference_binary-0.4.
-gllm_inference_binary-0.4.
-gllm_inference_binary-0.4.
+gllm_inference_binary-0.4.59.dist-info/METADATA,sha256=LwMFdou-CWZyPaIbalJ2tWROCXCWHR2v7ItRrZL8LGo,4917
+gllm_inference_binary-0.4.59.dist-info/WHEEL,sha256=RBxSuTKD__NDRUBZC1I4b5R6FamU3rQfymmsTgmeb3A,98
+gllm_inference_binary-0.4.59.dist-info/RECORD,,
{gllm_inference_binary-0.4.58.dist-info → gllm_inference_binary-0.4.59.dist-info}/WHEEL: file without changes