gllm-inference-binary 0.5.24__cp312-cp312-win_amd64.whl → 0.5.25__cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of gllm-inference-binary might be problematic. See the associated advisory for more details.

@@ -86,7 +86,7 @@ def build_em_invoker(model_id: str | ModelId, credentials: str | dict[str, Any]
86
86
  # Using Azure OpenAI
87
87
  ```python
88
88
  em_invoker = build_em_invoker(
89
- model_id="azure-openai/https://my-resource.openai.azure.com:my-deployment",
89
+ model_id="azure-openai/https://my-resource.openai.azure.com/openai/v1:my-deployment",
90
90
  credentials="azure-api-key"
91
91
  )
92
92
  ```
@@ -1,6 +1,6 @@
1
1
  from _typeshed import Incomplete
2
2
 
3
- DEFAULT_AZURE_OPENAI_API_VERSION: str
3
+ AZURE_OPENAI_URL_SUFFIX: str
4
4
  DOCUMENT_MIME_TYPES: Incomplete
5
5
  GOOGLE_SCOPES: Incomplete
6
6
  GRPC_ENABLE_RETRIES_KEY: str
@@ -1,6 +1,6 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.utils.retry import RetryConfig as RetryConfig
3
- from gllm_inference.constants import DEFAULT_AZURE_OPENAI_API_VERSION as DEFAULT_AZURE_OPENAI_API_VERSION, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
3
+ from gllm_inference.constants import AZURE_OPENAI_URL_SUFFIX as AZURE_OPENAI_URL_SUFFIX, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
4
4
  from gllm_inference.em_invoker.openai_em_invoker import OpenAIEMInvoker as OpenAIEMInvoker
5
5
  from gllm_inference.em_invoker.schema.openai import Key as Key
6
6
  from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, TruncationConfig as TruncationConfig
@@ -69,7 +69,7 @@ class AzureOpenAIEMInvoker(OpenAIEMInvoker):
69
69
  ```
70
70
  '''
71
71
  client: Incomplete
72
- def __init__(self, azure_endpoint: str, azure_deployment: str, api_key: str | None = None, api_version: str = ..., model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, retry_config: RetryConfig | None = None, truncation_config: TruncationConfig | None = None) -> None:
72
+ def __init__(self, azure_endpoint: str, azure_deployment: str, api_key: str | None = None, api_version: str | None = None, model_kwargs: dict[str, Any] | None = None, default_hyperparameters: dict[str, Any] | None = None, retry_config: RetryConfig | None = None, truncation_config: TruncationConfig | None = None) -> None:
73
73
  """Initializes a new instance of the AzureOpenAIEMInvoker class.
74
74
 
75
75
  Args:
@@ -77,8 +77,7 @@ class AzureOpenAIEMInvoker(OpenAIEMInvoker):
77
77
  azure_deployment (str): The deployment name of the Azure OpenAI service.
78
78
  api_key (str | None, optional): The API key for authenticating with Azure OpenAI. Defaults to None, in
79
79
  which case the `AZURE_OPENAI_API_KEY` environment variable will be used.
80
- api_version (str, optional): The API version of the Azure OpenAI service. Defaults to
81
- `DEFAULT_AZURE_OPENAI_API_VERSION`.
80
+ api_version (str | None, optional): Deprecated parameter to be removed in v0.6. Defaults to None.
82
81
  model_kwargs (dict[str, Any] | None, optional): Additional model parameters. Defaults to None.
83
82
  default_hyperparameters (dict[str, Any] | None, optional): Default hyperparameters for invoking the model.
84
83
  Defaults to None.
@@ -1,15 +1,13 @@
1
1
  from _typeshed import Incomplete
2
2
  from gllm_core.schema.tool import Tool as Tool
3
3
  from gllm_core.utils.retry import RetryConfig as RetryConfig
4
- from gllm_inference.constants import INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
4
+ from gllm_inference.constants import AZURE_OPENAI_URL_SUFFIX as AZURE_OPENAI_URL_SUFFIX, INVOKER_PROPAGATED_MAX_RETRIES as INVOKER_PROPAGATED_MAX_RETRIES
5
5
  from gllm_inference.lm_invoker.openai_lm_invoker import OpenAILMInvoker as OpenAILMInvoker, ReasoningEffort as ReasoningEffort, ReasoningSummary as ReasoningSummary
6
6
  from gllm_inference.lm_invoker.schema.openai import Key as Key
7
7
  from gllm_inference.schema import ModelId as ModelId, ModelProvider as ModelProvider, ResponseSchema as ResponseSchema
8
8
  from langchain_core.tools import Tool as LangChainTool
9
9
  from typing import Any
10
10
 
11
- URL_SUFFIX: str
12
-
13
11
  class AzureOpenAILMInvoker(OpenAILMInvoker):
14
12
  '''A language model invoker to interact with Azure OpenAI language models.
15
13
 
Binary file
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: gllm-inference-binary
3
- Version: 0.5.24
3
+ Version: 0.5.25
4
4
  Summary: A library containing components related to model inferences in Gen AI applications.
5
5
  Author: Henry Wicaksono
6
6
  Author-email: henry.wicaksono@gdplabs.id
@@ -1,6 +1,6 @@
1
1
  gllm_inference/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
2
  gllm_inference/builder/__init__.pyi,sha256=-bw1uDx7CAM7pkvjvb1ZXku9zXlQ7aEAyC83KIn3bz8,506
3
- gllm_inference/builder/build_em_invoker.pyi,sha256=cBqifw0frhYZinDndeCjqDVqv7oeW728-i5nH4JMyhk,6010
3
+ gllm_inference/builder/build_em_invoker.pyi,sha256=Mh1vRoJhsqc8hX4jUdopV14Fn44ql27NB7xbGjoHJtE,6020
4
4
  gllm_inference/builder/build_lm_invoker.pyi,sha256=p63iuVBOOpNizItGK6HDxYDrgXdovtfSe0VrvrEd-PA,7047
5
5
  gllm_inference/builder/build_lm_request_processor.pyi,sha256=0pJINCP4nnXVwuhIbhsaiwzjX8gohQt2oqXFZhTFSUs,4584
6
6
  gllm_inference/builder/build_output_parser.pyi,sha256=sgSTrzUmSRxPzUUum0fDU7A3NXYoYhpi6bEx4Q2XMnA,965
@@ -8,9 +8,9 @@ gllm_inference/catalog/__init__.pyi,sha256=HWgPKWIzprpMHRKe_qN9BZSIQhVhrqiyjLjIX
8
8
  gllm_inference/catalog/catalog.pyi,sha256=eWPqgQKi-SJGHabi_XOTEKpAj96OSRypKsb5ZEC1VWU,4911
9
9
  gllm_inference/catalog/lm_request_processor_catalog.pyi,sha256=GemCEjFRHNChtNOfbyXSVsJiA3klOCAe_X11fnymhYs,5540
10
10
  gllm_inference/catalog/prompt_builder_catalog.pyi,sha256=iViWB4SaezzjQY4UY1YxeoXUNxqxa2cTJGaD9JSx4Q8,3279
11
- gllm_inference/constants.pyi,sha256=0WlEIaWQipEfW2yFmxUxHfZyAzw34SrWyGs_jHZMSts,286
11
+ gllm_inference/constants.pyi,sha256=KQmondDEkHK2P249ymmce3SdutVrx8kYm4v1eTCkW9U,277
12
12
  gllm_inference/em_invoker/__init__.pyi,sha256=pmbsjmsqXwfe4WPykMnrmasKrYuylJWnf2s0pbo0ioM,997
13
- gllm_inference/em_invoker/azure_openai_em_invoker.pyi,sha256=g1I3Aexg5VeDeU_zbZWCVgca2fhrUztVrpbzS5GBBYI,5072
13
+ gllm_inference/em_invoker/azure_openai_em_invoker.pyi,sha256=SfJPC_PJGiEfWS9JH5kRQPJztsR7jRhwVuETqdY-JsQ,5021
14
14
  gllm_inference/em_invoker/bedrock_em_invoker.pyi,sha256=UqodtpDmE7fEgpctXEETIlZGorX9i1lmmuTvGaJke6o,5829
15
15
  gllm_inference/em_invoker/em_invoker.pyi,sha256=YDYJ8TGScsz5Gg-OBnEENN1tI1RYvwoddypxUr6SAWw,5191
16
16
  gllm_inference/em_invoker/google_em_invoker.pyi,sha256=q69kdVuE44ZqziQ8BajFYZ1tYn-MPjKjzXS9cRh4oAo,6951
@@ -35,7 +35,7 @@ gllm_inference/exceptions/exceptions.pyi,sha256=6y3ECgHAStqMGgQv8Dv-Ui-5PDD07mSj
35
35
  gllm_inference/exceptions/provider_error_map.pyi,sha256=4AsAgbXAh91mxEW2YiomEuhBoeSNeAIo9WbT9WK8gQk,1233
36
36
  gllm_inference/lm_invoker/__init__.pyi,sha256=eE_HDCl9A135mi6mtIV55q-T9J1O8OpbMcqWuny3w9A,1214
37
37
  gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=85uvShLv4-eiGOpTMgwWpQGZXPW6XaB6GrexBmxg_sQ,15200
38
- gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=Ek7pZYaSWWFdPKI6iPKiICvZxN7xCVglQleTSSwW5ok,14799
38
+ gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=Wzw6We1KwLcWW9-4tGGMZoPnnHSKofOmCuqoddTHr2Q,14832
39
39
  gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=HzpDRIhe4-XRj9n43bmsXQHxPwx5lcnetxIe5EMbHIE,12924
40
40
  gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=c4H3TOz0LIhWjokCCdQ4asiwQR4_LPyaimo4RAqU9es,9369
41
41
  gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=IPmVAFTtZGvBDb-veoeCq8u7R9chKU958vJoBlWbIvE,17369
@@ -97,8 +97,8 @@ gllm_inference/utils/io_utils.pyi,sha256=Eg7dvHWdXslTKdjh1j3dG50i7r35XG2zTmJ9XXv
97
97
  gllm_inference/utils/langchain.pyi,sha256=4AwFiVAO0ZpdgmqeC4Pb5NJwBt8vVr0MSUqLeCdTscc,1194
98
98
  gllm_inference/utils/validation.pyi,sha256=-RdMmb8afH7F7q4Ao7x6FbwaDfxUHn3hA3WiOgzB-3s,397
99
99
  gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
100
- gllm_inference.cp312-win_amd64.pyd,sha256=QPT_t-K5qqQrk9GfD_HQhFyfNJbkSME_FYTNaWvUvCk,3065856
100
+ gllm_inference.cp312-win_amd64.pyd,sha256=chmSP2dPOzlrqoDWRSzR89vi75CzNscirmC4I2nTMAw,3068928
101
101
  gllm_inference.pyi,sha256=Pq6P04np3S3x7juGVCzC5sL2im4MsyligEvahVQNWzM,3820
102
- gllm_inference_binary-0.5.24.dist-info/METADATA,sha256=VRF96O0qP_WUnJN5O6eyphnYi4zYXo06tfLHbSlJMtI,4615
103
- gllm_inference_binary-0.5.24.dist-info/WHEEL,sha256=4N0hGcnWMI_Ty6ATf4qJqqSl-UNI-Ln828iTWGIywmU,98
104
- gllm_inference_binary-0.5.24.dist-info/RECORD,,
102
+ gllm_inference_binary-0.5.25.dist-info/METADATA,sha256=EegMUA9X6Hb5cn9UveDy_bEBJCV5oB860sE0GEv65mc,4615
103
+ gllm_inference_binary-0.5.25.dist-info/WHEEL,sha256=4N0hGcnWMI_Ty6ATf4qJqqSl-UNI-Ln828iTWGIywmU,98
104
+ gllm_inference_binary-0.5.25.dist-info/RECORD,,