gllm-inference-binary 0.5.33__cp312-cp312-win_amd64.whl → 0.5.35__cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of gllm-inference-binary might be problematic. Click here for more details.

@@ -0,0 +1,40 @@
1
+ from gllm_core.schema import Event
2
+ from gllm_inference.schema.activity import Activity as Activity
3
+ from gllm_inference.schema.enums import EmitDataType as EmitDataType
4
+ from typing import Literal
5
+
6
+ class ReasoningEvent(Event):
7
+ """Event schema for model reasoning.
8
+
9
+ Attributes:
10
+ id (str): The unique identifier for the thinking event. Defaults to a UUID string.
11
+ data_type (Literal): The type of thinking event (thinking, thinking_start, or thinking_end).
12
+ data_value (str): The thinking content or message.
13
+ """
14
+ id: str
15
+ data_type: Literal[EmitDataType.THINKING, EmitDataType.THINKING_START, EmitDataType.THINKING_END]
16
+ data_value: str
17
+
18
+ class ActivityEvent(Event):
19
+ """Event schema for model-triggered activities (e.g. web search, MCP).
20
+
21
+ Attributes:
22
+ id (str): The unique identifier for the activity event. Defaults to a UUID string.
23
+ data_type (Literal): The type of event, always 'activity'.
24
+ data_value (Activity): The activity data containing message and type.
25
+ """
26
+ id: str
27
+ data_type: Literal[EmitDataType.ACTIVITY]
28
+ data_value: Activity
29
+
30
+ class CodeEvent(Event):
31
+ """Event schema for model-triggered code execution.
32
+
33
+ Attributes:
34
+ id (str): The unique identifier for the code event. Defaults to a UUID string.
35
+ data_type (Literal): The type of event (code, code_start, or code_end).
36
+ data_value (str): The code content.
37
+ """
38
+ id: str
39
+ data_type: Literal[EmitDataType.CODE, EmitDataType.CODE_START, EmitDataType.CODE_END]
40
+ data_value: str
@@ -1,3 +1,4 @@
1
+ from _typeshed import Incomplete
1
2
  from enum import StrEnum
2
3
  from gllm_inference.utils import validate_string_enum as validate_string_enum
3
4
  from pydantic import BaseModel
@@ -16,11 +17,15 @@ class ModelProvider(StrEnum):
16
17
  LANGCHAIN = 'langchain'
17
18
  LITELLM = 'litellm'
18
19
  OPENAI = 'openai'
20
+ OPENAI_CHAT_COMPLETIONS = 'openai-chat-completions'
19
21
  OPENAI_COMPATIBLE = 'openai-compatible'
20
22
  TWELVELABS = 'twelvelabs'
21
23
  VOYAGE = 'voyage'
22
24
  XAI = 'xai'
23
25
 
26
+ OPTIONAL_PATH_PROVIDERS: Incomplete
27
+ PATH_SUPPORTING_PROVIDERS: Incomplete
28
+
24
29
  class ModelId(BaseModel):
25
30
  '''Defines a representation of a valid model id.
26
31
 
@@ -32,7 +37,7 @@ class ModelId(BaseModel):
32
37
  Provider-specific examples:
33
38
  # Using Anthropic
34
39
  ```python
35
- model_id = ModelId.from_string("anthropic/claude-3-5-sonnet-latest")
40
+ model_id = ModelId.from_string("anthropic/claude-sonnet-4-20250514")
36
41
  ```
37
42
 
38
43
  # Using Bedrock
@@ -47,22 +52,32 @@ class ModelId(BaseModel):
47
52
 
48
53
  # Using Google
49
54
  ```python
50
- model_id = ModelId.from_string("google/gemini-1.5-flash")
55
+ model_id = ModelId.from_string("google/gemini-2.5-flash-lite")
51
56
  ```
52
57
 
53
58
  # Using OpenAI
54
59
  ```python
55
- model_id = ModelId.from_string("openai/gpt-4o-mini")
60
+ model_id = ModelId.from_string("openai/gpt-5-nano")
56
61
  ```
57
62
 
58
- # Using Azure OpenAI
63
+ # Using OpenAI with Chat Completions API
59
64
  ```python
60
- model_id = ModelId.from_string("azure-openai/https://my-resource.openai.azure.com/openai/v1:my-deployment")
65
+ model_id = ModelId.from_string("openai-chat-completions/gpt-5-nano")
66
+ ```
67
+
68
+ # Using OpenAI Responses API-compatible endpoints (e.g. SGLang)
69
+ ```python
70
+ model_id = ModelId.from_string("openai/https://my-sglang-url:8000/v1:my-model-name")
61
71
  ```
62
72
 
63
- # Using OpenAI compatible endpoints (e.g. Groq)
73
+ # Using OpenAI Chat Completions API-compatible endpoints (e.g. Groq)
64
74
  ```python
65
- model_id = ModelId.from_string("openai-compatible/https://api.groq.com/openai/v1:llama3-8b-8192")
75
+ model_id = ModelId.from_string("openai-chat-completions/https://api.groq.com/openai/v1:llama3-8b-8192")
76
+ ```
77
+
78
+ # Using Azure OpenAI
79
+ ```python
80
+ model_id = ModelId.from_string("azure-openai/https://my-resource.openai.azure.com/openai/v1:my-deployment")
66
81
  ```
67
82
 
68
83
  # Using Voyage
@@ -89,7 +104,7 @@ class ModelId(BaseModel):
89
104
  For the list of supported providers, please refer to the following page:
90
105
  https://docs.litellm.ai/docs/providers/
91
106
 
92
- # Using XAI
107
+ # Using xAI
93
108
  ```python
94
109
  model_id = ModelId.from_string("xai/grok-4-0709")
95
110
  ```
@@ -99,9 +114,9 @@ class ModelId(BaseModel):
99
114
  Custom model name validation example:
100
115
  ```python
101
116
  validation_map = {
102
- ModelProvider.ANTHROPIC: {"claude-3-5-sonnet-latest"},
103
- ModelProvider.GOOGLE: {"gemini-1.5-flash", "gemini-1.5-pro"},
104
- ModelProvider.OPENAI: {"gpt-4o", "gpt-4o-mini"},
117
+ ModelProvider.ANTHROPIC: {"claude-sonnet-4-20250514"},
118
+ ModelProvider.GOOGLE: {"gemini-2.5-flash-lite"},
119
+ ModelProvider.OPENAI: {"gpt-4.1-nano", "gpt-5-nano"},
105
120
  }
106
121
 
107
122
  model_id = ModelId.from_string("...", validation_map)
@@ -115,13 +130,8 @@ class ModelId(BaseModel):
115
130
  """Parse a model id string into a ModelId object.
116
131
 
117
132
  Args:
118
- model_id (str): The model id to parse. Must be in the the following format:
119
- 1. For `azure-openai` provider: `azure-openai/azure-endpoint:azure-deployment`.
120
- 2. For `openai-compatible` provider: `openai-compatible/base-url:model-name`.
121
- 3. For `langchain` provider: `langchain/<package>.<class>:model-name`.
122
- 4. For `litellm` provider: `litellm/provider/model-name`.
123
- 5. For `datasaur` provider: `datasaur/base-url`.
124
- 6. For other providers: `provider/model-name`.
133
+ model_id (str): The model id to parse. Must be in the format defined in the following page:
134
+ https://gdplabs.gitbook.io/sdk/resources/supported-models
125
135
  validation_map (dict[str, set[str]] | None, optional): An optional dictionary that maps provider names to
126
136
  sets of valid model names. For the defined model providers, the model names will be validated against
127
137
  the set of valid model names. For the undefined model providers, the model name will not be validated.
@@ -137,11 +147,6 @@ class ModelId(BaseModel):
137
147
  """Convert the ModelId object to a string.
138
148
 
139
149
  Returns:
140
- str: The string representation of the ModelId object. The format is as follows:
141
- 1. For `azure-openai` provider: `azure-openai/azure-endpoint:azure-deployment`.
142
- 2. For `openai-compatible` provider: `openai-compatible/base-url:model-name`.
143
- 3. For `langchain` provider: `langchain/<package>.<class>:model-name`.
144
- 4. For `litellm` provider: `litellm/provider/model-name`.
145
- 5. For `datasaur` provider: `datasaur/base-url`.
146
- 6. For other providers: `provider/model-name`.
150
+ str: The string representation of the ModelId object. The format is defined in the following page:
151
+ https://gdplabs.gitbook.io/sdk/resources/supported-models
147
152
  """
Binary file
gllm_inference.pyi CHANGED
@@ -27,6 +27,7 @@ import gllm_inference.lm_invoker.DatasaurLMInvoker
27
27
  import gllm_inference.lm_invoker.GoogleLMInvoker
28
28
  import gllm_inference.lm_invoker.LangChainLMInvoker
29
29
  import gllm_inference.lm_invoker.LiteLLMLMInvoker
30
+ import gllm_inference.lm_invoker.OpenAIChatCompletionsLMInvoker
30
31
  import gllm_inference.lm_invoker.OpenAICompatibleLMInvoker
31
32
  import gllm_inference.lm_invoker.OpenAILMInvoker
32
33
  import gllm_inference.lm_invoker.XAILMInvoker
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: gllm-inference-binary
3
- Version: 0.5.33
3
+ Version: 0.5.35
4
4
  Summary: A library containing components related to model inferences in Gen AI applications.
5
5
  Author-email: Henry Wicaksono <henry.wicaksono@gdplabs.id>, Resti Febrina <resti.febrina@gdplabs.id>
6
6
  Requires-Python: <3.14,>=3.11
@@ -57,12 +57,12 @@ A library containing components related to model inferences in Gen AI applicatio
57
57
  ## Installation
58
58
 
59
59
  ### Prerequisites
60
- - Python 3.11+ - [Install here](https://www.python.org/downloads/)
61
- - Pip (if using Pip) - [Install here](https://pip.pypa.io/en/stable/installation/)
62
- - Poetry 1.8.1+ (if using Poetry) - [Install here](https://python-poetry.org/docs/#installation)
63
- - Git (if using Git) - [Install here](https://git-scm.com/downloads)
64
- - For git installation:
65
- - Access to the [GDP Labs SDK github repository](https://github.com/GDP-ADMIN/gen-ai-internal)
60
+ 1. Python 3.11+ - [Install here](https://www.python.org/downloads/)
61
+ 2. Pip (if using Pip) - [Install here](https://pip.pypa.io/en/stable/installation/)
62
+ 3. Poetry (automatically installed via Makefile) - [Install here](https://python-poetry.org/docs/#installation)
63
+ 4. Git (if using Git) - [Install here](https://git-scm.com/downloads)
64
+ 5. gcloud CLI (for authentication) - [Install here](https://cloud.google.com/sdk/docs/install)
65
+ 6. For git installation, access to the [GDP Labs SDK github repository](https://github.com/GDP-ADMIN/gl-sdk)
66
66
 
67
67
  ### 1. Installation from Artifact Registry
68
68
  Choose one of the following methods to install the package:
@@ -91,24 +91,44 @@ Available extras:
91
91
  - `openai`: Install OpenAI models dependencies
92
92
  - `twelvelabs`: Install TwelveLabs models dependencies
93
93
 
94
- ## Managing Dependencies
94
+ ## Local Development Setup
95
+
96
+ ### Quick Setup (Recommended)
97
+ For local development with editable gllm packages, use the provided Makefile:
98
+
99
+ ```bash
100
+ # Complete setup: installs Poetry, configures auth, installs packages, sets up pre-commit
101
+ make setup
102
+ ```
103
+
104
+ The following are the available Makefile targets:
105
+
106
+ 1. `make setup` - Complete development setup (recommended for new developers)
107
+ 2. `make install-poetry` - Install or upgrade Poetry to the latest version
108
+ 3. `make auth` - Configure authentication for internal repositories
109
+ 4. `make install` - Install all dependencies
110
+ 5. `make install-pre-commit` - Set up pre-commit hooks
111
+ 6. `make update` - Update dependencies
112
+ ### Manual Development Setup (Legacy)
113
+ If you prefer to manage dependencies manually:
114
+
95
115
  1. Go to root folder of `gllm-inference` module, e.g. `cd libs/gllm-inference`.
96
116
  2. Run `poetry shell` to create a virtual environment.
97
117
  3. Run `poetry lock` to create a lock file if you haven't done it yet.
98
118
  4. Run `poetry install` to install the `gllm-inference` requirements for the first time.
99
119
  5. Run `poetry update` if you update any dependency module version at `pyproject.toml`.
100
120
 
121
+
101
122
  ## Contributing
102
123
  Please refer to this [Python Style Guide](https://docs.google.com/document/d/1uRggCrHnVfDPBnG641FyQBwUwLoFw0kTzNqRm92vUwM/edit?usp=sharing)
103
124
  to get information about code style, documentation standard, and SCA that you need to use when contributing to this project
104
125
 
105
- 1. Activate `pre-commit` hooks using `pre-commit install`
106
- 2. Run `poetry shell` to create a virtual environment.
107
- 3. Run `poetry lock` to create a lock file if you haven't done it yet.
108
- 4. Run `poetry install` to install the `gllm-inference` requirements for the first time.
109
- 5. Run `which python` to get the path to be referenced at Visual Studio Code interpreter path (`Ctrl`+`Shift`+`P` or `Cmd`+`Shift`+`P`)
110
- 6. Try running the unit test to see if it's working:
126
+ ### Getting Started with Development
127
+ 1. Clone the repository and navigate to the gllm-inference directory
128
+ 2. Run `make setup` to set up your development environment
129
+ 3. Run `which python` to get the path to be referenced at Visual Studio Code interpreter path (`Ctrl`+`Shift`+`P` or `Cmd`+`Shift`+`P`)
130
+ 4. Try running the unit test to see if it's working:
111
131
  ```bash
112
132
  poetry run pytest -s tests/unit_tests/
113
133
  ```
114
-
134
+ 5. When you want to update the dependencies, run `make update`
@@ -1,15 +1,15 @@
1
- gllm_inference.cp312-win_amd64.pyd,sha256=vZtLh5mzjg5xWtPsQSq54ju3c13bxt6DBnknXYq85wo,3180032
2
- gllm_inference.pyi,sha256=bvMQNMzysfZtXgjW4ZX0KwSOV4uroNakpE0NUHKCMmk,4199
1
+ gllm_inference.cp312-win_amd64.pyd,sha256=UXJ_p2_ofLPoUo6TTJB5JTovweDoFHa-HzqG6fD_19g,3243008
2
+ gllm_inference.pyi,sha256=bOGYhW7SnMMPCLRRlkVbSq8I8xn8YwHssI1zZoD1lVU,4263
3
3
  gllm_inference/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
- gllm_inference/constants.pyi,sha256=KQmondDEkHK2P249ymmce3SdutVrx8kYm4v1eTCkW9U,277
4
+ gllm_inference/constants.pyi,sha256=1OBoHfeWfW9bXH9kStNEH__MGnGp--jLfyheAeQnogY,302
5
5
  gllm_inference/builder/__init__.pyi,sha256=-bw1uDx7CAM7pkvjvb1ZXku9zXlQ7aEAyC83KIn3bz8,506
6
- gllm_inference/builder/build_em_invoker.pyi,sha256=Mh1vRoJhsqc8hX4jUdopV14Fn44ql27NB7xbGjoHJtE,6020
7
- gllm_inference/builder/build_lm_invoker.pyi,sha256=p63iuVBOOpNizItGK6HDxYDrgXdovtfSe0VrvrEd-PA,7047
8
- gllm_inference/builder/build_lm_request_processor.pyi,sha256=0pJINCP4nnXVwuhIbhsaiwzjX8gohQt2oqXFZhTFSUs,4584
6
+ gllm_inference/builder/build_em_invoker.pyi,sha256=r4p0T9g_831Fq0youhxdMrQMWkzARw-PSahMu83ZzQo,5762
7
+ gllm_inference/builder/build_lm_invoker.pyi,sha256=HvQICF-qvOTzfXZUqhi7rlwcpkMZpxaC-8QZmhnXKzI,7466
8
+ gllm_inference/builder/build_lm_request_processor.pyi,sha256=H7Rg88e7PTTCtuyY64r333moTmh4-ypOwgnG10gkEdY,4232
9
9
  gllm_inference/builder/build_output_parser.pyi,sha256=sgSTrzUmSRxPzUUum0fDU7A3NXYoYhpi6bEx4Q2XMnA,965
10
10
  gllm_inference/catalog/__init__.pyi,sha256=HWgPKWIzprpMHRKe_qN9BZSIQhVhrqiyjLjIXwvj1ho,291
11
11
  gllm_inference/catalog/catalog.pyi,sha256=eWPqgQKi-SJGHabi_XOTEKpAj96OSRypKsb5ZEC1VWU,4911
12
- gllm_inference/catalog/lm_request_processor_catalog.pyi,sha256=GemCEjFRHNChtNOfbyXSVsJiA3klOCAe_X11fnymhYs,5540
12
+ gllm_inference/catalog/lm_request_processor_catalog.pyi,sha256=FiveqPDkV58XbDO2znXL-Ix5tFbZwNiVnitlEa90YOY,5536
13
13
  gllm_inference/catalog/prompt_builder_catalog.pyi,sha256=iViWB4SaezzjQY4UY1YxeoXUNxqxa2cTJGaD9JSx4Q8,3279
14
14
  gllm_inference/em_invoker/__init__.pyi,sha256=pmbsjmsqXwfe4WPykMnrmasKrYuylJWnf2s0pbo0ioM,997
15
15
  gllm_inference/em_invoker/azure_openai_em_invoker.pyi,sha256=SfJPC_PJGiEfWS9JH5kRQPJztsR7jRhwVuETqdY-JsQ,5021
@@ -17,8 +17,8 @@ gllm_inference/em_invoker/bedrock_em_invoker.pyi,sha256=UqodtpDmE7fEgpctXEETIlZG
17
17
  gllm_inference/em_invoker/em_invoker.pyi,sha256=YDYJ8TGScsz5Gg-OBnEENN1tI1RYvwoddypxUr6SAWw,5191
18
18
  gllm_inference/em_invoker/google_em_invoker.pyi,sha256=q69kdVuE44ZqziQ8BajFYZ1tYn-MPjKjzXS9cRh4oAo,6951
19
19
  gllm_inference/em_invoker/langchain_em_invoker.pyi,sha256=nhX6LynrjhfySEt_44OlLoSBd15hoz3giWyNM9CYLKY,3544
20
- gllm_inference/em_invoker/openai_compatible_em_invoker.pyi,sha256=zEYOBDXKQhvcMGer9DYDu50_3KRDjYyN8-JgpBIFPOI,5456
21
- gllm_inference/em_invoker/openai_em_invoker.pyi,sha256=0TDIQa-5UwsPcVxgkze-QJJWrt-ToakAKbuAk9TW5SM,4746
20
+ gllm_inference/em_invoker/openai_compatible_em_invoker.pyi,sha256=S7OHYa1VF24tiv46VCjHN3zZtHDNr5XT_k1acti54jY,2889
21
+ gllm_inference/em_invoker/openai_em_invoker.pyi,sha256=jqsj9rlNW13XSkDUpNwSIUhiQcc8U1Py7hHQ1FZS5CM,6280
22
22
  gllm_inference/em_invoker/twelevelabs_em_invoker.pyi,sha256=MMVgSnjMXksdhSDXIi3vOULIXnjbhtq19eR5LPnUmGo,5446
23
23
  gllm_inference/em_invoker/voyage_em_invoker.pyi,sha256=vdB_qS8QKrCcb-HtXwKZS4WW1R1wGzpMBFmOKC39sjU,5619
24
24
  gllm_inference/em_invoker/langchain/__init__.pyi,sha256=aOTlRvS9aG1tBErjsmhe75s4Sq-g2z9ArfGqNW7QyEs,151
@@ -35,17 +35,18 @@ gllm_inference/exceptions/__init__.pyi,sha256=nXOqwsuwUgsnBcJEANVuxbZ1nDfcJ6-pKU
35
35
  gllm_inference/exceptions/error_parser.pyi,sha256=4aiJZhBzBOqlhdmpvaCvildGy7_XxlJzQpe3PzGt8eE,2040
36
36
  gllm_inference/exceptions/exceptions.pyi,sha256=6y3ECgHAStqMGgQv8Dv-Ui-5PDD07mSj6qaRZeSWea4,5857
37
37
  gllm_inference/exceptions/provider_error_map.pyi,sha256=4AsAgbXAh91mxEW2YiomEuhBoeSNeAIo9WbT9WK8gQk,1233
38
- gllm_inference/lm_invoker/__init__.pyi,sha256=eE_HDCl9A135mi6mtIV55q-T9J1O8OpbMcqWuny3w9A,1214
38
+ gllm_inference/lm_invoker/__init__.pyi,sha256=jG1xc5fTOeIgeKKVYSnsMzQThKk9kTW38yO_MYtv540,1387
39
39
  gllm_inference/lm_invoker/anthropic_lm_invoker.pyi,sha256=5fscLpROscxjBNP13GmcU9I83YiZH-pb42FzQ2JzGBA,17575
40
40
  gllm_inference/lm_invoker/azure_openai_lm_invoker.pyi,sha256=Tcmr0OfEk3As4nmL_lCoQzVB9qTQ2BLASRFhcObvrPY,15286
41
41
  gllm_inference/lm_invoker/bedrock_lm_invoker.pyi,sha256=ptyHTm1szPJEpQObdrsxHpbTkchCWE6K-YmVTmbdhvM,13037
42
42
  gllm_inference/lm_invoker/datasaur_lm_invoker.pyi,sha256=sR1vSTifBykASzAGMYn7nJVxTEwXMFz-Xa0rQjXzb6A,9482
43
43
  gllm_inference/lm_invoker/google_lm_invoker.pyi,sha256=Zkt-BdOZT106mn07_7krBQ5GiXsp9z9aoHs_d96P4lg,17482
44
- gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=2pghfayLQxNJ1UEJZGPCro_sfFq0u4_RFSupcSsNkg8,13972
45
- gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=7OJSwv0FCg9Hf4wxtdHcblCcFYu4SqPiMACFH-ZM1c0,13489
44
+ gllm_inference/lm_invoker/langchain_lm_invoker.pyi,sha256=dKN0flBxjiCUWW1QOz8HjoRfKpqXjNEz1pm5cS-40zA,13966
45
+ gllm_inference/lm_invoker/litellm_lm_invoker.pyi,sha256=cnkg4Lk9sRqXylWQq5G3ujD_DzcVX88DCfEpA5hkTFA,13487
46
46
  gllm_inference/lm_invoker/lm_invoker.pyi,sha256=JQFExiblFbCMQ3HXOE62Ho1VTMdmxf_CZ-edGSQbCrQ,8312
47
- gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=7Tnq-4Cl46sPDLiRQLQBDFuk_INhm0BIknXZXqXYb-8,15316
48
- gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=NK-HJXvRrQjDgfpqXGUBi_rf5iTiMjJvxfRsqj_JT_I,22512
47
+ gllm_inference/lm_invoker/openai_chat_completions_lm_invoker.pyi,sha256=9Nkun_k_i1VDx3A0ixOosAYp_8WpfzjW-UBI-T_3g6M,16014
48
+ gllm_inference/lm_invoker/openai_compatible_lm_invoker.pyi,sha256=HkHIDtuw1IIwDglOelAy6VKwKmslEVTOsgC1Ememsq0,3886
49
+ gllm_inference/lm_invoker/openai_lm_invoker.pyi,sha256=GPzPZeKiopSk0zLydQY0bQ3V3-oka9BNtFCepdYiEl4,24065
49
50
  gllm_inference/lm_invoker/xai_lm_invoker.pyi,sha256=6TwO3KU1DBWoe4UAsz97MY1yKBf-N38WjbrBqCmWCNU,15992
50
51
  gllm_inference/lm_invoker/batch/__init__.pyi,sha256=vJOTHRJ83oq8Bq0UsMdID9_HW5JAxr06gUs4aPRZfEE,130
51
52
  gllm_inference/lm_invoker/batch/batch_operations.pyi,sha256=o2U17M41RKVFW6j_oxy-SxU1JqUtVt75pKRxrqXzorE,5499
@@ -56,7 +57,7 @@ gllm_inference/lm_invoker/schema/datasaur.pyi,sha256=GLv6XAwKtWyRrX6EsbEufYjkPff
56
57
  gllm_inference/lm_invoker/schema/google.pyi,sha256=elXHrUMS46pbTsulk7hBXVVFcT022iD-_U_I590xeV8,529
57
58
  gllm_inference/lm_invoker/schema/langchain.pyi,sha256=2OJOUQPlGdlUbIOTDOyiWDBOMm3MoVX-kU2nK0zQsF0,452
58
59
  gllm_inference/lm_invoker/schema/openai.pyi,sha256=eDbU3lGOeJgwuaEVv2Zd0amylGX4e-d78_97kPN1cuQ,2291
59
- gllm_inference/lm_invoker/schema/openai_compatible.pyi,sha256=fVLRIrOvLJjhY7qPUgC3HRFoOFa7XimWLjr2EOo5qmQ,1226
60
+ gllm_inference/lm_invoker/schema/openai_chat_completions.pyi,sha256=8oYQ2VC7L4WQpBAdUEDNIJKvcSGEdu4z_iIuy_TQpeE,1224
60
61
  gllm_inference/lm_invoker/schema/xai.pyi,sha256=jpC6ZSBDUltzm9GjD6zvSFIPwqizn_ywLnjvwSa7KuU,663
61
62
  gllm_inference/model/__init__.pyi,sha256=JKQB0wVSVYD-_tdRkG7N_oEVAKGCcoBw0BUOUMLieFo,602
62
63
  gllm_inference/model/em/__init__.pyi,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -83,16 +84,18 @@ gllm_inference/prompt_formatter/prompt_formatter.pyi,sha256=hAc6rxWc6JSYdD-OypLi
83
84
  gllm_inference/request_processor/__init__.pyi,sha256=giEme2WFQhgyKiBZHhSet0_nKSCHwGy-_2p6NRzg0Zc,231
84
85
  gllm_inference/request_processor/lm_request_processor.pyi,sha256=0fy1HyILCVDw6y46E-7tLnQTRYx4ppeRMe0QP6t9Jyw,5990
85
86
  gllm_inference/request_processor/uses_lm_mixin.pyi,sha256=LYHq-zLoXEMel1LfVdYv7W3BZ8WtBLo_WWFjRf10Yto,6512
86
- gllm_inference/schema/__init__.pyi,sha256=sVhsDa6arWrRRFK9WRj9l4QX20gRnDPUc42vfUDJuoc,1695
87
+ gllm_inference/schema/__init__.pyi,sha256=qGvSWs8JujR1yIfUWFb39bABLWbYNRVFFQay0jGgt9A,1951
88
+ gllm_inference/schema/activity.pyi,sha256=LUfVG3PVhHjHc5J6a1yBX5td-tud6NUQxSdxZzvJt2I,220
87
89
  gllm_inference/schema/attachment.pyi,sha256=9zgAjGXBjLfzPGaKi68FMW6b5mXdEA352nDe-ynOSvY,3385
88
90
  gllm_inference/schema/code_exec_result.pyi,sha256=WQ-ARoGM9r6nyRX-A0Ro1XKiqrc9R3jRYXZpu_xo5S4,573
89
91
  gllm_inference/schema/config.pyi,sha256=NVmjQK6HipIE0dKSfx12hgIC0O-S1HEcAc-TWlXAF5A,689
90
92
  gllm_inference/schema/enums.pyi,sha256=3XF3HhiS8IL-o7NfGA4tZ4XrZE_8usCbUX-4yplqB6c,937
93
+ gllm_inference/schema/events.pyi,sha256=cssaHUixfmPxfn8Cj8IWvmuIqobPbPH9flESGUaCExQ,1606
91
94
  gllm_inference/schema/lm_input.pyi,sha256=HxQiZgY7zcXh_Dw8nK8LSeBTZEHMPZVwmPmnfgSsAbs,197
92
95
  gllm_inference/schema/lm_output.pyi,sha256=xafvq38SJkon0QfkuhswCX8ql777el5dUmzbbhLyOvA,2222
93
96
  gllm_inference/schema/mcp.pyi,sha256=4SgQ83pEowfWm2p-w9lupV4NayqqVBOy7SuYxIFeWRs,1045
94
97
  gllm_inference/schema/message.pyi,sha256=jJV6A0ihEcun2OhzyMtNkiHnf7d6v5R-GdpTBGfJ0AQ,2272
95
- gllm_inference/schema/model_id.pyi,sha256=h2nAmYgUYjF8MjT9pTnRfrevYuSHeksEZHvizkmu6n8,5638
98
+ gllm_inference/schema/model_id.pyi,sha256=NuaS4XlKDRJJezj45CEzn8reDDeII9XeRARmM5SZPqA,5408
96
99
  gllm_inference/schema/reasoning.pyi,sha256=jbPxkDRHt0Vt-zdcc8lTT1l2hIE1Jm3HIHeNd0hfXGo,577
97
100
  gllm_inference/schema/token_usage.pyi,sha256=WJiGQyz5qatzBK2b-sABLCyTRLCBbAvxCRcqSJOzu-8,3025
98
101
  gllm_inference/schema/tool_call.pyi,sha256=OWT9LUqs_xfUcOkPG0aokAAqzLYYDkfnjTa0zOWvugk,403
@@ -103,7 +106,7 @@ gllm_inference/utils/io_utils.pyi,sha256=Eg7dvHWdXslTKdjh1j3dG50i7r35XG2zTmJ9XXv
103
106
  gllm_inference/utils/langchain.pyi,sha256=4AwFiVAO0ZpdgmqeC4Pb5NJwBt8vVr0MSUqLeCdTscc,1194
104
107
  gllm_inference/utils/validation.pyi,sha256=-RdMmb8afH7F7q4Ao7x6FbwaDfxUHn3hA3WiOgzB-3s,397
105
108
  gllm_inference.build/.gitignore,sha256=aEiIwOuxfzdCmLZe4oB1JsBmCUxwG8x-u-HBCV9JT8E,1
106
- gllm_inference_binary-0.5.33.dist-info/METADATA,sha256=1P1byafm__5eeo7uRyb-SxSySmI3QvThB1PNYLiRLoY,4971
107
- gllm_inference_binary-0.5.33.dist-info/WHEEL,sha256=x5rgv--I0NI0IT1Lh9tN1VG2cI637p3deednwYLKnxc,96
108
- gllm_inference_binary-0.5.33.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
109
- gllm_inference_binary-0.5.33.dist-info/RECORD,,
109
+ gllm_inference_binary-0.5.35.dist-info/METADATA,sha256=8BM_m-hKxOYbudW-KKFBmrwJOXeMrWKzueq-EZ_KIGo,5770
110
+ gllm_inference_binary-0.5.35.dist-info/WHEEL,sha256=x5rgv--I0NI0IT1Lh9tN1VG2cI637p3deednwYLKnxc,96
111
+ gllm_inference_binary-0.5.35.dist-info/top_level.txt,sha256=FpOjtN80F-qVNgbScXSEyqa0w09FYn6301iq6qt69IQ,15
112
+ gllm_inference_binary-0.5.35.dist-info/RECORD,,