inferencesh 0.4.2__tar.gz → 0.4.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of inferencesh might be problematic.
- {inferencesh-0.4.2/src/inferencesh.egg-info → inferencesh-0.4.4}/PKG-INFO +1 -1
- {inferencesh-0.4.2 → inferencesh-0.4.4}/pyproject.toml +1 -1
- {inferencesh-0.4.2 → inferencesh-0.4.4}/src/inferencesh/models/base.py +81 -3
- {inferencesh-0.4.2 → inferencesh-0.4.4}/src/inferencesh/models/llm.py +3 -1
- {inferencesh-0.4.2 → inferencesh-0.4.4/src/inferencesh.egg-info}/PKG-INFO +1 -1
- {inferencesh-0.4.2 → inferencesh-0.4.4}/LICENSE +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/README.md +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/setup.cfg +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/src/inferencesh/__init__.py +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/src/inferencesh/client.py +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/src/inferencesh/models/__init__.py +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/src/inferencesh/models/file.py +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/src/inferencesh/utils/__init__.py +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/src/inferencesh/utils/download.py +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/src/inferencesh/utils/storage.py +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/src/inferencesh.egg-info/SOURCES.txt +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/src/inferencesh.egg-info/dependency_links.txt +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/src/inferencesh.egg-info/entry_points.txt +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/src/inferencesh.egg-info/requires.txt +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/src/inferencesh.egg-info/top_level.txt +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/tests/test_client.py +0 -0
- {inferencesh-0.4.2 → inferencesh-0.4.4}/tests/test_sdk.py +0 -0
src/inferencesh/models/base.py

@@ -1,10 +1,23 @@
-from typing import Any, Dict, List
+from typing import Any, Dict, List, Optional
 from pydantic import BaseModel, ConfigDict
 import inspect
 import ast
 import textwrap
 from collections import OrderedDict
-
+from inferencesh.models.file import File
+from pydantic import Field
+
+class Metadata(BaseModel):
+    app_id: Optional[str] = None
+    app_version_id: Optional[str] = None
+    app_variant: Optional[str] = None
+    worker_id: Optional[str] = None
+    def update(self, other: Dict[str, Any] | BaseModel) -> None:
+        update_dict = other.model_dump() if isinstance(other, BaseModel) else other
+        for key, value in update_dict.items():
+            setattr(self, key, value)
+    class Config:
+        extra = "allow"
 
 class OrderedSchemaModel(BaseModel):
     """A base model that ensures the JSON schema properties and required fields are in the order of field definition."""
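A minimal usage sketch of the new Metadata model, assuming it is importable from inferencesh.models.base as added above; the id values are invented for illustration:

from inferencesh.models.base import Metadata

meta = Metadata(app_id="app_123")          # hypothetical id, not a real value
meta.update({"worker_id": "worker_7"})     # dict update assigns attributes directly
print(meta.app_id, meta.worker_id)         # -> app_123 worker_7

When a BaseModel is passed instead of a dict, update() calls model_dump() first, which serializes every field including unset ones, so all four attributes are overwritten rather than merged.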
@@ -91,4 +104,69 @@ class BaseApp(BaseModel):
         raise NotImplementedError("run method must be implemented")
 
     async def unload(self):
-        pass
+        pass
+
+
+# Mixins
+
+class OptionalImageFieldMixin(BaseModel):
+    image: Optional[File] = Field(
+        description="the image to use for the model",
+        default=None,
+        contentMediaType="image/*",
+    )
+
+class RequiredImageFieldMixin(BaseModel):
+    image: File = Field(
+        description="the image to use for the model",
+        contentMediaType="image/*",
+    )
+
+class OptionalVideoFieldMixin(BaseModel):
+    video: Optional[File] = Field(
+        description="the video to use for the model",
+        default=None,
+        contentMediaType="video/*",
+    )
+
+class RequiredVideoFieldMixin(BaseModel):
+    video: File = Field(
+        description="the video to use for the model",
+        contentMediaType="video/*",
+    )
+
+class OptionalAudioFieldMixin(BaseModel):
+    audio: Optional[File] = Field(
+        description="the audio to use for the model",
+        default=None,
+        contentMediaType="audio/*",
+    )
+
+class RequiredAudioFieldMixin(BaseModel):
+    audio: File = Field(
+        description="the audio to use for the model",
+        contentMediaType="audio/*",
+    )
+
+class OptionalTextFieldMixin(BaseModel):
+    text: Optional[str] = Field(
+        description="the text to use for the model",
+        default=None,
+    )
+
+class RequiredTextFieldMixin(BaseModel):
+    text: str = Field(
+        description="the text to use for the model",
+    )
+
+class OptionalFileFieldMixin(BaseModel):
+    file: Optional[File] = Field(
+        description="the file to use for the model",
+        default=None,
+    )
+
+class RequiredFileFieldMixin(BaseModel):
+    file: Optional[File] = Field(
+        description="the file to use for the model",
+        default=None,
+    )
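A sketch of how these single-field mixins might be composed into an app input model; the class name ExampleInput and this particular combination are illustrative, not part of the SDK:

from inferencesh.models.base import (
    OrderedSchemaModel,
    OptionalImageFieldMixin,
    RequiredTextFieldMixin,
)

class ExampleInput(OptionalImageFieldMixin, RequiredTextFieldMixin, OrderedSchemaModel):
    """Inherits a required text field and an optional image field."""

inp = ExampleInput(text="hello world")   # image is optional and defaults to None
print(inp.text, inp.image)

Each mixin contributes exactly one typed field, so input schemas can be assembled by inheritance instead of redeclaring the same image/video/audio/text/file fields in every app.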
src/inferencesh/models/llm.py

@@ -584,6 +584,7 @@ def stream_generate(
     stop: Optional[List[str]] = None,
     verbose: bool = False,
     output_cls: type[BaseLLMOutput] = LLMOutput,
+    kwargs: Optional[Dict[str, Any]] = None,
 ) -> Generator[BaseLLMOutput, None, None]:
     """Stream generate from LLaMA.cpp model with timing and usage tracking."""
 
@@ -604,7 +605,8 @@ def stream_generate(
         "stream": True,
         "temperature": temperature,
         "top_p": top_p,
-        "stop": stop
+        "stop": stop,
+        **kwargs
     }
     if tools is not None:
         completion_kwargs["tools"] = tools
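A standalone sketch of the dictionary merge the new **kwargs line performs; the extra keys and example values below are illustrative, not parameters defined by this SDK:

extra = {"seed": 42, "repeat_penalty": 1.1}   # hypothetical caller-supplied options
completion_kwargs = {
    "stream": True,
    "temperature": 0.7,
    "top_p": 0.95,
    "stop": ["</s>"],
    **extra,   # caller-supplied keys are added to (and can override) the defaults
}

Note that the new parameter defaults to None while the body unpacks it unconditionally, so a caller has to pass at least an empty dict; unpacking None with ** raises a TypeError.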