inferencesh 0.2.33.tar.gz → 0.2.35.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- {inferencesh-0.2.33/src/inferencesh.egg-info → inferencesh-0.2.35}/PKG-INFO +1 -1
- {inferencesh-0.2.33 → inferencesh-0.2.35}/pyproject.toml +1 -1
- {inferencesh-0.2.33 → inferencesh-0.2.35}/src/inferencesh/models/llm.py +21 -29
- {inferencesh-0.2.33 → inferencesh-0.2.35/src/inferencesh.egg-info}/PKG-INFO +1 -1
- {inferencesh-0.2.33 → inferencesh-0.2.35}/LICENSE +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/README.md +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/setup.cfg +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/setup.py +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/src/inferencesh/__init__.py +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/src/inferencesh/models/__init__.py +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/src/inferencesh/models/base.py +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/src/inferencesh/models/file.py +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/src/inferencesh/utils/__init__.py +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/src/inferencesh/utils/download.py +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/src/inferencesh/utils/storage.py +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/src/inferencesh.egg-info/SOURCES.txt +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/src/inferencesh.egg-info/dependency_links.txt +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/src/inferencesh.egg-info/entry_points.txt +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/src/inferencesh.egg-info/requires.txt +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/src/inferencesh.egg-info/top_level.txt +0 -0
- {inferencesh-0.2.33 → inferencesh-0.2.35}/tests/test_sdk.py +0 -0
src/inferencesh/models/llm.py

```diff
@@ -23,31 +23,27 @@ class Message(BaseAppInput):
 
 class ContextMessage(BaseAppInput):
     role: ContextMessageRole = Field(
-        description="
+        description="the role of the message. user, assistant, or system",
     )
     text: str = Field(
-        description="
+        description="the text content of the message"
     )
     image: Optional[File] = Field(
-        description="
+        description="the image file of the message",
         default=None
     )
 
 class BaseLLMInput(BaseAppInput):
     """Base class with common LLM fields."""
     system_prompt: str = Field(
-        description="
-        default="
+        description="the system prompt to use for the model",
+        default="you are a helpful assistant that can answer questions and help with tasks.",
         examples=[
-            "
-            "You are a certified medical professional who can provide accurate health information.",
-            "You are a certified financial advisor who can give sound investment guidance.",
-            "You are a certified cybersecurity expert who can explain security best practices.",
-            "You are a certified environmental scientist who can discuss climate and sustainability.",
+            "you are a helpful assistant that can answer questions and help with tasks.",
         ]
     )
     context: List[ContextMessage] = Field(
-        description="
+        description="the context to use for the model",
         default=[],
         examples=[
             [
@@ -57,37 +53,35 @@ class BaseLLMInput(BaseAppInput):
         ]
     )
     text: str = Field(
-        description="
+        description="the user prompt to use for the model",
         examples=[
-            "
-            "What is the weather like today?",
-            "Can you help me write a poem about spring?",
-            "Explain quantum computing in simple terms"
+            "write a haiku about artificial general intelligence"
         ]
     )
-    temperature: float = Field(default=0.7)
-    top_p: float = Field(default=0.95)
-    max_tokens: int = Field(default=4096)
+    temperature: float = Field(default=0.7, ge=0.0, le=1.0)
+    top_p: float = Field(default=0.95, ge=0.0, le=1.0)
     context_size: int = Field(default=4096)
 
 class ImageCapabilityMixin(BaseModel):
     """Mixin for models that support image inputs."""
     image: Optional[File] = Field(
-        description="
-        default=None
+        description="the image to use for the model",
+        default=None,
+        content_type=["image/*"],
+        max_size_mb=10
     )
 
 class ReasoningCapabilityMixin(BaseModel):
     """Mixin for models that support reasoning."""
     reasoning: bool = Field(
-        description="
+        description="enable step-by-step reasoning",
         default=False
     )
 
 class ToolsCapabilityMixin(BaseModel):
     """Mixin for models that support tool/function calling."""
     tools: Optional[List[Dict[str, Any]]] = Field(
-        description="
+        description="tool definitions for function calling",
         default=None
     )
 
```
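The functional changes in these input hunks: `temperature` and `top_p` now carry `ge`/`le` validation bounds, `max_tokens` is dropped from the input model entirely, and `ImageCapabilityMixin` gains upload constraints (`content_type`, `max_size_mb`). A minimal sketch of the new bounds behavior, assuming the pydantic-style validation used throughout `llm.py` (`SamplingParams` is an illustrative stand-in, not a class from the package):

```python
from pydantic import BaseModel, Field, ValidationError

# Illustrative stand-in carrying only the two constrained fields;
# the real class is BaseLLMInput, which has prompt/context fields too.
class SamplingParams(BaseModel):
    temperature: float = Field(default=0.7, ge=0.0, le=1.0)
    top_p: float = Field(default=0.95, ge=0.0, le=1.0)

SamplingParams(temperature=0.2, top_p=0.9)  # accepted

try:
    SamplingParams(temperature=1.5)  # rejected: exceeds le=1.0
except ValidationError as exc:
    print(exc)
```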
```diff
@@ -112,26 +106,26 @@ class LLMUsage(BaseAppOutput):
 
 class BaseLLMOutput(BaseAppOutput):
     """Base class for LLM outputs with common fields."""
-    response: str = Field(description="
+    response: str = Field(description="the generated text response")
 
 class LLMUsageMixin(BaseModel):
     """Mixin for models that provide token usage statistics."""
     usage: Optional[LLMUsage] = Field(
-        description="
+        description="token usage statistics",
         default=None
     )
 
 class ReasoningMixin(BaseModel):
     """Mixin for models that support reasoning."""
     reasoning: Optional[str] = Field(
-        description="
+        description="the reasoning output of the model",
         default=None
     )
 
 class ToolCallsMixin(BaseModel):
     """Mixin for models that support tool calls."""
     tool_calls: Optional[List[Dict[str, Any]]] = Field(
-        description="
+        description="tool calls for function calling",
         default=None
     )
 
```
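The output classes follow the same capability-mixin pattern as the inputs. A hypothetical composition, assuming these names are importable from `inferencesh.models.llm` (per the file path above); `ToolLLMOutput` and the field values are illustrative, not package API:

```python
from inferencesh.models.llm import BaseLLMOutput, LLMUsageMixin, ToolCallsMixin

# Illustrative composition: only the mixin names come from the diff,
# ToolLLMOutput itself is not a class defined in the package.
class ToolLLMOutput(BaseLLMOutput, LLMUsageMixin, ToolCallsMixin):
    pass

out = ToolLLMOutput(
    response="Looking that up with the weather tool.",
    tool_calls=[{"name": "get_weather", "arguments": {"city": "Paris"}}],
)
print(out.usage)       # None by default (from LLMUsageMixin)
print(out.tool_calls)  # [{'name': 'get_weather', ...}]
```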
```diff
@@ -588,7 +582,6 @@ def stream_generate(
     tool_choice: Optional[Dict[str, Any]] = None,
     temperature: float = 0.7,
     top_p: float = 0.95,
-    max_tokens: int = 4096,
     stop: Optional[List[str]] = None,
     verbose: bool = False,
     output_cls: type[BaseLLMOutput] = LLMOutput,
@@ -612,7 +605,6 @@ def stream_generate(
         "stream": True,
         "temperature": temperature,
         "top_p": top_p,
-        "max_tokens": max_tokens,
         "stop": stop
     }
     if tools is not None:
```
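`max_tokens` is removed from both the `stream_generate` signature and the request parameters it assembles, so callers still passing it as a keyword argument will now raise a `TypeError`; output length is presumably bounded by `context_size` instead. A sketch of the parameter assembly after this change (the dict literal mirrors the hunk above; `build_params` is an illustrative helper, not package API):

```python
from typing import Any, Dict, List, Optional

# Illustrative helper mirroring the dict literal in the hunk above.
def build_params(
    temperature: float = 0.7,
    top_p: float = 0.95,
    stop: Optional[List[str]] = None,
    tools: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]:
    params: Dict[str, Any] = {
        "stream": True,
        "temperature": temperature,
        "top_p": top_p,
        "stop": stop,  # note: "max_tokens" is no longer sent
    }
    if tools is not None:
        params["tools"] = tools
    return params

print(build_params(stop=["</s>"]))
```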