inferencesh 0.4.0.tar.gz → 0.4.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of inferencesh might be problematic.

Files changed (26)
  1. inferencesh-0.4.2/PKG-INFO +196 -0
  2. inferencesh-0.4.2/README.md +172 -0
  3. {inferencesh-0.4.0 → inferencesh-0.4.2}/pyproject.toml +1 -1
  4. {inferencesh-0.4.0 → inferencesh-0.4.2}/src/inferencesh/client.py +392 -173
  5. {inferencesh-0.4.0 → inferencesh-0.4.2}/src/inferencesh/models/file.py +6 -9
  6. inferencesh-0.4.2/src/inferencesh.egg-info/PKG-INFO +196 -0
  7. {inferencesh-0.4.0 → inferencesh-0.4.2}/src/inferencesh.egg-info/SOURCES.txt +0 -1
  8. inferencesh-0.4.0/PKG-INFO +0 -109
  9. inferencesh-0.4.0/README.md +0 -82
  10. inferencesh-0.4.0/setup.py +0 -22
  11. inferencesh-0.4.0/src/inferencesh.egg-info/PKG-INFO +0 -109
  12. {inferencesh-0.4.0 → inferencesh-0.4.2}/LICENSE +0 -0
  13. {inferencesh-0.4.0 → inferencesh-0.4.2}/setup.cfg +0 -0
  14. {inferencesh-0.4.0 → inferencesh-0.4.2}/src/inferencesh/__init__.py +0 -0
  15. {inferencesh-0.4.0 → inferencesh-0.4.2}/src/inferencesh/models/__init__.py +0 -0
  16. {inferencesh-0.4.0 → inferencesh-0.4.2}/src/inferencesh/models/base.py +0 -0
  17. {inferencesh-0.4.0 → inferencesh-0.4.2}/src/inferencesh/models/llm.py +0 -0
  18. {inferencesh-0.4.0 → inferencesh-0.4.2}/src/inferencesh/utils/__init__.py +0 -0
  19. {inferencesh-0.4.0 → inferencesh-0.4.2}/src/inferencesh/utils/download.py +0 -0
  20. {inferencesh-0.4.0 → inferencesh-0.4.2}/src/inferencesh/utils/storage.py +0 -0
  21. {inferencesh-0.4.0 → inferencesh-0.4.2}/src/inferencesh.egg-info/dependency_links.txt +0 -0
  22. {inferencesh-0.4.0 → inferencesh-0.4.2}/src/inferencesh.egg-info/entry_points.txt +0 -0
  23. {inferencesh-0.4.0 → inferencesh-0.4.2}/src/inferencesh.egg-info/requires.txt +0 -0
  24. {inferencesh-0.4.0 → inferencesh-0.4.2}/src/inferencesh.egg-info/top_level.txt +0 -0
  25. {inferencesh-0.4.0 → inferencesh-0.4.2}/tests/test_client.py +0 -0
  26. {inferencesh-0.4.0 → inferencesh-0.4.2}/tests/test_sdk.py +0 -0
inferencesh-0.4.2/PKG-INFO
@@ -0,0 +1,196 @@
+ Metadata-Version: 2.4
+ Name: inferencesh
+ Version: 0.4.2
+ Summary: inference.sh Python SDK
+ Author-email: "Inference Shell Inc." <hello@inference.sh>
+ Project-URL: Homepage, https://github.com/inference-sh/sdk
+ Project-URL: Bug Tracker, https://github.com/inference-sh/sdk/issues
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.7
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: pydantic>=2.0.0
+ Requires-Dist: tqdm>=4.67.0
+ Requires-Dist: requests>=2.31.0
+ Provides-Extra: test
+ Requires-Dist: pytest>=7.0.0; extra == "test"
+ Requires-Dist: pytest-cov>=4.0.0; extra == "test"
+ Provides-Extra: async
+ Requires-Dist: aiohttp>=3.9.0; python_version >= "3.8" and extra == "async"
+ Requires-Dist: aiofiles>=23.2.1; python_version >= "3.8" and extra == "async"
+ Dynamic: license-file
+
+ # inference.sh sdk
+
+ helper package for inference.sh python applications.
+
+ ## installation
+
+ ```bash
+ pip install inferencesh
+ ```
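The metadata above also declares optional extras; assuming pip's standard extras syntax, the async and test dependency groups install like this:

```bash
# optional extras declared in the PKG-INFO metadata above
pip install "inferencesh[async]"  # aiohttp + aiofiles (Python >= 3.8)
pip install "inferencesh[test]"   # pytest + pytest-cov
```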
+
+ ## client usage
+
+ ```python
+ from inferencesh import Inference, TaskStatus
+
+ # Create client
+ client = Inference(api_key="your-api-key")
+
+ # Simple synchronous usage
+ try:
+     task = client.run({
+         "app": "your-app",
+         "input": {"key": "value"},
+         "infra": "cloud",
+         "variant": "default"
+     })
+
+     print(f"Task ID: {task.get('id')}")
+
+     if task.get("status") == TaskStatus.COMPLETED:
+         print("✓ Task completed successfully!")
+         print(f"Output: {task.get('output')}")
+     else:
+         status = task.get("status")
+         status_name = TaskStatus(status).name if status is not None else "UNKNOWN"
+         print(f"✗ Task did not complete. Final status: {status_name}")
+
+ except Exception as exc:
+     print(f"Error: {type(exc).__name__}: {exc}")
+     raise  # Re-raise to see full traceback
+
+ # Streaming updates (recommended)
+ try:
+     for update in client.run(
+         {
+             "app": "your-app",
+             "input": {"key": "value"},
+             "infra": "cloud",
+             "variant": "default"
+         },
+         stream=True  # Enable streaming updates
+     ):
+         status = update.get("status")
+         status_name = TaskStatus(status).name if status is not None else "UNKNOWN"
+         print(f"Status: {status_name}")
+
+         if status == TaskStatus.COMPLETED:
+             print("✓ Task completed!")
+             print(f"Output: {update.get('output')}")
+             break
+         elif status == TaskStatus.FAILED:
+             print(f"✗ Task failed: {update.get('error')}")
+             break
+         elif status == TaskStatus.CANCELLED:
+             print("✗ Task was cancelled")
+             break
+
+ except Exception as exc:
+     print(f"Error: {type(exc).__name__}: {exc}")
+     raise  # Re-raise to see full traceback
+
+ # Async support
+ async def run_async():
+     from inferencesh import AsyncInference
+
+     client = AsyncInference(api_key="your-api-key")
+
+     # Simple usage
+     result = await client.run({
+         "app": "your-app",
+         "input": {"key": "value"},
+         "infra": "cloud",
+         "variant": "default"
+     })
+
+     # Stream updates
+     async for update in await client.run(
+         {
+             "app": "your-app",
+             "input": {"key": "value"},
+             "infra": "cloud",
+             "variant": "default"
+         },
+         stream=True
+     ):
+         status = update.get("status")
+         status_name = TaskStatus(status).name if status is not None else "UNKNOWN"
+         print(f"Status: {status_name}")
+ ```
+
+ ## file handling
+
+ the `File` class provides a standardized way to handle files in the inference.sh ecosystem:
+
+ ```python
+ from inferencesh import File
+
+ # Basic file creation
+ file = File(path="/path/to/file.png")
+
+ # File with explicit metadata
+ file = File(
+     path="/path/to/file.png",
+     content_type="image/png",
+     filename="custom_name.png",
+     size=1024  # in bytes
+ )
+
+ # Create from path (automatically populates metadata)
+ file = File.from_path("/path/to/file.png")
+
+ # Check if file exists
+ exists = file.exists()
+
+ # Access file metadata
+ print(file.content_type)  # automatically detected if not specified
+ print(file.size)          # file size in bytes
+ print(file.filename)      # basename of the file
+
+ # Refresh metadata (useful if the file has changed)
+ file.refresh_metadata()
+ ```
+
+ the `File` class automatically handles:
+ - mime type detection
+ - file size calculation
+ - filename extraction from path
+ - file existence checking
+
+ ## creating an app
+
+ to create an inference app, inherit from `BaseApp` and define your input/output types:
+
+ ```python
+ from inferencesh import BaseApp, BaseAppInput, BaseAppOutput, File
+
+ class AppInput(BaseAppInput):
+     image: str  # URL or file path to image
+     mask: str   # URL or file path to mask
+
+ class AppOutput(BaseAppOutput):
+     image: File
+
+ class MyApp(BaseApp):
+     async def setup(self):
+         # Initialize your model here
+         pass
+
+     async def run(self, app_input: AppInput) -> AppOutput:
+         # Process input and return output
+         result_path = "/tmp/result.png"
+         return AppOutput(image=File(path=result_path))
+
+     async def unload(self):
+         # Clean up resources
+         pass
+ ```
+
+ app lifecycle has three main methods (a minimal driver sketch follows this list):
+ - `setup()`: called when the app starts, use it to initialize models
+ - `run()`: called for each inference request
+ - `unload()`: called when shutting down, use it to free resources
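As a minimal illustration of that order, the sketch below drives the lifecycle by hand, reusing the `MyApp` and `AppInput` classes from the snippet above; the real inference.sh runtime owns this loop and calls the hooks for you, so treat it as illustration only:

```python
import asyncio

async def serve_once():
    # hypothetical hand-rolled driver, not the real runtime
    app = MyApp()
    await app.setup()  # initialize models once, before any requests
    try:
        output = await app.run(AppInput(
            image="https://example.com/image.png",  # placeholder inputs
            mask="https://example.com/mask.png",
        ))
        print(output.image.path)
    finally:
        await app.unload()  # always free resources on shutdown

asyncio.run(serve_once())
```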
inferencesh-0.4.2/README.md
@@ -0,0 +1,172 @@
+ # inference.sh sdk
+
+ helper package for inference.sh python applications.
+
+ ## installation
+
+ ```bash
+ pip install inferencesh
+ ```
+
+ ## client usage
+
+ ```python
+ from inferencesh import Inference, TaskStatus
+
+ # Create client
+ client = Inference(api_key="your-api-key")
+
+ # Simple synchronous usage
+ try:
+     task = client.run({
+         "app": "your-app",
+         "input": {"key": "value"},
+         "infra": "cloud",
+         "variant": "default"
+     })
+
+     print(f"Task ID: {task.get('id')}")
+
+     if task.get("status") == TaskStatus.COMPLETED:
+         print("✓ Task completed successfully!")
+         print(f"Output: {task.get('output')}")
+     else:
+         status = task.get("status")
+         status_name = TaskStatus(status).name if status is not None else "UNKNOWN"
+         print(f"✗ Task did not complete. Final status: {status_name}")
+
+ except Exception as exc:
+     print(f"Error: {type(exc).__name__}: {exc}")
+     raise  # Re-raise to see full traceback
+
+ # Streaming updates (recommended)
+ try:
+     for update in client.run(
+         {
+             "app": "your-app",
+             "input": {"key": "value"},
+             "infra": "cloud",
+             "variant": "default"
+         },
+         stream=True  # Enable streaming updates
+     ):
+         status = update.get("status")
+         status_name = TaskStatus(status).name if status is not None else "UNKNOWN"
+         print(f"Status: {status_name}")
+
+         if status == TaskStatus.COMPLETED:
+             print("✓ Task completed!")
+             print(f"Output: {update.get('output')}")
+             break
+         elif status == TaskStatus.FAILED:
+             print(f"✗ Task failed: {update.get('error')}")
+             break
+         elif status == TaskStatus.CANCELLED:
+             print("✗ Task was cancelled")
+             break
+
+ except Exception as exc:
+     print(f"Error: {type(exc).__name__}: {exc}")
+     raise  # Re-raise to see full traceback
+
+ # Async support
+ async def run_async():
+     from inferencesh import AsyncInference
+
+     client = AsyncInference(api_key="your-api-key")
+
+     # Simple usage
+     result = await client.run({
+         "app": "your-app",
+         "input": {"key": "value"},
+         "infra": "cloud",
+         "variant": "default"
+     })
+
+     # Stream updates
+     async for update in await client.run(
+         {
+             "app": "your-app",
+             "input": {"key": "value"},
+             "infra": "cloud",
+             "variant": "default"
+         },
+         stream=True
+     ):
+         status = update.get("status")
+         status_name = TaskStatus(status).name if status is not None else "UNKNOWN"
+         print(f"Status: {status_name}")
+ ```
+
+ ## file handling
+
+ the `File` class provides a standardized way to handle files in the inference.sh ecosystem:
+
+ ```python
+ from inferencesh import File
+
+ # Basic file creation
+ file = File(path="/path/to/file.png")
+
+ # File with explicit metadata
+ file = File(
+     path="/path/to/file.png",
+     content_type="image/png",
+     filename="custom_name.png",
+     size=1024  # in bytes
+ )
+
+ # Create from path (automatically populates metadata)
+ file = File.from_path("/path/to/file.png")
+
+ # Check if file exists
+ exists = file.exists()
+
+ # Access file metadata
+ print(file.content_type)  # automatically detected if not specified
+ print(file.size)          # file size in bytes
+ print(file.filename)      # basename of the file
+
+ # Refresh metadata (useful if the file has changed)
+ file.refresh_metadata()
+ ```
+
+ the `File` class automatically handles:
+ - mime type detection
+ - file size calculation
+ - filename extraction from path
+ - file existence checking
+
+ ## creating an app
+
+ to create an inference app, inherit from `BaseApp` and define your input/output types:
+
+ ```python
+ from inferencesh import BaseApp, BaseAppInput, BaseAppOutput, File
+
+ class AppInput(BaseAppInput):
+     image: str  # URL or file path to image
+     mask: str   # URL or file path to mask
+
+ class AppOutput(BaseAppOutput):
+     image: File
+
+ class MyApp(BaseApp):
+     async def setup(self):
+         # Initialize your model here
+         pass
+
+     async def run(self, app_input: AppInput) -> AppOutput:
+         # Process input and return output
+         result_path = "/tmp/result.png"
+         return AppOutput(image=File(path=result_path))
+
+     async def unload(self):
+         # Clean up resources
+         pass
+ ```
+
+ app lifecycle has three main methods:
+ - `setup()`: called when the app starts, use it to initialize models
+ - `run()`: called for each inference request
+ - `unload()`: called when shutting down, use it to free resources
{inferencesh-0.4.0 → inferencesh-0.4.2}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
  
  [project]
  name = "inferencesh"
- version = "0.4.0"
+ version = "0.4.2"
  description = "inference.sh Python SDK"
  authors = [
      {name = "Inference Shell Inc.", email = "hello@inference.sh"},
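Given the version bump above, a plain pip upgrade to this release would look like:

```bash
# pin to the new release explicitly
pip install --upgrade "inferencesh==0.4.2"
```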