silkloom_core-0.1.0.tar.gz
- silkloom_core-0.1.0/LICENSE +21 -0
- silkloom_core-0.1.0/PKG-INFO +230 -0
- silkloom_core-0.1.0/README.md +205 -0
- silkloom_core-0.1.0/pyproject.toml +38 -0
- silkloom_core-0.1.0/setup.cfg +4 -0
- silkloom_core-0.1.0/silkloom_core/__init__.py +3 -0
- silkloom_core-0.1.0/silkloom_core/core.py +651 -0
- silkloom_core-0.1.0/silkloom_core/utils.py +35 -0
- silkloom_core-0.1.0/silkloom_core.egg-info/PKG-INFO +230 -0
- silkloom_core-0.1.0/silkloom_core.egg-info/SOURCES.txt +11 -0
- silkloom_core-0.1.0/silkloom_core.egg-info/dependency_links.txt +1 -0
- silkloom_core-0.1.0/silkloom_core.egg-info/requires.txt +7 -0
- silkloom_core-0.1.0/silkloom_core.egg-info/top_level.txt +1 -0

silkloom_core-0.1.0/LICENSE
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2026 SilkLoom Core Contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

silkloom_core-0.1.0/PKG-INFO
@@ -0,0 +1,230 @@
Metadata-Version: 2.4
Name: silkloom-core
Version: 0.1.0
Summary: SilkLoom Core: lightweight, resilient batch pipeline for repeatable LLM and function workflows
Author: SilkLoom Core Contributors
License-Expression: MIT
Keywords: llm,pipeline,workflow,batch-processing,silkloom
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Software Development :: Libraries
Requires-Python: >=3.10
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: openai>=1.40.0
Requires-Dist: pydantic>=2.0
Requires-Dist: tenacity>=8.2.0
Provides-Extra: dev
Requires-Dist: build>=1.2.1; extra == "dev"
Requires-Dist: twine>=5.1.1; extra == "dev"
Dynamic: license-file

# SilkLoom Core

[中文](README.zh-CN.md) | [English](README.md)

SilkLoom Core is a lightweight, resilient batch pipeline for repeatable workflows. It is a general-purpose execution layer for running the same process over many inputs, with retries and resumability built in.

## Overview

Key capabilities:

- Node-based workflow composition (`LLMNode`, `FunctionNode`, custom `BaseNode`)
- Concurrent execution
- Retry with exponential backoff
- SQLite persistence and resumability with `run_id`
- Structured output with Pydantic

Design philosophy:

- Focus on repeatable execution, not intelligent scheduling
- Keep workflow logic explicit and deterministic
- Make long-running batch jobs restartable and observable

## Installation

```bash
pip install silkloom-core
```

Install from source:

```bash
git clone https://github.com/your-org/silkloom-core.git
cd silkloom-core
pip install -e .
```

Dev extras:

```bash
pip install -e ".[dev]"
```

## Quick Start

```python
from silkloom_core import Pipeline, LLMNode, FunctionNode


def score_text(text: str) -> dict:
    # Toy scorer: longer summaries score higher, capped at 1.0.
    score = min(len(text) / 100, 1.0)
    return {"score": round(score, 3)}


pipeline = Pipeline(db_path="pipeline.db", execution_mode="depth_first", default_workers=4)

pipeline.add_node(
    LLMNode(
        name="summarize",
        prompt_template="Summarize in one sentence: {input.text}",
        model="gpt-4o-mini",
    )
)

pipeline.add_node(
    FunctionNode(
        name="score",
        func=score_text,
        kwargs_mapping={"text": "{summarize.text}"},
    )
)

run_id = pipeline.run([
    {"text": "SilkLoom Core supports repeatable LLM batch processing."},
    {"text": "It persists progress in SQLite and can resume by run_id."},
])

print(pipeline.export_results(run_id))
```

## OpenAI-Compatible Endpoints

`LLMNode` accepts a custom OpenAI client:

```python
LLMNode(..., client=your_openai_client)
```

Any endpoint compatible with the OpenAI Chat Completions API can therefore be used.

### 1) Official OpenAI

```python
from silkloom_core import LLMNode

node = LLMNode(
    name="extract",
    prompt_template="Extract key facts: {input.note}",
    model="gpt-4o-mini",
)
```

```bash
export OPENAI_API_KEY="your_openai_key"
# PowerShell:
# $env:OPENAI_API_KEY="your_openai_key"
```

### 2) GLM-4-Flash (OpenAI-compatible)

```python
import os
from openai import OpenAI
from silkloom_core import LLMNode

glm_client = OpenAI(
    api_key=os.environ["ZHIPUAI_API_KEY"],
    base_url="https://open.bigmodel.cn/api/paas/v4/",
)

node = LLMNode(
    name="extract_geo",
    prompt_template="Extract city, topic, and coordinates: {input.note}",
    model="glm-4-flash",
    client=glm_client,
)
```

```bash
export ZHIPUAI_API_KEY="your_glm_key"
# PowerShell:
# $env:ZHIPUAI_API_KEY="your_glm_key"
```

### 3) Local Ollama (OpenAI-compatible)

Start the Ollama server and pull a model (example):

```bash
ollama serve   # skip if the Ollama service is already running
ollama pull qwen2.5:7b
```

Use it in SilkLoom Core:

```python
from openai import OpenAI
from silkloom_core import LLMNode

ollama_client = OpenAI(
    api_key="ollama",  # Ollama ignores the key, but the client requires a non-empty value
    base_url="http://localhost:11434/v1",
)

node = LLMNode(
    name="local_summary",
    prompt_template="Summarize this note: {input.note}",
    model="qwen2.5:7b",
    client=ollama_client,
)
```

Note: local models vary in structured-output quality. If you use `response_model`, explicitly require strict JSON-only output in the prompt.

## Example Scripts

The bundled examples use GIS/urban research as one domain case, but SilkLoom Core itself is domain-agnostic.

```bash
python examples/quickstart.py
python examples/structured_output.py
python examples/resume_with_run_id.py
python examples/trajectory_od_commute.py
```

- `quickstart.py`: summarize notes and tag themes
- `structured_output.py`: extract structured attributes and build GeoJSON-like features
- `resume_with_run_id.py`: simulate repeatable tile processing with resume
- `trajectory_od_commute.py`: origin-destination (OD) extraction + distance/time segmentation + flowline output

## Core Concepts

### 1. Pipeline Modes

- `depth_first`: each item progresses end-to-end through all nodes
- `breadth_first`: all items complete one stage before the next stage begins

### 2. Context Flow

- Initial context for each item: `{"input": <item>}`
- Each node's output dict is stored back into the context: `context[node_name] = output_dict`

### 3. Retry and Resume

- Automatic retries with exponential backoff
- Resume unfinished tasks by reusing the same `run_id`

## API Summary

- `Pipeline.add_node(node) -> Pipeline`
- `Pipeline.run(inputs, run_id=None) -> str`
- `Pipeline.export_results(run_id, format="json") -> list[dict]`

## License

MIT

silkloom_core-0.1.0/README.md
@@ -0,0 +1,205 @@
# SilkLoom Core

[中文](README.zh-CN.md) | [English](README.md)

SilkLoom Core is a lightweight, resilient batch pipeline for repeatable workflows. It is a general-purpose execution layer for running the same process over many inputs, with retries and resumability built in.

## Overview

Key capabilities:

- Node-based workflow composition (`LLMNode`, `FunctionNode`, custom `BaseNode`)
- Concurrent execution
- Retry with exponential backoff
- SQLite persistence and resumability with `run_id`
- Structured output with Pydantic

Design philosophy:

- Focus on repeatable execution, not intelligent scheduling
- Keep workflow logic explicit and deterministic
- Make long-running batch jobs restartable and observable

## Installation

```bash
pip install silkloom-core
```

Install from source:

```bash
git clone https://github.com/your-org/silkloom-core.git
cd silkloom-core
pip install -e .
```

Dev extras:

```bash
pip install -e ".[dev]"
```

## Quick Start

```python
from silkloom_core import Pipeline, LLMNode, FunctionNode


def score_text(text: str) -> dict:
    # Toy scorer: longer summaries score higher, capped at 1.0.
    score = min(len(text) / 100, 1.0)
    return {"score": round(score, 3)}


pipeline = Pipeline(db_path="pipeline.db", execution_mode="depth_first", default_workers=4)

pipeline.add_node(
    LLMNode(
        name="summarize",
        prompt_template="Summarize in one sentence: {input.text}",
        model="gpt-4o-mini",
    )
)

pipeline.add_node(
    FunctionNode(
        name="score",
        func=score_text,
        kwargs_mapping={"text": "{summarize.text}"},
    )
)

run_id = pipeline.run([
    {"text": "SilkLoom Core supports repeatable LLM batch processing."},
    {"text": "It persists progress in SQLite and can resume by run_id."},
])

print(pipeline.export_results(run_id))
```
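
For reference, `export_results` returns one record per input item. The exact shape depends on how node outputs are stored, but given the context model described under Core Concepts below, a plausible result for the first item looks like this (values illustrative):

```python
# Illustrative only: keys mirror the node names; the "summarize" text is a
# made-up model response, and "score" is score_text applied to it.
[
    {
        "input": {"text": "SilkLoom Core supports repeatable LLM batch processing."},
        "summarize": {"text": "SilkLoom Core runs repeatable, resumable LLM batch jobs."},
        "score": {"score": 0.56},
    },
    # ... one record per input item
]
```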

## OpenAI-Compatible Endpoints

`LLMNode` accepts a custom OpenAI client:

```python
LLMNode(..., client=your_openai_client)
```

Any endpoint compatible with the OpenAI Chat Completions API can therefore be used.

### 1) Official OpenAI

```python
from silkloom_core import LLMNode

node = LLMNode(
    name="extract",
    prompt_template="Extract key facts: {input.note}",
    model="gpt-4o-mini",
)
```

```bash
export OPENAI_API_KEY="your_openai_key"
# PowerShell:
# $env:OPENAI_API_KEY="your_openai_key"
```

### 2) GLM-4-Flash (OpenAI-compatible)

```python
import os
from openai import OpenAI
from silkloom_core import LLMNode

glm_client = OpenAI(
    api_key=os.environ["ZHIPUAI_API_KEY"],
    base_url="https://open.bigmodel.cn/api/paas/v4/",
)

node = LLMNode(
    name="extract_geo",
    prompt_template="Extract city, topic, and coordinates: {input.note}",
    model="glm-4-flash",
    client=glm_client,
)
```

```bash
export ZHIPUAI_API_KEY="your_glm_key"
# PowerShell:
# $env:ZHIPUAI_API_KEY="your_glm_key"
```

### 3) Local Ollama (OpenAI-compatible)

Start the Ollama server and pull a model (example):

```bash
ollama serve   # skip if the Ollama service is already running
ollama pull qwen2.5:7b
```

Use it in SilkLoom Core:

```python
from openai import OpenAI
from silkloom_core import LLMNode

ollama_client = OpenAI(
    api_key="ollama",  # Ollama ignores the key, but the client requires a non-empty value
    base_url="http://localhost:11434/v1",
)

node = LLMNode(
    name="local_summary",
    prompt_template="Summarize this note: {input.note}",
    model="qwen2.5:7b",
    client=ollama_client,
)
```

Note: local models vary in structured-output quality. If you use `response_model`, explicitly require strict JSON-only output in the prompt.
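
As a sketch of how `response_model` might be wired up (the parameter is referenced in the note above; we assume it accepts a Pydantic model class and that the node validates the model's JSON reply against it):

```python
from pydantic import BaseModel
from silkloom_core import LLMNode


class NoteFacts(BaseModel):
    city: str
    topic: str


# Assumption: response_model takes a Pydantic class. Following the advice
# above, the prompt also demands strict JSON-only output.
node = LLMNode(
    name="extract_facts",
    prompt_template=(
        "Extract the city and topic from this note. "
        'Reply with strict JSON only, using keys "city" and "topic": {input.note}'
    ),
    model="gpt-4o-mini",
    response_model=NoteFacts,
)
```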

## Example Scripts

The bundled examples use GIS/urban research as one domain case, but SilkLoom Core itself is domain-agnostic.

```bash
python examples/quickstart.py
python examples/structured_output.py
python examples/resume_with_run_id.py
python examples/trajectory_od_commute.py
```

- `quickstart.py`: summarize notes and tag themes
- `structured_output.py`: extract structured attributes and build GeoJSON-like features
- `resume_with_run_id.py`: simulate repeatable tile processing with resume
- `trajectory_od_commute.py`: origin-destination (OD) extraction + distance/time segmentation + flowline output

## Core Concepts

### 1. Pipeline Modes

The execution mode is chosen at construction time (see the sketch after this list):

- `depth_first`: each item progresses end-to-end through all nodes
- `breadth_first`: all items complete one stage before the next stage begins
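
A minimal sketch, reusing the constructor arguments from the Quick Start:

```python
from silkloom_core import Pipeline

# depth_first: each worker takes one item end-to-end through all nodes.
pipeline = Pipeline(db_path="pipeline.db", execution_mode="depth_first", default_workers=4)

# breadth_first: every item finishes a stage before the next stage begins.
pipeline = Pipeline(db_path="pipeline.db", execution_mode="breadth_first", default_workers=4)
```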

### 2. Context Flow

- Initial context for each item: `{"input": <item>}`
- Each node's output dict is stored back into the context: `context[node_name] = output_dict`
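
For the Quick Start pipeline, the context of a single item evolves roughly like this (values illustrative):

```python
# Before any node runs, the context holds only the input payload:
context = {"input": {"text": "SilkLoom Core supports repeatable LLM batch processing."}}

# After the "summarize" node, its output dict is stored under its name
# (illustrative model response):
context["summarize"] = {"text": "SilkLoom Core runs repeatable LLM batch jobs."}

# After the "score" node:
context["score"] = {"score": 0.45}

# Placeholders such as {input.text} and {summarize.text} resolve against
# this context, which is how the score node received the summary.
```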

### 3. Retry and Resume

- Automatic retries with exponential backoff
- Resume unfinished tasks by reusing the same `run_id`
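
A minimal resume sketch, continuing the Quick Start example and assuming that re-running with an existing `run_id` reads progress back from the SQLite database and skips items already completed:

```python
# pipeline and inputs as in the Quick Start example.
# First attempt: the process may crash or be interrupted partway through.
run_id = pipeline.run(inputs)

# Later, even in a fresh process: pass the same run_id to continue.
# Assumption: finished items are loaded from pipeline.db and not re-executed.
pipeline.run(inputs, run_id=run_id)

results = pipeline.export_results(run_id)
```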

## API Summary

- `Pipeline.add_node(node) -> Pipeline`
- `Pipeline.run(inputs, run_id=None) -> str`
- `Pipeline.export_results(run_id, format="json") -> list[dict]`
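
Custom steps subclass `BaseNode` (listed under Key capabilities). The real interface is defined in `silkloom_core/core.py`; the sketch below is hypothetical and assumes a single execution hook that receives the context and returns the node's output dict:

```python
from silkloom_core import BaseNode


class UppercaseNode(BaseNode):
    """Hypothetical custom node. The actual hook name, signature, and
    constructor arguments are defined in silkloom_core/core.py and may differ."""

    def __init__(self, name: str):
        self.name = name

    def execute(self, context: dict) -> dict:
        # Read the raw input from the context and return this node's output,
        # which the pipeline would store as context[self.name].
        return {"text": context["input"]["text"].upper()}
```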

## License

MIT

silkloom_core-0.1.0/pyproject.toml
@@ -0,0 +1,38 @@
[build-system]
requires = ["setuptools>=68", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "silkloom-core"
version = "0.1.0"
description = "SilkLoom Core: lightweight, resilient batch pipeline for repeatable LLM and function workflows"
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
    "openai>=1.40.0",
    "pydantic>=2.0",
    "tenacity>=8.2.0"
]
authors = [
    { name = "SilkLoom Core Contributors" }
]
license = "MIT"
keywords = ["llm", "pipeline", "workflow", "batch-processing", "silkloom"]
classifiers = [
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Developers",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: Software Development :: Libraries",
]

[project.optional-dependencies]
dev = [
    "build>=1.2.1",
    "twine>=5.1.1",
]

[tool.setuptools.packages.find]
include = ["silkloom_core*"]