picoflow 0.1.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- picoflow-0.1.1/LICENSE +21 -0
- picoflow-0.1.1/PKG-INFO +240 -0
- picoflow-0.1.1/README.md +217 -0
- picoflow-0.1.1/picoflow/__init__.py +3 -0
- picoflow-0.1.1/picoflow/adapters/__init__.py +5 -0
- picoflow-0.1.1/picoflow/adapters/_http.py +46 -0
- picoflow-0.1.1/picoflow/adapters/ollama.py +201 -0
- picoflow-0.1.1/picoflow/adapters/openai_compat.py +195 -0
- picoflow-0.1.1/picoflow/adapters/registry.py +38 -0
- picoflow-0.1.1/picoflow/adapters/types.py +10 -0
- picoflow-0.1.1/picoflow/cookbook/minimal-demo/main.py +16 -0
- picoflow-0.1.1/picoflow/cookbook/multiple-crew/main.py +242 -0
- picoflow-0.1.1/picoflow/cookbook/simple-chat/main.py +63 -0
- picoflow-0.1.1/picoflow/cookbook/simple-chat-stream/main.py +76 -0
- picoflow-0.1.1/picoflow/cookbook/simple-tool/main.py +67 -0
- picoflow-0.1.1/picoflow/cookbook/trace/main.py +40 -0
- picoflow-0.1.1/picoflow/core.py +467 -0
- picoflow-0.1.1/picoflow.egg-info/PKG-INFO +240 -0
- picoflow-0.1.1/picoflow.egg-info/SOURCES.txt +21 -0
- picoflow-0.1.1/picoflow.egg-info/dependency_links.txt +1 -0
- picoflow-0.1.1/picoflow.egg-info/top_level.txt +1 -0
- picoflow-0.1.1/pyproject.toml +44 -0
- picoflow-0.1.1/setup.cfg +4 -0
picoflow-0.1.1/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 the-picoflow
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
picoflow-0.1.1/PKG-INFO
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: picoflow
|
|
3
|
+
Version: 0.1.1
|
|
4
|
+
Summary: Simple, flexible AI agent framework with a small DSL and explicit state
|
|
5
|
+
Author: PicoFlow Contributors
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/the-picoflow/picoflow
|
|
8
|
+
Project-URL: Repository, https://github.com/the-picoflow/picoflow
|
|
9
|
+
Project-URL: Issues, https://github.com/the-picoflow/picoflow/issues
|
|
10
|
+
Classifier: Development Status :: 3 - Alpha
|
|
11
|
+
Classifier: Intended Audience :: Developers
|
|
12
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Topic :: Software Development :: Libraries
|
|
19
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
20
|
+
Requires-Python: >=3.8
|
|
21
|
+
Description-Content-Type: text/markdown
|
|
22
|
+
License-File: LICENSE
|
|
23
|
+
|
|
24
|
+
<p align="center">
|
|
25
|
+
<img src="picoflow/assets/picoflow_logo.png" width="280">
|
|
26
|
+
</p>
|
|
27
|
+
|
|
28
|
+
# PicoFlow — Simple, Flexible AI Agent Framework
|
|
29
|
+
|
|
30
|
+
**Build agents with explicit steps and a small DSL.
|
|
31
|
+
LLMs, tools, loops, and branches compose naturally.**
|
|
32
|
+
|
|
33
|
+
---
|
|
34
|
+
|
|
35
|
+
## A Minimal PicoFlow Application
|
|
36
|
+
|
|
37
|
+
```python
|
|
38
|
+
from picoflow import flow, llm, create_agent
|
|
39
|
+
|
|
40
|
+
LLM_URL = "llm+openai://api.openai.com/v1/chat/completions?model=gpt-4.1-mini&api_key_env=OPENAI_API_KEY"
|
|
41
|
+
|
|
42
|
+
@flow
|
|
43
|
+
async def mem(ctx):
|
|
44
|
+
return ctx.add_memory("user", ctx.input)
|
|
45
|
+
|
|
46
|
+
agent = create_agent(
|
|
47
|
+
mem >> llm("Answer in one sentence: {input}", llm_adapter=LLM_URL)
|
|
48
|
+
)
|
|
49
|
+
|
|
50
|
+
print(agent.get_output("What is PicoFlow?", trace=True))
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
```bash
|
|
54
|
+
export OPENAI_API_KEY=sk-...
|
|
55
|
+
python minimal.py
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
---
|
|
59
|
+
|
|
60
|
+
## Core Ideas
|
|
61
|
+
|
|
62
|
+
- **Flow = step**
|
|
63
|
+
A flow is just a Python function that takes and returns `State`.
|
|
64
|
+
|
|
65
|
+
- **DSL = pipeline**
|
|
66
|
+
Use `>>` to compose steps into readable execution graphs.
|
|
67
|
+
|
|
68
|
+
- **Agent = runner**
|
|
69
|
+
`create_agent(flow)` gives you `run / arun / get_output`.
|
|
70
|
+
|
|
71
|
+
- **State = context (Ctx)**
|
|
72
|
+
`Ctx` is an alias of `State`. It is immutable and explicit.
|
|
73
|
+
|
|
74
|
+
---
|
|
75
|
+
|
|
76
|
+
## Quick Start (Step by Step)
|
|
77
|
+
|
|
78
|
+
### 1. Define Steps with `@flow`
|
|
79
|
+
|
|
80
|
+
```python
|
|
81
|
+
from picoflow import flow, Ctx
|
|
82
|
+
|
|
83
|
+
@flow
|
|
84
|
+
async def normalize(ctx: Ctx) -> Ctx:
|
|
85
|
+
return ctx.update(input=ctx.input.strip().lower())
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
---
|
|
89
|
+
|
|
90
|
+
### 2. Call LLM as a Step
|
|
91
|
+
|
|
92
|
+
```python
|
|
93
|
+
from picoflow import llm
|
|
94
|
+
|
|
95
|
+
ask = llm("Answer briefly: {input}")
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
---
|
|
99
|
+
|
|
100
|
+
### 3. Compose with DSL
|
|
101
|
+
|
|
102
|
+
```python
|
|
103
|
+
pipeline = normalize >> ask
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
---
|
|
107
|
+
|
|
108
|
+
### 4. Run with Agent
|
|
109
|
+
|
|
110
|
+
```python
|
|
111
|
+
from picoflow import create_agent
|
|
112
|
+
|
|
113
|
+
agent = create_agent(pipeline)
|
|
114
|
+
|
|
115
|
+
state = await agent.arun("Hello WORLD")
|
|
116
|
+
print(state.output)
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
---
|
|
120
|
+
|
|
121
|
+
## DSL in One Minute
|
|
122
|
+
|
|
123
|
+
### Sequential
|
|
124
|
+
|
|
125
|
+
```python
|
|
126
|
+
flow = a >> b >> c
|
|
127
|
+
```
|
|
128
|
+
|
|
129
|
+
### Loop
|
|
130
|
+
|
|
131
|
+
```python
|
|
132
|
+
flow = step.repeat()
|
|
133
|
+
```
|
|
134
|
+
|
|
135
|
+
or:
|
|
136
|
+
|
|
137
|
+
```python
|
|
138
|
+
flow = repeat(step, until=lambda s: s.done)
|
|
139
|
+
```
|
|
140
|
+
|
|
141
|
+
### Parallel + Merge
|
|
142
|
+
|
|
143
|
+
```python
|
|
144
|
+
flow = fork(a, b) >> merge()
|
|
145
|
+
```
|
|
146
|
+
|
|
147
|
+
Custom merge:
|
|
148
|
+
|
|
149
|
+
```python
|
|
150
|
+
flow = fork(a, b) >> merge(
|
|
151
|
+
mode=MergeType.CUSTOM,
|
|
152
|
+
reducer=lambda branches, main: branches[0]
|
|
153
|
+
)
|
|
154
|
+
```
|
|
155
|
+
|
|
156
|
+
---
|
|
157
|
+
|
|
158
|
+
## LLM URL
|
|
159
|
+
|
|
160
|
+
```python
|
|
161
|
+
from picoflow.adapters.registry import from_url
|
|
162
|
+
|
|
163
|
+
adapter = from_url(
|
|
164
|
+
"llm+openai://api.openai.com/v1/chat/completions"
|
|
165
|
+
"?model=gpt-4.1-mini&api_key_env=OPENAI_API_KEY"
|
|
166
|
+
)
|
|
167
|
+
```
|
|
168
|
+
|
|
169
|
+
Then:
|
|
170
|
+
|
|
171
|
+
```python
|
|
172
|
+
flow = llm("Explain: {input}", llm_adapter=adapter)
|
|
173
|
+
```
|
|
174
|
+
|
|
175
|
+
### Custom Adapters
|
|
176
|
+
|
|
177
|
+
```python
|
|
178
|
+
class MyAdapter(LLMAdapter):
|
|
179
|
+
def __call__(self, prompt: str, stream: bool):
|
|
180
|
+
...
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
```python
|
|
184
|
+
from picoflow.adapters.registry import register
|
|
185
|
+
|
|
186
|
+
register("myllm", lambda url: MyAdapter(...))
|
|
187
|
+
```
|
|
188
|
+
|
|
189
|
+
Use:
|
|
190
|
+
|
|
191
|
+
```
|
|
192
|
+
llm+myllm://host/model?param=value
|
|
193
|
+
```
|
|
194
|
+
|
|
195
|
+
---
|
|
196
|
+
|
|
197
|
+
## Runtime Options
|
|
198
|
+
|
|
199
|
+
### Tracing
|
|
200
|
+
|
|
201
|
+
```python
|
|
202
|
+
await agent.arun("hi", trace=True)
|
|
203
|
+
```
|
|
204
|
+
|
|
205
|
+
### Timeout
|
|
206
|
+
|
|
207
|
+
```python
|
|
208
|
+
await agent.arun("hi", timeout=10)
|
|
209
|
+
```
|
|
210
|
+
|
|
211
|
+
### Streaming
|
|
212
|
+
|
|
213
|
+
```python
|
|
214
|
+
async def on_chunk(text: str):
|
|
215
|
+
print(text, end="", flush=True)
|
|
216
|
+
|
|
217
|
+
await agent.arun("stream me", stream_callback=on_chunk)
|
|
218
|
+
```
|
|
219
|
+
|
|
220
|
+
---
|
|
221
|
+
|
|
222
|
+
## Tools
|
|
223
|
+
|
|
224
|
+
```python
|
|
225
|
+
from picoflow import tool
|
|
226
|
+
|
|
227
|
+
flow = tool("search", lambda q: {"result": "..."} )
|
|
228
|
+
```
|
|
229
|
+
|
|
230
|
+
Results:
|
|
231
|
+
|
|
232
|
+
```python
|
|
233
|
+
state.tools["search"]
|
|
234
|
+
```
|
|
235
|
+
|
|
236
|
+
---
|
|
237
|
+
|
|
238
|
+
## License
|
|
239
|
+
|
|
240
|
+
MIT
|
picoflow-0.1.1/README.md
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
1
|
+
<p align="center">
|
|
2
|
+
<img src="picoflow/assets/picoflow_logo.png" width="280">
|
|
3
|
+
</p>
|
|
4
|
+
|
|
5
|
+
# PicoFlow — Simple, Flexible AI Agent Framework
|
|
6
|
+
|
|
7
|
+
**Build agents with explicit steps and a small DSL.
|
|
8
|
+
LLMs, tools, loops, and branches compose naturally.**
|
|
9
|
+
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
## A Minimal PicoFlow Application
|
|
13
|
+
|
|
14
|
+
```python
|
|
15
|
+
from picoflow import flow, llm, create_agent
|
|
16
|
+
|
|
17
|
+
LLM_URL = "llm+openai://api.openai.com/v1/chat/completions?model=gpt-4.1-mini&api_key_env=OPENAI_API_KEY"
|
|
18
|
+
|
|
19
|
+
@flow
|
|
20
|
+
async def mem(ctx):
|
|
21
|
+
return ctx.add_memory("user", ctx.input)
|
|
22
|
+
|
|
23
|
+
agent = create_agent(
|
|
24
|
+
mem >> llm("Answer in one sentence: {input}", llm_adapter=LLM_URL)
|
|
25
|
+
)
|
|
26
|
+
|
|
27
|
+
print(agent.get_output("What is PicoFlow?", trace=True))
|
|
28
|
+
```
|
|
29
|
+
|
|
30
|
+
```bash
|
|
31
|
+
export OPENAI_API_KEY=sk-...
|
|
32
|
+
python minimal.py
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
---
|
|
36
|
+
|
|
37
|
+
## Core Ideas
|
|
38
|
+
|
|
39
|
+
- **Flow = step**
|
|
40
|
+
A flow is just a Python function that takes and returns `State`.
|
|
41
|
+
|
|
42
|
+
- **DSL = pipeline**
|
|
43
|
+
Use `>>` to compose steps into readable execution graphs.
|
|
44
|
+
|
|
45
|
+
- **Agent = runner**
|
|
46
|
+
`create_agent(flow)` gives you `run / arun / get_output`.
|
|
47
|
+
|
|
48
|
+
- **State = context (Ctx)**
|
|
49
|
+
`Ctx` is an alias of `State`. It is immutable and explicit.
|
|
50
|
+
|
|
51
|
+
---
|
|
52
|
+
|
|
53
|
+
## Quick Start (Step by Step)
|
|
54
|
+
|
|
55
|
+
### 1. Define Steps with `@flow`
|
|
56
|
+
|
|
57
|
+
```python
|
|
58
|
+
from picoflow import flow, Ctx
|
|
59
|
+
|
|
60
|
+
@flow
|
|
61
|
+
async def normalize(ctx: Ctx) -> Ctx:
|
|
62
|
+
return ctx.update(input=ctx.input.strip().lower())
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
---
|
|
66
|
+
|
|
67
|
+
### 2. Call LLM as a Step
|
|
68
|
+
|
|
69
|
+
```python
|
|
70
|
+
from picoflow import llm
|
|
71
|
+
|
|
72
|
+
ask = llm("Answer briefly: {input}")
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
---
|
|
76
|
+
|
|
77
|
+
### 3. Compose with DSL
|
|
78
|
+
|
|
79
|
+
```python
|
|
80
|
+
pipeline = normalize >> ask
|
|
81
|
+
```
|
|
82
|
+
|
|
83
|
+
---
|
|
84
|
+
|
|
85
|
+
### 4. Run with Agent
|
|
86
|
+
|
|
87
|
+
```python
|
|
88
|
+
from picoflow import create_agent
|
|
89
|
+
|
|
90
|
+
agent = create_agent(pipeline)
|
|
91
|
+
|
|
92
|
+
state = await agent.arun("Hello WORLD")
|
|
93
|
+
print(state.output)
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
---
|
|
97
|
+
|
|
98
|
+
## DSL in One Minute
|
|
99
|
+
|
|
100
|
+
### Sequential
|
|
101
|
+
|
|
102
|
+
```python
|
|
103
|
+
flow = a >> b >> c
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
### Loop
|
|
107
|
+
|
|
108
|
+
```python
|
|
109
|
+
flow = step.repeat()
|
|
110
|
+
```
|
|
111
|
+
|
|
112
|
+
or:
|
|
113
|
+
|
|
114
|
+
```python
|
|
115
|
+
flow = repeat(step, until=lambda s: s.done)
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
### Parallel + Merge
|
|
119
|
+
|
|
120
|
+
```python
|
|
121
|
+
flow = fork(a, b) >> merge()
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
Custom merge:
|
|
125
|
+
|
|
126
|
+
```python
|
|
127
|
+
flow = fork(a, b) >> merge(
|
|
128
|
+
mode=MergeType.CUSTOM,
|
|
129
|
+
reducer=lambda branches, main: branches[0]
|
|
130
|
+
)
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
---
|
|
134
|
+
|
|
135
|
+
## LLM URL
|
|
136
|
+
|
|
137
|
+
```python
|
|
138
|
+
from picoflow.adapters.registry import from_url
|
|
139
|
+
|
|
140
|
+
adapter = from_url(
|
|
141
|
+
"llm+openai://api.openai.com/v1/chat/completions"
|
|
142
|
+
"?model=gpt-4.1-mini&api_key_env=OPENAI_API_KEY"
|
|
143
|
+
)
|
|
144
|
+
```
|
|
145
|
+
|
|
146
|
+
Then:
|
|
147
|
+
|
|
148
|
+
```python
|
|
149
|
+
flow = llm("Explain: {input}", llm_adapter=adapter)
|
|
150
|
+
```
|
|
151
|
+
|
|
152
|
+
### Custom Adapters
|
|
153
|
+
|
|
154
|
+
```python
|
|
155
|
+
class MyAdapter(LLMAdapter):
|
|
156
|
+
def __call__(self, prompt: str, stream: bool):
|
|
157
|
+
...
|
|
158
|
+
```
|
|
159
|
+
|
|
160
|
+
```python
|
|
161
|
+
from picoflow.adapters.registry import register
|
|
162
|
+
|
|
163
|
+
register("myllm", lambda url: MyAdapter(...))
|
|
164
|
+
```
|
|
165
|
+
|
|
166
|
+
Use:
|
|
167
|
+
|
|
168
|
+
```
|
|
169
|
+
llm+myllm://host/model?param=value
|
|
170
|
+
```
|
|
171
|
+
|
|
172
|
+
---
|
|
173
|
+
|
|
174
|
+
## Runtime Options
|
|
175
|
+
|
|
176
|
+
### Tracing
|
|
177
|
+
|
|
178
|
+
```python
|
|
179
|
+
await agent.arun("hi", trace=True)
|
|
180
|
+
```
|
|
181
|
+
|
|
182
|
+
### Timeout
|
|
183
|
+
|
|
184
|
+
```python
|
|
185
|
+
await agent.arun("hi", timeout=10)
|
|
186
|
+
```
|
|
187
|
+
|
|
188
|
+
### Streaming
|
|
189
|
+
|
|
190
|
+
```python
|
|
191
|
+
async def on_chunk(text: str):
|
|
192
|
+
print(text, end="", flush=True)
|
|
193
|
+
|
|
194
|
+
await agent.arun("stream me", stream_callback=on_chunk)
|
|
195
|
+
```
|
|
196
|
+
|
|
197
|
+
---
|
|
198
|
+
|
|
199
|
+
## Tools
|
|
200
|
+
|
|
201
|
+
```python
|
|
202
|
+
from picoflow import tool
|
|
203
|
+
|
|
204
|
+
flow = tool("search", lambda q: {"result": "..."} )
|
|
205
|
+
```
|
|
206
|
+
|
|
207
|
+
Results:
|
|
208
|
+
|
|
209
|
+
```python
|
|
210
|
+
state.tools["search"]
|
|
211
|
+
```
|
|
212
|
+
|
|
213
|
+
---
|
|
214
|
+
|
|
215
|
+
## License
|
|
216
|
+
|
|
217
|
+
MIT
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
# adapters/_http.py
|
|
2
|
+
import json
|
|
3
|
+
import urllib.error
|
|
4
|
+
import asyncio
|
|
5
|
+
from typing import Callable, TypeVar
|
|
6
|
+
|
|
7
|
+
T = TypeVar("T")
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
async def run_blocking(fn: Callable[[], T]) -> T:
    """Run the synchronous callable *fn* on the default executor.

    Offloads blocking work (e.g. urllib requests) to a worker thread so
    the event loop stays responsive, and returns *fn*'s result.
    """
    event_loop = asyncio.get_running_loop()
    result = await event_loop.run_in_executor(None, fn)
    return result
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def raise_http_error(e: urllib.error.HTTPError, *, provider: str, hint: str = "") -> None:
    """Re-raise a urllib HTTPError as a RuntimeError with a readable message.

    Attempts to extract a human-friendly detail from the response body:
    OpenAI-style payloads (``{"error": {"message": "..."}}``) are unwrapped;
    any other readable body is included verbatim.

    Args:
        e: The HTTP error raised by urllib.
        provider: Adapter name used to prefix the message (e.g. "openai").
        hint: Optional remediation advice appended on a new line.

    Raises:
        RuntimeError: always; exception chaining is suppressed (``from None``)
            to keep adapter tracebacks short.
    """
    body = None
    try:
        raw = e.read().decode("utf-8", errors="ignore")
        if raw:
            try:
                obj = json.loads(raw)
                # OpenAI-style: {"error": {"message": "..."}}
                if isinstance(obj, dict) and isinstance(obj.get("error"), dict):
                    # Fall back to the raw body when "message" is missing or
                    # empty, so the payload is never silently discarded.
                    body = obj["error"].get("message") or raw
                else:
                    # Any other JSON (dict without "error", list, scalar):
                    # include the raw body as-is.
                    body = raw
            except Exception:
                # Not JSON — include the raw text.
                body = raw
    except Exception:
        # Body unreadable (no fp / already consumed); proceed with the
        # status line only.
        pass

    msg = f"[{provider}] HTTP {e.code} {e.reason}"
    if body:
        msg += f": {body}"
    if hint:
        msg += f"\nHint: {hint}"

    raise RuntimeError(msg) from None
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def raise_url_error(e: urllib.error.URLError, *, provider: str, hint: str = "") -> None:
    """Re-raise a urllib URLError as a concise RuntimeError.

    Args:
        e: The network-level error raised by urllib.
        provider: Adapter name used to prefix the message.
        hint: Optional remediation advice appended on a new line.

    Raises:
        RuntimeError: always; chaining is suppressed (``from None``) so the
            adapter call site shows one short error.
    """
    parts = [f"[{provider}] Network error: {e.reason}"]
    if hint:
        parts.append(f"Hint: {hint}")
    raise RuntimeError("\n".join(parts)) from None
|