ldbg-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ldbg/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .ldbg import generate_commands, gc
+
+ __all__ = ["generate_commands", "gc"]
ldbg/ldbg.py ADDED
@@ -0,0 +1,303 @@
+ import inspect
+ import re
+ import textwrap
+ import traceback
+ import linecache
+ import os
+ import pprint
+ from types import FrameType
+ from typing import cast
+
+ from openai import OpenAI
+
+ LENGTH_MAX = 10000
+ CODE_BLOCK_REGEX = r"```(?:[\w+-]*)\n(.*?)```"
+
+ if "OPENROUTER_API_KEY" in os.environ:
+     client = OpenAI(
+         base_url="https://openrouter.ai/api/v1",
+         api_key=os.environ["OPENROUTER_API_KEY"],
+     )
+ else:
+     client = OpenAI()
+
+
+ def extract_code_blocks(markdown_text: str):
+     pattern = re.compile(CODE_BLOCK_REGEX, re.DOTALL)
+     return pattern.findall(markdown_text)
+
+
+ def execute_code_block(code: str):
+     # Note: the block runs in a fresh, empty global namespace.
+     exec(code, {})
+
+
+ def execute_blocks(markdown_text: str | None) -> None:
+     """
+     Extract the code blocks from the markdown and ask the user whether to execute each of them.
+     """
+     if markdown_text is None:
+         return
+     blocks = extract_code_blocks(markdown_text)
+     for block in blocks:
+         print("Would you like to execute the following code block:")
+         print(textwrap.indent(block, " "))
+         confirm = input("(y/n)").lower()
+         if confirm in ["yes", "y"]:
+             execute_code_block(block)
+
+
+ def generate_commands(
+     prompt: str,
+     frame=None,
+     model="gpt-5-mini-2025-08-07",
+     print_prompt=True,
+     length_max=LENGTH_MAX,
+     context="",
+ ):
+     """
+     Generate Python debug help based on natural-language instructions.
+
+     Includes:
+     - Call stack / traceback
+     - Current function’s source
+     - Surrounding source lines (like ipdb 'll')
+
+     Example:
+
+     >>> import ldbg
+     >>> ldbg.generate_commands("describe unknown_data")
+     Model gpt-5-mini-2025-08-07 says:
+
+     unknown_data is a numpy array which can be described with the following pandas code:
+
+     ```
+     pandas.DataFrame(unknown_data).describe()
+     ```
+
+     Note: you can use numpy.set_printoptions (or a library like numpyprint) to pretty print your array:
+
+     ```
+     with np.printoptions(precision=2, suppress=True, threshold=5):
+         unknown_data
+     ```
+
+     Would you like to execute the following code block:
+     pandas.DataFrame(unknown_data).describe()
+     (y/n)
+
+
+     <<< user enters y
+                   0
+     count  9.000000
+     mean   4.000000
+     std    2.738613
+     min    0.000000
+     25%    2.000000
+     50%    4.000000
+     75%    6.000000
+     max    8.000000
+
+
+     Would you like to execute the following code block:
+     with np.printoptions(precision=2, suppress=True, threshold=5):
+         unknown_data
+     (y/n)
+
+     <<< user enters n
+     """
+     if frame is None:
+         frame = cast(FrameType, inspect.currentframe().f_back)  # type: ignore
+
+     # Locals & globals preview
+     locals_preview = pprint.pformat(frame.f_locals)[
+         :length_max
+     ]  # {k: type(v).__name__ for k, v in frame.f_locals.items()}
+     globals_preview = pprint.pformat(frame.f_globals)[
+         :length_max
+     ]  # {k: type(v).__name__ for k, v in frame.f_globals.items()}
+
+     # Traceback / call stack
+     stack_summary = traceback.format_stack(frame)
+     stack_text = "".join(stack_summary[-15:])  # limit to avoid overload
+
+     # Current function source
+     try:
+         source_lines, start_line = inspect.getsourcelines(frame)
+         func_source = "".join(source_lines)
+     except (OSError, TypeError):
+         func_source = "<source unavailable>"
+
+     # Context like ipdb 'll'
+     filename = frame.f_code.co_filename
+     lineno = frame.f_lineno
+     start_context = max(lineno - 10, 1)
+     context_lines = []
+     for i in range(start_context, lineno + 10):
+         line = linecache.getline(filename, i)
+         if line:
+             context_lines.append(f"{i:4d}: {line}")
+     context_text = "".join(context_lines)
+
+     # ldbg.generate_commands({prompt}, model={model}, code_only={code_only}, print_prompt={print_prompt}, print_answer={print_answer}, length_max={length_max})
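+     # The caller-supplied `context` argument is interpolated into the prompt template below; the same name is then rebound to the full system prompt.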
+     context = textwrap.dedent(f"""
+ You are a Python debugging assistant.
+ The user is paused inside a Python script.
+
+ The user just ran `import ldbg; ldbg.gc({prompt}, model={model})` to ask you for help (gc stands for generate commands).
+
+ Local variables and their types (`locals = pprint.pformat(inspect.currentframe().f_locals)[:length_max]`):
+ {locals_preview}
+
+ Global variables and their types (`globals = pprint.pformat(inspect.currentframe().f_globals)[:length_max]`):
+ {globals_preview}
+
+ Current call stack (traceback):
+ {stack_text}
+
+ Current function source:
+ {func_source}
+
+ Nearby code (like ipdb 'll'):
+ {context_text}
+
+ Additional context:
+ {context}
+
+ If you need more context, a more detailed view of the local variables, or the content of a source file,
+ tell the user which commands to run to print the details you need.
+
+ For example, if you need to know more details about the local variables, tell them:
+
+ I need more context to help you.
+ Could you execute the following commands to give me more context? They will provide the details I need to help you.
+
+ ```
+ import inspect
+ frame = inspect.currentframe()
+ # Get frame.f_locals with a depth of 2
+ local_variables = pprint.pformat(frame.f_locals, depth=2)
+ ```
+
+ Then you can ask me again:
+ ```
+ ldbg.gc({prompt}, model={model}, context=f"local variables are: {{local_variables:.50000}}")
+ ```
+
+ Another example, if you need to know the content of some source files:
+
+ I need more context to help you.
+ Could you execute the following commands to give me more context? They will provide the details I need to help you.
+
+ ```
+ # Get the content of important.py
+ import_file_path = list(Path().glob('**/important.py'))[0]
+
+ with open(import_file_path) as f:
+     important_content = f.read()
+
+ # Find the lines surrounding the class ImportantClass in very_large_script.py
+ search = "class ImportantClass"
+ with open('path/to/important/very_large_script.py') as f:
+     lines = f.readlines()
+
+ # Find the 0-based index of the first matching line
+ idx = next(i for i, line in enumerate(lines) if search in line)
+
+ # Calculate start and end indices
+ start = max(0, idx - 10)
+ end = min(len(lines), idx + 10 + 1)
+
+ # Get the surrounding lines
+ script_content = []
+ for i, line in enumerate(lines[start:end]):
+     script_content.append(f"{{start + i + 1:04d}}: {{line.rstrip()}}")
+ ```
+
+ Then you can ask me again:
+ ```
+ ldbg.gc({prompt}, model={model}, context=f"important.py: {{important_content:.50000}}, very_large_script.py (lines {{start}} to {{end}}): {{script_content:.50000}}")
+ ```
+
+ You can also ask for help in multiple steps:
+
+ Could you execute the following commands to give me more context?
+ This will tell me all the source files in the current working directory.
+
+ ```
+ import pathlib
+ EXCLUDED = {{".venv", ".pixi"}}
+ python_files = [str(p) for p in pathlib.Path('.').rglob('*.py') if not any(part in EXCLUDED for part in p.parts)]
+ ```
+
+ Then you can ask me again:
+ ```
+ ldbg.gc({prompt}, model={model}, context=f"the python files are: {{python_files:.50000}}")
+ ```
+
+ And then I will know more about the project, and I might ask you to execute more commands
+ (for example to read some important files) to get all the context I need.
+
+ The length of your context window is limited and you perform better with focused questions and context.
+ Thus, when you ask the user to execute commands and send you more information,
+ always make sure to be precise so that you get a response of reasonable length.
+ For example, if you need some information in a huge file,
+ provide commands to extract exactly what you need instead of reading the entire file.
+ If you need a specific value deep in the locals, get `frame.f_locals["deep_object"].deep_dict["deep_attribute"]["sub_attribute"]["etc"]`
+ instead of getting the entire locals with a large depth as in `local_variables = pprint.pformat(frame.f_locals, depth=10)`.
+
+ Cap the length of the responses to avoid reaching the maximum prompt length (which would result in a failure).
+
+ The user is a developer; you can also ask them for details about the context in natural language.
+
+ If you have all the context you need, just provide a useful answer.
+ For example, if the user asks "describe unknown_data", you could answer:
+
+ `unknown_data` is a numpy array which can be described with the following pandas code:
+
+ ```
+ pandas.DataFrame(unknown_data).describe()
+ ```
+
+ You could also use numpy.set_printoptions (or a library like numpyprint) to pretty print your array:
+
+ ```
+ with np.printoptions(precision=2, suppress=True, threshold=5):
+     unknown_data
+ ```
+
+ Always put the code to execute in triple backticks code blocks.
+ Provide relatively short and concise answers.
+     """)
+
+     if print_prompt:
+         print("System prompt:")
+         print(context)
+         print("\nUser prompt:")
+         print(prompt)
+
+     resp = client.chat.completions.create(
+         model=model,
+         messages=[
+             {"role": "system", "content": context},
+             {"role": "user", "content": prompt},
+         ],
+         temperature=1,
+     )
+
+     response = resp.choices[0].message.content
+
+     if print_prompt:
+         print("\n\n\n")
+
+     if response is None:
+         return
+
+     print(f"Model {model} says:")
+     print(textwrap.indent(response, " "))
+
+     execute_blocks(response)
+
+     return
+
+
+ gc = generate_commands
ldbg/py.typed ADDED
File without changes
ldbg-0.1.0.dist-info/METADATA ADDED
@@ -0,0 +1,117 @@
+ Metadata-Version: 2.4
+ Name: ldbg
+ Version: 0.1.0
+ Summary: Add your description here
+ Author-email: Arthur Masson <arthur.masson@inria.fr>
+ Requires-Python: >=3.12
+ Requires-Dist: openai>=1.107.3
+ Description-Content-Type: text/markdown
+
+ # llm-debug
+
+ ### A minimal Python library for debugging with LLMs
+
+ Use natural-language prompts while debugging. Prompts are augmented with your current stack, variables, and source context.
+
+ DO NOT USE THIS LIBRARY
+
+ > “AI everywhere is rocket engines on a skateboard: a thrill that ends in wreckage. The planet pays in energy and emissions, and we pay in something subtler — the slow atrophy of our own intelligence, left idle while the machines do the heavy lifting.” ChatGPT
+
+ ## Features
+
+ - 🐍 Generate Python debug commands from natural-language instructions.
+ - 🔍 Context-aware: prompts auto-include the call stack, local/global variable previews, the current function's source, and nearby code.
+ - ⚡ Works like an AI-augmented pdb: just ask what you want to inspect.
+ - 🤖 Supports OpenRouter
+
+ ## Installation
+
+ `pip install ldbg`
+
+ ## Quick Start
+
+ ### Example Session
+
+ ```python
+
+ >>> import numpy as np
+ >>> unknown_data = np.arange(9)
+ >>> example_dict = {"a": 1, "b": [1, 2, 3]}
+ >>> example_numbers = list(range(10))
+ >>> import ldbg
+ >>> ldbg.gc("describe unknown_data")
+ Model gpt-5-mini-2025-08-07 says:
+
+ unknown_data is a numpy array which can be described with the following pandas code:
+
+ ```code block 1
+ pandas.DataFrame(unknown_data).describe()
+ ```
+
+ Note: you can use numpy.set_printoptions (or a library like numpyprint) to pretty print your array:
+
+ ```code block 2
+ with np.printoptions(precision=2, suppress=True, threshold=5):
+     unknown_data
+ ```
+
+ Would you like to execute the following code block:
+ pandas.DataFrame(unknown_data).describe()
+ (y/n)
+ ```
+
+ User enters y:
+ ```
+               0
+ count  9.000000
+ mean   4.000000
+ std    2.738613
+ min    0.000000
+ 25%    2.000000
+ 50%    4.000000
+ 75%    6.000000
+ max    8.000000
+
+
+
+ Would you like to execute the following code block:
+ with np.printoptions(precision=2, suppress=True, threshold=5):
+     unknown_data
+ (y/n)
+ ```
+
+ User enters n and continues:
+
+ ```python
+ >>> ldbg.gc("plot example_numbers as a bar chart")
+ Model gpt-5-mini-2025-08-07 says:
+
+ ```
+ import matplotlib.pyplot as plt
+ plt.bar(range(len(example_numbers)), example_numbers)
+ plt.show()
+ ```
+
+ Would you like to execute the following code block:
+ ...
+ ```
+
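+ By default, `generate_commands` also prints the full system prompt (stack, variable previews, and nearby code) before the model's answer. As a rough sketch, you can silence that with the `print_prompt` argument:
+
+ ```python
+ ldbg.gc("describe unknown_data", print_prompt=False)
+ ```
+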
+ ### Example natural-language prompts
+
+ - "Describe my numpy arrays"
+ - "plot my_data['b'] as a histogram"
+ - "give me an example pandas dataframe about employees"
+ - "generate a 3x10x12 numpy array which will be used as an example image"
+ - "convert this Pillow image to grayscale"
+ - "open this 'image.ome.tiff' with bioio"
+
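+ ### Passing extra context
+
+ `generate_commands` (alias `gc`) also accepts a `context` string that is appended to the automatically gathered stack and variable previews. The model may ask you to run a few commands and call it again with their output; the sketch below assumes it asked for a deeper preview of your local variables (the prompt text is only an illustration):
+
+ ```python
+ import inspect
+ import pprint
+
+ # Capture a deeper preview of the local variables, as requested by the model.
+ frame = inspect.currentframe()
+ local_variables = pprint.pformat(frame.f_locals, depth=2)
+
+ # Ask again, feeding the extra details back through `context`.
+ ldbg.gc("why is my dataframe empty?", context=f"local variables are: {local_variables:.50000}")
+ ```
+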
+ ## Configuration
+
+ By default, llm-debug uses the OpenAI client, so it reads the [OPENAI_API_KEY environment variable](https://platform.openai.com/docs/quickstart).
+
+ To use OpenRouter instead, define the `OPENROUTER_API_KEY` environment variable:
+
+ `export OPENROUTER_API_KEY="your_api_key_here"`
+
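+ The `model` argument selects which model is queried (the default is `gpt-5-mini-2025-08-07`). Any identifier accepted by your configured endpoint should work; the OpenRouter-style id below is only an example:
+
+ ```python
+ import ldbg
+
+ # Illustrative model id: substitute whatever your OpenAI or OpenRouter account exposes.
+ ldbg.gc("describe unknown_data", model="openai/gpt-4o-mini")
+ ```
+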
+ ## License
+
+ MIT License.
ldbg-0.1.0.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+ ldbg/__init__.py,sha256=pTjJ9KayMK3061FeYd8cE-E8mhwdYRB8OJz0-APm6zI,79
+ ldbg/ldbg.py,sha256=0bIfFZUoMH4329OJzRqB3Zmah60RP_MCBQeTxP5c7xE,9869
+ ldbg/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ ldbg-0.1.0.dist-info/METADATA,sha256=ApXXFHbt7YuoQcDGumT_O_TS4gdrGH-mwa9x2IKtgaY,3087
+ ldbg-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ ldbg-0.1.0.dist-info/RECORD,,
ldbg-0.1.0.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.27.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any