cognify-code 0.2.2__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ai_code_assistant/cli.py +74 -24
- ai_code_assistant/generator/code_gen.py +155 -1
- {cognify_code-0.2.2.dist-info → cognify_code-0.2.3.dist-info}/METADATA +1 -1
- {cognify_code-0.2.2.dist-info → cognify_code-0.2.3.dist-info}/RECORD +8 -8
- {cognify_code-0.2.2.dist-info → cognify_code-0.2.3.dist-info}/WHEEL +0 -0
- {cognify_code-0.2.2.dist-info → cognify_code-0.2.3.dist-info}/entry_points.txt +0 -0
- {cognify_code-0.2.2.dist-info → cognify_code-0.2.3.dist-info}/licenses/LICENSE +0 -0
- {cognify_code-0.2.2.dist-info → cognify_code-0.2.3.dist-info}/top_level.txt +0 -0
ai_code_assistant/cli.py
CHANGED
@@ -156,41 +156,91 @@ def review(ctx, files: Tuple[Path, ...], review_type: str, output_format: str,
               type=click.Choice(["console", "markdown", "json"]))
 @click.option("--source", "-s", type=click.Path(exists=True, path_type=Path),
               help="Source file (for test mode)")
+@click.option("--stream/--no-stream", default=True, help="Stream output in real-time")
 @click.pass_context
 def generate(ctx, description: str, mode: str, language: str, name: Optional[str],
              params: Optional[str], output: Optional[Path], output_format: str,
-             source: Optional[Path]):
+             source: Optional[Path], stream: bool):
     """Generate code from natural language description."""
+    from rich.live import Live
+    from rich.markdown import Markdown
+    from rich.panel import Panel
+
     config, llm = get_components(ctx.obj.get("config_path"))
     generator = CodeGenerator(config, llm)
     formatter = get_formatter(output_format, config.output.use_colors)

     console.print(f"\n[bold]Generating {mode} in {language}...[/bold]\n")

-
-
-
-
-
-        )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # Handle test mode source requirement
+    source_code = ""
+    if mode == "test":
+        if not source:
+            console.print("[red]Error:[/red] --source required for test mode")
+            sys.exit(1)
+        source_code = source.read_text()
+
+    if stream:
+        # Streaming mode - show output as it generates
+        full_response = ""
+        final_code = ""
+
+        console.print("[dim]Streaming response...[/dim]\n")
+
+        for chunk, is_complete in generator.generate_stream(
+            description=description,
+            mode=mode,
+            language=language,
+            name=name or "",
+            parameters=params or "",
+            source_code=source_code,
+        ):
+            if is_complete:
+                final_code = chunk
+            else:
+                console.print(chunk, end="", highlight=False)
+                full_response += chunk
+
+        console.print("\n")
+
+        # Create result for formatting
+        from ai_code_assistant.generator import GenerationResult
+        result = GenerationResult(
+            code=final_code,
+            language=language,
+            mode=mode,
+            description=description,
+            raw_response=full_response,
+        )
+
+        # Show extracted code in a panel
+        console.print(Panel(
+            final_code,
+            title=f"[bold green]Generated {mode.title()}[/bold green]",
+            border_style="green",
+        ))
+    else:
+        # Non-streaming mode (original behavior)
+        with console.status("[bold green]Generating code..."):
+            if mode == "function":
+                result = generator.generate_function(
+                    description=description, name=name or "generated_function",
+                    language=language, parameters=params or "",
+                )
+            elif mode == "class":
+                result = generator.generate_class(
+                    description=description, name=name or "GeneratedClass", language=language,
+                )
+            elif mode == "script":
+                result = generator.generate_script(
+                    description=description, requirements=[description], language=language,
+                )
+            elif mode == "test":
+                result = generator.generate_tests(source_code=source_code, language=language)
+            else:
+                result = generator.generate(description=description, language=language)

-
+    formatted = formatter.format_generation(result)

     if output and result.success:
         output.parent.mkdir(parents=True, exist_ok=True)
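The CLI consumes generate_stream as a (chunk, is_complete) protocol: raw chunks arrive with is_complete=False, and one final tuple carries the extracted code. A minimal standalone sketch of that contract, with a hypothetical fake_stream standing in for CodeGenerator.generate_stream:

    from typing import Iterator, Tuple

    def fake_stream() -> Iterator[Tuple[str, bool]]:
        # Stand-in for CodeGenerator.generate_stream (hypothetical data).
        for chunk in ("Here is the function:\n", "```python\n",
                      "def add(a, b):\n", "    return a + b\n", "```\n"):
            yield (chunk, False)  # raw chunks, streamed as they arrive
        yield ("def add(a, b):\n    return a + b", True)  # final extracted code

    final_code = ""
    for chunk, is_complete in fake_stream():
        if is_complete:
            final_code = chunk       # the last yield carries the code
        else:
            print(chunk, end="")     # echo the stream in real time
    print("\n--- extracted ---\n" + final_code)

Folding the final extracted code into the same iterator keeps the CLI loop single-pass: no second call is needed once streaming ends.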
ai_code_assistant/generator/code_gen.py
CHANGED

@@ -3,7 +3,7 @@
 import re
 from dataclasses import dataclass, field
 from pathlib import Path
-from typing import List, Literal, Optional
+from typing import Iterator, List, Literal, Optional, Tuple

 from ai_code_assistant.config import Config
 from ai_code_assistant.llm import LLMManager

@@ -214,6 +214,160 @@ class CodeGenerator:
                 error=str(e),
             )

+
+    def generate_stream(
+        self,
+        description: str,
+        mode: str = "generic",
+        language: str = "python",
+        name: str = "",
+        parameters: str = "",
+        source_code: str = "",
+    ) -> Iterator[Tuple[str, bool]]:
+        """
+        Stream code generation with real-time output.
+
+        Yields tuples of (chunk, is_complete).
+        The final yield will have is_complete=True and contain the extracted code.
+        """
+        # Build the prompt based on mode
+        if mode == "function":
+            prompt_template = GENERATION_PROMPTS["function"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                name=name or "generated_function",
+                parameters=parameters or "None specified",
+                return_type="appropriate type",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "class":
+            prompt_template = GENERATION_PROMPTS["class"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                name=name or "GeneratedClass",
+                attributes="None specified",
+                methods="None specified",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "script":
+            prompt_template = GENERATION_PROMPTS["script"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                requirements=f"- {description}",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "test":
+            prompt_template = GENERATION_PROMPTS["test"]
+            formatted = prompt_template.format(
+                language=language,
+                source_code=source_code,
+                test_framework="pytest",
+            )
+        else:  # generic
+            prompt_template = GENERATION_PROMPTS["generic"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+
+        # Stream the response
+        full_response = ""
+        try:
+            for chunk in self.llm.stream(formatted):
+                full_response += chunk
+                yield (chunk, False)
+
+            # Extract code and yield final result
+            code = self._extract_code(full_response, language)
+            yield (code, True)
+        except Exception as e:
+            yield (f"Error: {str(e)}", True)
+
+
+    def generate_stream(
+        self,
+        description: str,
+        mode: str = "generic",
+        language: str = "python",
+        name: str = "",
+        parameters: str = "",
+        source_code: str = "",
+    ) -> Iterator[Tuple[str, bool]]:
+        """
+        Stream code generation with real-time output.
+
+        Yields tuples of (chunk, is_complete).
+        The final yield will have is_complete=True and contain the extracted code.
+        """
+        # Build the prompt based on mode
+        if mode == "function":
+            prompt_template = GENERATION_PROMPTS["function"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                name=name or "generated_function",
+                parameters=parameters or "None specified",
+                return_type="appropriate type",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "class":
+            prompt_template = GENERATION_PROMPTS["class"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                name=name or "GeneratedClass",
+                attributes="None specified",
+                methods="None specified",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "script":
+            prompt_template = GENERATION_PROMPTS["script"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                requirements=f"- {description}",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "test":
+            prompt_template = GENERATION_PROMPTS["test"]
+            formatted = prompt_template.format(
+                language=language,
+                source_code=source_code,
+                test_framework="pytest",
+            )
+        else:  # generic
+            prompt_template = GENERATION_PROMPTS["generic"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+
+        # Stream the response
+        full_response = ""
+        try:
+            for chunk in self.llm.stream(formatted):
+                full_response += chunk
+                yield (chunk, False)
+
+            # Extract code and yield final result
+            code = self._extract_code(full_response, language)
+            yield (code, True)
+        except Exception as e:
+            yield (f"Error: {str(e)}", True)
+
     def _extract_code(self, response: str, language: str) -> str:
         """Extract code block from LLM response."""
         # Try to find language-specific code block
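Note that this hunk adds two identical generate_stream definitions (new lines 218 and 295); since the second shadows the first at class-definition time, behavior is unaffected. Both delegate final extraction to _extract_code, of which the diff shows only the docstring and the comment about language-specific code blocks. A hypothetical sketch of that style of fenced-block extraction, not the package's actual implementation:

    import re

    def extract_code(response: str, language: str) -> str:
        # Illustrative only: prefer a language-tagged fenced block, fall back
        # to any fenced block, else return the whole response.
        for pattern in (rf"```{re.escape(language)}\s*\n(.*?)```",
                        r"```\s*\n(.*?)```"):
            match = re.search(pattern, response, re.DOTALL)
            if match:
                return match.group(1).strip()
        return response.strip()

    print(extract_code("Sure:\n```python\nprint('hi')\n```\nDone.", "python"))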
{cognify_code-0.2.2.dist-info → cognify_code-0.2.3.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cognify-code
-Version: 0.2.2
+Version: 0.2.3
 Summary: Your local AI-powered code assistant. Review, generate, search, and refactor code with an intelligent AI agent—all running locally with complete privacy.
 Author-email: Ashok Kumar <akkssy@users.noreply.github.com>
 Maintainer-email: Ashok Kumar <akkssy@users.noreply.github.com>
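The only METADATA change is the version bump. Once the new wheel is installed, the bumped version is visible through the standard metadata API; a quick check, assuming cognify-code is installed in the current environment:

    from importlib.metadata import version

    print(version("cognify-code"))  # expected: 0.2.3 after this upgrade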
{cognify_code-0.2.2.dist-info → cognify_code-0.2.3.dist-info}/RECORD
CHANGED

@@ -1,5 +1,5 @@
 ai_code_assistant/__init__.py,sha256=XnpG4h-2gW3cXseFvqQT_-XyOmVJtikVMrHUnmy8XKI,409
-ai_code_assistant/cli.py,sha256=
+ai_code_assistant/cli.py,sha256=lvgNRB0LwGmZJbUUJHRdZMuoV83USMvNUB8-wLZjBkc,62987
 ai_code_assistant/config.py,sha256=6sAufexwzfCu2JNWvt9KevS9k_gMcjj1TAnwuaO1ZFw,4727
 ai_code_assistant/llm.py,sha256=DfcWJf6zEAUsPSEZLdEmb9o6BQNf1Ja88nswjpy6cOw,4209
 ai_code_assistant/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -18,7 +18,7 @@ ai_code_assistant/editor/diff_handler.py,sha256=LeI-00GuH7ASIetsUzT3Y_pDq4K1wmyc
 ai_code_assistant/editor/file_editor.py,sha256=csD8MW0jrfXAek5blWNuot_QWlhkgTTmtQtf8rbIdhY,11143
 ai_code_assistant/editor/prompts.py,sha256=wryxwb4dNaeSbl5mHkDuw2uTAZxkdrdyZ89Gfogafow,4356
 ai_code_assistant/generator/__init__.py,sha256=CfCO58CBye-BlZHjOfLLShovp2TVXg_GHKJuXe6ihu0,273
-ai_code_assistant/generator/code_gen.py,sha256=
+ai_code_assistant/generator/code_gen.py,sha256=Sp_j1IdR0vbU5xRZHhvTBCbxfNxtvNMU1tEg0NLV00k,14790
 ai_code_assistant/generator/prompts.py,sha256=uoEDpcRzpTd-4TLHNW_EbSHJiADMlu9SoGWZvvo1Adk,3384
 ai_code_assistant/git/__init__.py,sha256=YgqmzneAnZyRrbazMqGoFSPIk5Yf5OTm2LXPbkQmecU,232
 ai_code_assistant/git/commit_generator.py,sha256=CzDH5ZPqEaXyPznBg8FgTz8wbV4adALUQD__kl8au6o,4135
@@ -47,9 +47,9 @@ ai_code_assistant/reviewer/prompts.py,sha256=9RrHEBttS5ngxY2BNsUvqGC6-cTxco-kDPb
 ai_code_assistant/utils/__init__.py,sha256=3HO-1Bj4VvUtM7W1C3MKR4DzQ9Xc875QKSHHkHwuqVs,368
 ai_code_assistant/utils/file_handler.py,sha256=jPxvtI5dJxkpPjELgRJ11WXamtyKKmZANQ1fcfMVtiU,5239
 ai_code_assistant/utils/formatters.py,sha256=5El9ew9HS6JLBucBUxxcw4fO5nLpOucgNJrJj2NC3zw,8945
-cognify_code-0.2.2.dist-info/licenses/LICENSE,sha256=
-cognify_code-0.2.2.dist-info/METADATA,sha256=
-cognify_code-0.2.2.dist-info/WHEEL,sha256=
-cognify_code-0.2.2.dist-info/entry_points.txt,sha256=
-cognify_code-0.2.2.dist-info/top_level.txt,sha256=
-cognify_code-0.2.2.dist-info/RECORD,,
+cognify_code-0.2.3.dist-info/licenses/LICENSE,sha256=5yu_kWq2bK-XKhWo79Eykdg4Qf3O8V2Ys7cpOO7GyyE,1063
+cognify_code-0.2.3.dist-info/METADATA,sha256=oGRohUiX2gCBNbV7FqAWyRlK_nD7aGFav2AjCuESJo0,11862
+cognify_code-0.2.3.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+cognify_code-0.2.3.dist-info/entry_points.txt,sha256=MrBnnWPHZVozqqKyTlnJO63YN2kE5yPWKlr2nnRFRks,94
+cognify_code-0.2.3.dist-info/top_level.txt,sha256=dD_r1x-oX0s1uspYY72kig4jfIsjh3oDKwOBCMYXqpo,18
+cognify_code-0.2.3.dist-info/RECORD,,
{cognify_code-0.2.2.dist-info → cognify_code-0.2.3.dist-info}/WHEEL
File without changes

{cognify_code-0.2.2.dist-info → cognify_code-0.2.3.dist-info}/entry_points.txt
File without changes

{cognify_code-0.2.2.dist-info → cognify_code-0.2.3.dist-info}/licenses/LICENSE
File without changes

{cognify_code-0.2.2.dist-info → cognify_code-0.2.3.dist-info}/top_level.txt
File without changes
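For reference, RECORD rows follow the wheel format path,sha256=<digest>,<size>, where the digest is the urlsafe-base64-encoded SHA-256 of the file with trailing padding stripped. A small sketch that reproduces an entry for a local file (the path is illustrative):

    import base64
    import hashlib
    from pathlib import Path

    def record_entry(path: Path) -> str:
        # RECORD row: "<path>,sha256=<urlsafe-b64 digest, padding stripped>,<size>"
        data = path.read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
        return f"{path},sha256={digest.rstrip(b'=').decode()},{len(data)}"

    # Illustrative path; any local file works.
    print(record_entry(Path("ai_code_assistant/cli.py")))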