cognify-code 0.2.1__tar.gz → 0.2.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. {cognify_code-0.2.1/src/cognify_code.egg-info → cognify_code-0.2.3}/PKG-INFO +1 -1
  2. {cognify_code-0.2.1 → cognify_code-0.2.3}/pyproject.toml +1 -1
  3. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/__init__.py +1 -1
  4. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/cli.py +80 -25
  5. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/generator/code_gen.py +155 -1
  6. {cognify_code-0.2.1 → cognify_code-0.2.3/src/cognify_code.egg-info}/PKG-INFO +1 -1
  7. {cognify_code-0.2.1 → cognify_code-0.2.3}/LICENSE +0 -0
  8. {cognify_code-0.2.1 → cognify_code-0.2.3}/MANIFEST.in +0 -0
  9. {cognify_code-0.2.1 → cognify_code-0.2.3}/README.md +0 -0
  10. {cognify_code-0.2.1 → cognify_code-0.2.3}/setup.cfg +0 -0
  11. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/agent/__init__.py +0 -0
  12. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/agent/code_agent.py +0 -0
  13. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/agent/code_generator.py +0 -0
  14. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/agent/code_reviewer.py +0 -0
  15. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/agent/diff_engine.py +0 -0
  16. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/agent/file_manager.py +0 -0
  17. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/agent/intent_classifier.py +0 -0
  18. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/chat/__init__.py +0 -0
  19. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/chat/agent_session.py +0 -0
  20. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/chat/session.py +0 -0
  21. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/config.py +0 -0
  22. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/editor/__init__.py +0 -0
  23. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/editor/diff_handler.py +0 -0
  24. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/editor/file_editor.py +0 -0
  25. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/editor/prompts.py +0 -0
  26. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/generator/__init__.py +0 -0
  27. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/generator/prompts.py +0 -0
  28. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/git/__init__.py +0 -0
  29. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/git/commit_generator.py +0 -0
  30. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/git/manager.py +0 -0
  31. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/llm.py +0 -0
  32. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/providers/__init__.py +0 -0
  33. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/providers/base.py +0 -0
  34. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/providers/cerebras.py +0 -0
  35. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/providers/factory.py +0 -0
  36. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/providers/google.py +0 -0
  37. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/providers/groq.py +0 -0
  38. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/providers/ollama.py +0 -0
  39. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/providers/openai.py +0 -0
  40. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/providers/openrouter.py +0 -0
  41. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/py.typed +0 -0
  42. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/refactor/__init__.py +0 -0
  43. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/refactor/analyzer.py +0 -0
  44. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/refactor/change_plan.py +0 -0
  45. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/refactor/multi_file_editor.py +0 -0
  46. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/refactor/prompts.py +0 -0
  47. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/retrieval/__init__.py +0 -0
  48. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/retrieval/chunker.py +0 -0
  49. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/retrieval/indexer.py +0 -0
  50. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/retrieval/search.py +0 -0
  51. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/reviewer/__init__.py +0 -0
  52. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/reviewer/analyzer.py +0 -0
  53. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/reviewer/prompts.py +0 -0
  54. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/utils/__init__.py +0 -0
  55. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/utils/file_handler.py +0 -0
  56. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/ai_code_assistant/utils/formatters.py +0 -0
  57. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/cognify_code.egg-info/SOURCES.txt +0 -0
  58. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/cognify_code.egg-info/dependency_links.txt +0 -0
  59. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/cognify_code.egg-info/entry_points.txt +0 -0
  60. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/cognify_code.egg-info/requires.txt +0 -0
  61. {cognify_code-0.2.1 → cognify_code-0.2.3}/src/cognify_code.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cognify-code
-Version: 0.2.1
+Version: 0.2.3
 Summary: Your local AI-powered code assistant. Review, generate, search, and refactor code with an intelligent AI agent—all running locally with complete privacy.
 Author-email: Ashok Kumar <akkssy@users.noreply.github.com>
 Maintainer-email: Ashok Kumar <akkssy@users.noreply.github.com>
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "cognify-code"
-version = "0.2.1"
+version = "0.2.3"
 description = "Your local AI-powered code assistant. Review, generate, search, and refactor code with an intelligent AI agent—all running locally with complete privacy."
 readme = "README.md"
 license = {text = "MIT"}
@@ -5,7 +5,7 @@ Review, generate, search, and refactor code with an intelligent AI agent.
 All running locally with complete privacy using Ollama.
 """
 
-__version__ = "0.2.1"
+__version__ = "0.2.2"
 __author__ = "Ashok Kumar"
 
 from ai_code_assistant.config import Config, load_config
@@ -156,41 +156,91 @@ def review(ctx, files: Tuple[Path, ...], review_type: str, output_format: str,
               type=click.Choice(["console", "markdown", "json"]))
 @click.option("--source", "-s", type=click.Path(exists=True, path_type=Path),
               help="Source file (for test mode)")
+@click.option("--stream/--no-stream", default=True, help="Stream output in real-time")
 @click.pass_context
 def generate(ctx, description: str, mode: str, language: str, name: Optional[str],
              params: Optional[str], output: Optional[Path], output_format: str,
-             source: Optional[Path]):
+             source: Optional[Path], stream: bool):
     """Generate code from natural language description."""
+    from rich.live import Live
+    from rich.markdown import Markdown
+    from rich.panel import Panel
+
     config, llm = get_components(ctx.obj.get("config_path"))
     generator = CodeGenerator(config, llm)
     formatter = get_formatter(output_format, config.output.use_colors)
 
     console.print(f"\n[bold]Generating {mode} in {language}...[/bold]\n")
 
-    with console.status("[bold green]Generating code..."):
-        if mode == "function":
-            result = generator.generate_function(
-                description=description, name=name or "generated_function",
-                language=language, parameters=params or "",
-            )
-        elif mode == "class":
-            result = generator.generate_class(
-                description=description, name=name or "GeneratedClass", language=language,
-            )
-        elif mode == "script":
-            result = generator.generate_script(
-                description=description, requirements=[description], language=language,
-            )
-        elif mode == "test":
-            if not source:
-                console.print("[red]Error:[/red] --source required for test mode")
-                sys.exit(1)
-            source_code = source.read_text()
-            result = generator.generate_tests(source_code=source_code, language=language)
-        else:
-            result = generator.generate(description=description, language=language)
+    # Handle test mode source requirement
+    source_code = ""
+    if mode == "test":
+        if not source:
+            console.print("[red]Error:[/red] --source required for test mode")
+            sys.exit(1)
+        source_code = source.read_text()
+
+    if stream:
+        # Streaming mode - show output as it generates
+        full_response = ""
+        final_code = ""
+
+        console.print("[dim]Streaming response...[/dim]\n")
+
+        for chunk, is_complete in generator.generate_stream(
+            description=description,
+            mode=mode,
+            language=language,
+            name=name or "",
+            parameters=params or "",
+            source_code=source_code,
+        ):
+            if is_complete:
+                final_code = chunk
+            else:
+                console.print(chunk, end="", highlight=False)
+                full_response += chunk
+
+        console.print("\n")
+
+        # Create result for formatting
+        from ai_code_assistant.generator import GenerationResult
+        result = GenerationResult(
+            code=final_code,
+            language=language,
+            mode=mode,
+            description=description,
+            raw_response=full_response,
+        )
+
+        # Show extracted code in a panel
+        console.print(Panel(
+            final_code,
+            title=f"[bold green]Generated {mode.title()}[/bold green]",
+            border_style="green",
+        ))
+    else:
+        # Non-streaming mode (original behavior)
+        with console.status("[bold green]Generating code..."):
+            if mode == "function":
+                result = generator.generate_function(
+                    description=description, name=name or "generated_function",
+                    language=language, parameters=params or "",
+                )
+            elif mode == "class":
+                result = generator.generate_class(
+                    description=description, name=name or "GeneratedClass", language=language,
+                )
+            elif mode == "script":
+                result = generator.generate_script(
+                    description=description, requirements=[description], language=language,
+                )
+            elif mode == "test":
+                result = generator.generate_tests(source_code=source_code, language=language)
+            else:
+                result = generator.generate(description=description, language=language)
 
-    formatted = formatter.format_generation(result)
+    formatted = formatter.format_generation(result)
 
     if output and result.success:
         output.parent.mkdir(parents=True, exist_ok=True)
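
The net effect of this hunk: streaming is now the default for `generate`, and `--no-stream` restores the original spinner behavior. A minimal sketch of exercising the flag through click's CliRunner follows; the `ai_code_assistant.cli` import and the need for a dict `obj` come from this diff, but the argument and option names for description, mode, and language are assumptions (their decorators sit outside this hunk), and a configured LLM backend is required for the command to actually generate anything.

# Hedged sketch: drive the new --stream/--no-stream flag via click's test runner.
from click.testing import CliRunner

from ai_code_assistant.cli import generate

runner = CliRunner()
result = runner.invoke(
    generate,
    # "--mode" and "--language" option names are assumed; not shown in this hunk
    ["add two numbers", "--mode", "function", "--language", "python", "--no-stream"],
    obj={},  # generate reads ctx.obj.get("config_path"), so obj must be a dict
)
print(result.output)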
@@ -222,7 +272,7 @@ def chat(ctx, context: Tuple[Path, ...], stream: bool):
         "  /clear - Clear conversation history\n"
         "  /context - Show loaded context files\n"
         "  /export - Export conversation to markdown\n"
-        "  /quit - Exit chat\n",
+        "  /quit or exit - Exit chat\n",
         title="Interactive Mode",
     ))
 
@@ -236,6 +286,11 @@ def chat(ctx, context: Tuple[Path, ...], stream: bool):
             if not user_input:
                 continue
 
+            # Handle exit commands without slash
+            if user_input.lower() in ("exit", "quit", "bye", "q"):
+                console.print("[dim]Goodbye![/dim]")
+                break
+
             # Handle commands
             if user_input.startswith("/"):
                 cmd_parts = user_input[1:].split(maxsplit=1)
@@ -3,7 +3,7 @@
 import re
 from dataclasses import dataclass, field
 from pathlib import Path
-from typing import List, Literal, Optional
+from typing import Iterator, List, Literal, Optional, Tuple
 
 from ai_code_assistant.config import Config
 from ai_code_assistant.llm import LLMManager
@@ -214,6 +214,160 @@ class CodeGenerator:
                 error=str(e),
             )
 
+
+    def generate_stream(
+        self,
+        description: str,
+        mode: str = "generic",
+        language: str = "python",
+        name: str = "",
+        parameters: str = "",
+        source_code: str = "",
+    ) -> Iterator[Tuple[str, bool]]:
+        """
+        Stream code generation with real-time output.
+
+        Yields tuples of (chunk, is_complete).
+        The final yield will have is_complete=True and contain the extracted code.
+        """
+        # Build the prompt based on mode
+        if mode == "function":
+            prompt_template = GENERATION_PROMPTS["function"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                name=name or "generated_function",
+                parameters=parameters or "None specified",
+                return_type="appropriate type",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "class":
+            prompt_template = GENERATION_PROMPTS["class"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                name=name or "GeneratedClass",
+                attributes="None specified",
+                methods="None specified",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "script":
+            prompt_template = GENERATION_PROMPTS["script"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                requirements=f"- {description}",
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+        elif mode == "test":
+            prompt_template = GENERATION_PROMPTS["test"]
+            formatted = prompt_template.format(
+                language=language,
+                source_code=source_code,
+                test_framework="pytest",
+            )
+        else:  # generic
+            prompt_template = GENERATION_PROMPTS["generic"]
+            formatted = prompt_template.format(
+                language=language,
+                description=description,
+                include_type_hints=self.config.generation.include_type_hints,
+                include_docstrings=self.config.generation.include_docstrings,
+            )
+
+        # Stream the response
+        full_response = ""
+        try:
+            for chunk in self.llm.stream(formatted):
+                full_response += chunk
+                yield (chunk, False)
+
+            # Extract code and yield final result
+            code = self._extract_code(full_response, language)
+            yield (code, True)
+        except Exception as e:
+            yield (f"Error: {str(e)}", True)
+
     def _extract_code(self, response: str, language: str) -> str:
         """Extract code block from LLM response."""
         # Try to find language-specific code block
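
The streaming contract here is simple: every intermediate yield is (raw_chunk, False), and exactly one final yield is (extracted_code, True); on failure the final chunk is an error string instead of code. A minimal consumer sketch follows. The import paths and the CodeGenerator(config, llm) construction match this diff, while load_config() with no arguments and the LLMManager(config) constructor signature are assumptions.

# Hedged sketch: consume generate_stream outside the CLI.
from ai_code_assistant.config import load_config
from ai_code_assistant.generator.code_gen import CodeGenerator
from ai_code_assistant.llm import LLMManager

config = load_config()                                  # assumed no-arg default
generator = CodeGenerator(config, LLMManager(config))   # LLMManager signature assumed

final_code = ""
for chunk, is_complete in generator.generate_stream(
    description="parse a CSV file into a list of dicts",
    mode="function",
    language="python",
    name="parse_csv",
):
    if is_complete:
        final_code = chunk      # the last yield carries the extracted code block
    else:
        print(chunk, end="")    # raw model output as it arrives

print("\n--- extracted ---\n" + final_code)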
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cognify-code
-Version: 0.2.1
+Version: 0.2.3
 Summary: Your local AI-powered code assistant. Review, generate, search, and refactor code with an intelligent AI agent—all running locally with complete privacy.
 Author-email: Ashok Kumar <akkssy@users.noreply.github.com>
 Maintainer-email: Ashok Kumar <akkssy@users.noreply.github.com>