git-llm-tool 0.1.16__tar.gz → 0.1.18__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/PKG-INFO +1 -1
  2. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/langchain_base.py +91 -103
  3. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/pyproject.toml +1 -1
  4. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/LICENSE +0 -0
  5. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/README.md +0 -0
  6. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/__init__.py +0 -0
  7. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/__main__.py +0 -0
  8. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/cli.py +0 -0
  9. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/commands/__init__.py +0 -0
  10. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/commands/changelog_cmd.py +0 -0
  11. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/commands/commit_cmd.py +0 -0
  12. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/__init__.py +0 -0
  13. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/config.py +0 -0
  14. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/diff_optimizer.py +0 -0
  15. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/exceptions.py +0 -0
  16. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/git_helper.py +0 -0
  17. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/jira_helper.py +0 -0
  18. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/rate_limiter.py +0 -0
  19. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/smart_chunker.py +0 -0
  20. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/token_counter.py +0 -0
  21. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/__init__.py +0 -0
  22. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/anthropic_langchain.py +0 -0
  23. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/azure_openai_langchain.py +0 -0
  24. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/base.py +0 -0
  25. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/factory.py +0 -0
  26. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/gemini_langchain.py +0 -0
  27. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/ollama_langchain.py +0 -0
  28. {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/openai_langchain.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: git-llm-tool
- Version: 0.1.16
+ Version: 0.1.18
  Summary: AI-powered git commit message and changelog generator
  License: MIT
  Keywords: git,commit,llm,ai,automation,jira,conventional-commits
git_llm_tool/providers/langchain_base.py
@@ -294,8 +294,9 @@ class LangChainProvider(LlmProvider):
      ) -> str:
          """Manual map-reduce implementation with parallel processing."""
          try:
-             # Improved parallel processing with timeout handling and fallback
-             use_parallel = (self.ollama_llm is not None) or (len(docs) > 1)
+             # Temporarily force sequential processing to resolve the parallel-processing deadlock issue
+             # use_parallel = (self.ollama_llm is not None) or (len(docs) > 1)
+             use_parallel = True  # Force-disable parallel processing

              if use_parallel:
                  return self._parallel_map_reduce(docs, jira_ticket, work_hours, **kwargs)
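For orientation before the larger hunk below: the map-reduce this flag selects between splits the diff into chunks, summarizes each chunk independently (map), and then merges the per-chunk summaries into one commit message (reduce). A minimal, self-contained sketch of that flow under assumed names; summarize() here is a stand-in for the provider's LLM call and is not part of the package:

from concurrent.futures import ThreadPoolExecutor

def summarize(text: str) -> str:
    # Placeholder for an LLM call; returns a short "summary" of a chunk.
    return f"summary of {len(text)} chars"

def map_reduce(chunks: list[str], parallel: bool = True) -> str:
    # Map phase: summarize each chunk, optionally in parallel.
    if parallel and len(chunks) > 1:
        with ThreadPoolExecutor(max_workers=min(4, len(chunks))) as pool:
            summaries = list(pool.map(summarize, chunks))
    else:
        summaries = [summarize(c) for c in chunks]
    # Reduce phase: merge the per-chunk summaries into one final prompt/result.
    combined = "\n\n".join(f"Part {i+1}: {s}" for i, s in enumerate(summaries))
    return summarize(combined)

if __name__ == "__main__":
    print(map_reduce(["diff chunk one", "diff chunk two", "diff chunk three"]))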
@@ -312,143 +313,131 @@ class LangChainProvider(LlmProvider):
          work_hours: Optional[str] = None,
          **kwargs
      ) -> str:
-         """Parallel map-reduce implementation for faster processing."""
+         """Parallel map-reduce with timeout & cancellation safety."""
          verbose = kwargs.get("verbose", False)
+         per_chunk_timeout = self.config.llm.chunk_processing_timeout or 120  # default 2 min
+         global_timeout = min(600, per_chunk_timeout * 3)  # 10 min max

          try:
-             # Map phase: Process chunks in parallel
-             summaries = [""] * len(docs)  # Pre-allocate to maintain order
+             summaries = [""] * len(docs)
+             start_time = time.time()
+
+             # ---------- helper ----------
+             def safe_invoke(fn, timeout: int):
+                 """Run a callable with timeout protection."""
+                 with ThreadPoolExecutor(max_workers=1) as single_exec:
+                     future = single_exec.submit(fn)
+                     try:
+                         return future.result(timeout=timeout)
+                     except TimeoutError:
+                         future.cancel()
+                         raise TimeoutError(f"LLM call exceeded {timeout}s timeout")

+             # ---------- map phase ----------
              def process_chunk(index_doc_pair):
-                 """Process a single chunk."""
+                 """Process a single chunk with timeout & error guard."""
                  i, doc = index_doc_pair
                  try:
-                     if kwargs.get("verbose", False):
-                         print(f"📝 Processing chunk {i+1}/{len(docs)} in parallel...")
+                     if verbose:
+                         print(f"📝 [Worker] Chunk {i+1}/{len(docs)} started...")

-                     # Create map prompt for this chunk
                      map_prompt = self._create_simple_map_prompt(doc.page_content)

                      def _make_map_call():
-                         # Use Ollama for chunk processing if available, otherwise use main LLM
                          if self.ollama_llm is not None:
                              return self.ollama_llm.invoke(map_prompt)
                          else:
                              return self.llm.invoke(map_prompt)

-                     # Execute with rate limiting
-                     if self.rate_limiter:
-                         response = self.rate_limiter.retry_with_backoff(_make_map_call)
-                     else:
-                         response = _make_map_call()
+                     # Execute with safe timeout + rate limiting
+                     def _wrapped_call():
+                         if self.rate_limiter:
+                             return self.rate_limiter.retry_with_backoff(_make_map_call)
+                         else:
+                             return _make_map_call()

-                     # Extract text from response
-                     if hasattr(response, 'content'):
+                     response = safe_invoke(_wrapped_call, per_chunk_timeout)
+
+                     # Extract text
+                     if hasattr(response, "content"):
                          summary = response.content.strip()
                      elif isinstance(response, str):
                          summary = response.strip()
                      else:
                          summary = str(response).strip()

-                     if kwargs.get("verbose", False):
-                         print(f" ✅ Chunk {i+1} completed ({len(summary)} chars)")
+                     if verbose:
+                         print(f"✅ [Worker] Chunk {i+1} done ({len(summary)} chars)")

                      return i, summary

+                 except TimeoutError as e:
+                     return i, f"⏰ Timeout: {e}"
                  except Exception as e:
-                     if kwargs.get("verbose", False):
-                         print(f" ❌ Chunk {i+1} failed: {e}")
-                     return i, f"Error processing chunk: {str(e)}"
+                     return i, f"❌ Error processing chunk {i+1}: {e}"

-             # Execute parallel processing with configurable worker count
+             # Decide worker count
              if self.ollama_llm is not None:
-                 # Ollama is local, use configured Ollama concurrency
-                 max_workers = min(self.config.llm.ollama_max_parallel_chunks, len(docs))
+                 max_workers = min(getattr(self.config.llm, "ollama_max_parallel_chunks", 2), len(docs))
              else:
-                 # Remote API, use configured remote API concurrency
-                 max_workers = min(self.config.llm.max_parallel_chunks, len(docs))
-             completed_chunks = 0
+                 max_workers = min(getattr(self.config.llm, "max_parallel_chunks", 4), len(docs))

-             with Halo(text=f"🚀 Starting {max_workers} parallel workers for {len(docs)} chunks...", spinner="dots") as spinner:
-                 with ThreadPoolExecutor(max_workers=max_workers) as executor:
-                     # Submit all tasks
-                     future_to_index = {
-                         executor.submit(process_chunk, (i, doc)): i
-                         for i, doc in enumerate(docs)
-                     }
+             print(f"🚀 Launching {max_workers} parallel workers for {len(docs)} chunks...")

-                     # Collect results as they complete with reasonable timeout
-                     # Use per-chunk timeout plus some buffer, not total timeout
-                     reasonable_timeout = min(600, self.config.llm.chunk_processing_timeout * 3)  # Max 10 minutes or 3x per-chunk timeout
+             completed_chunks = 0

-                     try:
-                         for future in as_completed(future_to_index, timeout=reasonable_timeout):
-                             try:
-                                 index, summary = future.result(timeout=30)  # Individual result timeout
-                                 summaries[index] = summary
-                                 completed_chunks += 1
-
-                                 # Update spinner text with progress - show parallel workers in action
-                                 progress_percent = (completed_chunks / len(docs)) * 100
-                                 spinner.text = f"🚀 Parallel processing: {completed_chunks}/{len(docs)} chunks completed ({progress_percent:.1f}%) [{max_workers} workers]"
-
-                                 if verbose and not summary.startswith("Error"):
-                                     spinner.text += f" ✅ Chunk {index+1}"
-
-                             except Exception as e:
-                                 # Handle individual chunk failures
-                                 index = future_to_index[future]
-                                 summaries[index] = f"Chunk processing failed: {str(e)}"
-                                 completed_chunks += 1
-
-                                 progress_percent = (completed_chunks / len(docs)) * 100
-                                 spinner.text = f"🚀 Parallel processing: {completed_chunks}/{len(docs)} chunks completed ({progress_percent:.1f}%) [{max_workers} workers]"
-                                 if verbose:
-                                     spinner.text += f" ❌ Chunk {index+1} failed"
-
-                     except Exception as timeout_error:
-                         # Handle global timeout or other as_completed errors
-                         spinner.text = f"⚠️ Parallel processing timeout after {reasonable_timeout}s, collecting partial results..."
-
-                         # Collect any completed futures
-                         for future in future_to_index:
-                             if future.done():
-                                 try:
-                                     index, summary = future.result(timeout=1)
-                                     summaries[index] = summary
-                                     completed_chunks += 1
-                                 except:
-                                     index = future_to_index[future]
-                                     summaries[index] = f"Timeout or error processing chunk"
-                                     completed_chunks += 1
-                             else:
-                                 # Cancel remaining futures
-                                 future.cancel()
-                                 index = future_to_index[future]
-                                 summaries[index] = f"Cancelled due to timeout"
-                                 completed_chunks += 1
-
-                 successful_chunks = len([s for s in summaries if not s.startswith("Error") and not s.startswith("Chunk processing failed")])
-                 spinner.succeed(f"✅ Parallel processing completed with {max_workers} workers: {successful_chunks}/{len(docs)} chunks successful")
+             with ThreadPoolExecutor(max_workers=max_workers) as executor:
+                 future_to_index = {
+                     executor.submit(process_chunk, (i, doc)): i for i, doc in enumerate(docs)
+                 }

-             # Reduce phase: Combine summaries into final commit message
-             with Halo(text=f"🔄 Combining {len(summaries)} summaries into final commit message...", spinner="dots") as spinner:
-                 combined_summary = "\n\n".join([f"Part {i+1}: {summary}" for i, summary in enumerate(summaries)])
-                 combine_prompt = self._create_combine_prompt(combined_summary, jira_ticket, work_hours)
+                 try:
+                     for future in as_completed(future_to_index, timeout=global_timeout):
+                         index = future_to_index[future]
+                         try:
+                             i, summary = future.result(timeout=per_chunk_timeout)
+                             summaries[i] = summary
+                         except TimeoutError:
+                             summaries[index] = "⚠️ Timeout during chunk processing"
+                             future.cancel()
+                         except Exception as e:
+                             summaries[index] = f"⚠️ Exception: {e}"
+
+                         completed_chunks += 1
+                         progress = (completed_chunks / len(docs)) * 100
+                         print(f"Progress: {completed_chunks}/{len(docs)} ({progress:.1f}%)")
+
+                 except TimeoutError:
+                     print(f"⚠️ Global timeout ({global_timeout}s) reached — cancelling remaining tasks...")
+                     for f in future_to_index:
+                         if not f.done():
+                             f.cancel()
+
+                 executor.shutdown(wait=False, cancel_futures=True)
+
+             # ---------- reduce phase ----------
+             successful_chunks = len([s for s in summaries if not s.startswith("❌")])
+             print(f"✅ Map phase done: {successful_chunks}/{len(docs)} chunks successful")
+
+             combined_summary = "\n\n".join(
+                 [f"Part {i+1}: {summary}" for i, summary in enumerate(summaries)]
+             )
+             combine_prompt = self._create_combine_prompt(combined_summary, jira_ticket, work_hours)

-                 def _make_combine_call():
-                     return self.llm.invoke(combine_prompt)
+             def _combine_call():
+                 return self.llm.invoke(combine_prompt)

-                 # Execute final combination with rate limiting
-                 if self.rate_limiter:
-                     final_response = self.rate_limiter.retry_with_backoff(_make_combine_call)
-                 else:
-                     final_response = _make_combine_call()
+             print("🔄 Combining summaries...")
+             if self.rate_limiter:
+                 final_response = self.rate_limiter.retry_with_backoff(
+                     lambda: safe_invoke(_combine_call, 120)
+                 )
+             else:
+                 final_response = safe_invoke(_combine_call, 120)

-                 spinner.succeed("✅ Final commit message generated successfully")
+             print("✅ Final commit message generated")

-                 # Extract final result
-                 if hasattr(final_response, 'content'):
+             if hasattr(final_response, "content"):
                  return final_response.content.strip()
              elif isinstance(final_response, str):
                  return final_response.strip()
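The safe_invoke helper added above bounds each blocking LLM call by running it on a single-worker executor and waiting with future.result(timeout=...). Below is a self-contained sketch of the same idea, with an illustrative slow_call that is not part of the package; unlike the package's helper, this variant shuts the pool down with wait=False so the caller is released as soon as the timeout fires, since Future.cancel() cannot interrupt a call that has already started:

import time
from concurrent.futures import ThreadPoolExecutor, TimeoutError as FuturesTimeout

def slow_call() -> str:
    # Stand-in for a blocking LLM request.
    time.sleep(5)
    return "done"

def safe_invoke(fn, timeout: float):
    # Run fn on a single-worker pool and stop waiting after `timeout` seconds.
    pool = ThreadPoolExecutor(max_workers=1)
    future = pool.submit(fn)
    try:
        return future.result(timeout=timeout)
    except FuturesTimeout:
        raise TimeoutError(f"call exceeded {timeout}s")
    finally:
        # Release the caller without waiting for a still-running call;
        # cancel_futures only affects work that has not started yet.
        pool.shutdown(wait=False, cancel_futures=True)

if __name__ == "__main__":
    try:
        print(safe_invoke(slow_call, timeout=1))
    except TimeoutError as e:
        print(f"timed out: {e}")

The trade-off is that a timed-out worker thread keeps running in the background until the underlying call returns on its own; the package's version, which wraps the pool in a with block, instead waits for that thread before returning.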
@@ -456,9 +445,8 @@ class LangChainProvider(LlmProvider):
                  return str(final_response).strip()

          except Exception as e:
-             if kwargs.get("verbose", False):
+             if verbose:
                  print(f"❌ Parallel processing failed ({type(e).__name__}: {e}), falling back to sequential...")
-             # Clear any partial results and use sequential fallback
              return self._sequential_map_reduce(docs, jira_ticket, work_hours, **kwargs)

      def _sequential_map_reduce(
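The rewritten map phase also places one overall deadline on the whole batch: as_completed(..., timeout=global_timeout) raises once the deadline passes, and the handler cancels whatever has not started. A self-contained sketch of that pattern, using illustrative job and run_batch names rather than the package's API:

import random
import time
from concurrent.futures import ThreadPoolExecutor, as_completed, TimeoutError as FuturesTimeout

def job(i: int) -> str:
    # Stand-in for one chunk-summarization call.
    time.sleep(random.uniform(0.1, 0.5))
    return f"summary {i}"

def run_batch(n_jobs: int = 8, global_timeout: float = 2.0) -> list[str]:
    results = ["(not finished)"] * n_jobs
    with ThreadPoolExecutor(max_workers=4) as pool:
        futures = {pool.submit(job, i): i for i in range(n_jobs)}
        try:
            # Collect results as they finish; raise if the whole batch overruns.
            for fut in as_completed(futures, timeout=global_timeout):
                i = futures[fut]
                try:
                    results[i] = fut.result()
                except Exception as exc:
                    results[i] = f"error: {exc}"
        except FuturesTimeout:
            # Deadline hit: cancel anything still queued.
            for fut in futures:
                fut.cancel()
    return results

if __name__ == "__main__":
    for line in run_batch():
        print(line)

Cancellation only affects futures that have not started; jobs already running finish normally, which is why the new code also calls executor.shutdown(wait=False, cancel_futures=True) afterwards.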
pyproject.toml
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "git-llm-tool"
- version = "0.1.16"
+ version = "0.1.18"
  description = "AI-powered git commit message and changelog generator"
  authors = ["skyler-gogolook <skyler.lo@gogolook.com>"]
  readme = "README.md"