git-llm-tool 0.1.16__tar.gz → 0.1.18__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/PKG-INFO +1 -1
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/langchain_base.py +91 -103
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/pyproject.toml +1 -1
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/LICENSE +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/README.md +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/__init__.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/__main__.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/cli.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/commands/__init__.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/commands/changelog_cmd.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/commands/commit_cmd.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/__init__.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/config.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/diff_optimizer.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/exceptions.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/git_helper.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/jira_helper.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/rate_limiter.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/smart_chunker.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/core/token_counter.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/__init__.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/anthropic_langchain.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/azure_openai_langchain.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/base.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/factory.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/gemini_langchain.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/ollama_langchain.py +0 -0
- {git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/openai_langchain.py +0 -0
{git_llm_tool-0.1.16 → git_llm_tool-0.1.18}/git_llm_tool/providers/langchain_base.py

@@ -294,8 +294,9 @@ class LangChainProvider(LlmProvider):
     ) -> str:
         """Manual map-reduce implementation with parallel processing."""
         try:
-            #
-            use_parallel = (self.ollama_llm is not None) or (len(docs) > 1)
+            # Temporarily force sequential processing to work around a parallel-processing deadlock
+            # use_parallel = (self.ollama_llm is not None) or (len(docs) > 1)
+            use_parallel = True  # Force-disable parallel processing

             if use_parallel:
                 return self._parallel_map_reduce(docs, jira_ticket, work_hours, **kwargs)
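For orientation, here is a minimal sketch of the routing this hunk changes. The method names `_parallel_map_reduce` and `_sequential_map_reduce` come from the diff; the free function and its `provider` parameter are hypothetical stand-ins for the surrounding `LangChainProvider` method.

```python
# Hypothetical free-function rendering of the routing shown above; in the package
# this logic lives inside a LangChainProvider method.
def choose_map_reduce(provider, docs, jira_ticket=None, work_hours=None, **kwargs):
    # 0.1.18 hard-codes the flag; 0.1.16 derived it from ollama_llm / chunk count.
    use_parallel = True
    if use_parallel:
        return provider._parallel_map_reduce(docs, jira_ticket, work_hours, **kwargs)
    return provider._sequential_map_reduce(docs, jira_ticket, work_hours, **kwargs)
```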
@@ -312,143 +313,131 @@ class LangChainProvider(LlmProvider):
         work_hours: Optional[str] = None,
         **kwargs
     ) -> str:
-        """Parallel map-reduce
+        """Parallel map-reduce with timeout & cancellation safety."""
         verbose = kwargs.get("verbose", False)
+        per_chunk_timeout = self.config.llm.chunk_processing_timeout or 120  # default 2 min
+        global_timeout = min(600, per_chunk_timeout * 3)  # 10 min max

         try:
-
-
+            summaries = [""] * len(docs)
+            start_time = time.time()
+
+            # ---------- helper ----------
+            def safe_invoke(fn, timeout: int):
+                """Run a callable with timeout protection."""
+                with ThreadPoolExecutor(max_workers=1) as single_exec:
+                    future = single_exec.submit(fn)
+                    try:
+                        return future.result(timeout=timeout)
+                    except TimeoutError:
+                        future.cancel()
+                        raise TimeoutError(f"LLM call exceeded {timeout}s timeout")

+            # ---------- map phase ----------
             def process_chunk(index_doc_pair):
-                """Process a single chunk."""
+                """Process a single chunk with timeout & error guard."""
                 i, doc = index_doc_pair
                 try:
-                    if
-                    print(f"📝
+                    if verbose:
+                        print(f"📝 [Worker] Chunk {i+1}/{len(docs)} started...")

-                    # Create map prompt for this chunk
                     map_prompt = self._create_simple_map_prompt(doc.page_content)

                     def _make_map_call():
-                        # Use Ollama for chunk processing if available, otherwise use main LLM
                         if self.ollama_llm is not None:
                             return self.ollama_llm.invoke(map_prompt)
                         else:
                             return self.llm.invoke(map_prompt)

-                    # Execute with rate limiting
-
-
-
-
+                    # Execute with safe timeout + rate limiting
+                    def _wrapped_call():
+                        if self.rate_limiter:
+                            return self.rate_limiter.retry_with_backoff(_make_map_call)
+                        else:
+                            return _make_map_call()

-
-
+                    response = safe_invoke(_wrapped_call, per_chunk_timeout)
+
+                    # Extract text
+                    if hasattr(response, "content"):
                         summary = response.content.strip()
                     elif isinstance(response, str):
                         summary = response.strip()
                     else:
                         summary = str(response).strip()

-                    if
-                    print(f"
+                    if verbose:
+                        print(f"✅ [Worker] Chunk {i+1} done ({len(summary)} chars)")

                     return i, summary

+                except TimeoutError as e:
+                    return i, f"⏰ Timeout: {e}"
                 except Exception as e:
-
-                    print(f" ❌ Chunk {i+1} failed: {e}")
-                    return i, f"Error processing chunk: {str(e)}"
+                    return i, f"❌ Error processing chunk {i+1}: {e}"

-            #
+            # Decide worker count
             if self.ollama_llm is not None:
-
-                max_workers = min(self.config.llm.ollama_max_parallel_chunks, len(docs))
+                max_workers = min(getattr(self.config.llm, "ollama_max_parallel_chunks", 2), len(docs))
             else:
-
-                max_workers = min(self.config.llm.max_parallel_chunks, len(docs))
-            completed_chunks = 0
+                max_workers = min(getattr(self.config.llm, "max_parallel_chunks", 4), len(docs))

-
-            with ThreadPoolExecutor(max_workers=max_workers) as executor:
-                # Submit all tasks
-                future_to_index = {
-                    executor.submit(process_chunk, (i, doc)): i
-                    for i, doc in enumerate(docs)
-                }
+            print(f"🚀 Launching {max_workers} parallel workers for {len(docs)} chunks...")

-
-                # Use per-chunk timeout plus some buffer, not total timeout
-                reasonable_timeout = min(600, self.config.llm.chunk_processing_timeout * 3)  # Max 10 minutes or 3x per-chunk timeout
+            completed_chunks = 0

-
-
-
-
-                        summaries[index] = summary
-                        completed_chunks += 1
-
-                        # Update spinner text with progress - show parallel workers in action
-                        progress_percent = (completed_chunks / len(docs)) * 100
-                        spinner.text = f"🚀 Parallel processing: {completed_chunks}/{len(docs)} chunks completed ({progress_percent:.1f}%) [{max_workers} workers]"
-
-                        if verbose and not summary.startswith("Error"):
-                            spinner.text += f" ✅ Chunk {index+1}"
-
-                    except Exception as e:
-                        # Handle individual chunk failures
-                        index = future_to_index[future]
-                        summaries[index] = f"Chunk processing failed: {str(e)}"
-                        completed_chunks += 1
-
-                        progress_percent = (completed_chunks / len(docs)) * 100
-                        spinner.text = f"🚀 Parallel processing: {completed_chunks}/{len(docs)} chunks completed ({progress_percent:.1f}%) [{max_workers} workers]"
-                        if verbose:
-                            spinner.text += f" ❌ Chunk {index+1} failed"
-
-                except Exception as timeout_error:
-                    # Handle global timeout or other as_completed errors
-                    spinner.text = f"⚠️ Parallel processing timeout after {reasonable_timeout}s, collecting partial results..."
-
-                    # Collect any completed futures
-                    for future in future_to_index:
-                        if future.done():
-                            try:
-                                index, summary = future.result(timeout=1)
-                                summaries[index] = summary
-                                completed_chunks += 1
-                            except:
-                                index = future_to_index[future]
-                                summaries[index] = f"Timeout or error processing chunk"
-                                completed_chunks += 1
-                        else:
-                            # Cancel remaining futures
-                            future.cancel()
-                            index = future_to_index[future]
-                            summaries[index] = f"Cancelled due to timeout"
-                            completed_chunks += 1
-
-            successful_chunks = len([s for s in summaries if not s.startswith("Error") and not s.startswith("Chunk processing failed")])
-            spinner.succeed(f"✅ Parallel processing completed with {max_workers} workers: {successful_chunks}/{len(docs)} chunks successful")
+            with ThreadPoolExecutor(max_workers=max_workers) as executor:
+                future_to_index = {
+                    executor.submit(process_chunk, (i, doc)): i for i, doc in enumerate(docs)
+                }

-
-
-
-
+                try:
+                    for future in as_completed(future_to_index, timeout=global_timeout):
+                        index = future_to_index[future]
+                        try:
+                            i, summary = future.result(timeout=per_chunk_timeout)
+                            summaries[i] = summary
+                        except TimeoutError:
+                            summaries[index] = "⚠️ Timeout during chunk processing"
+                            future.cancel()
+                        except Exception as e:
+                            summaries[index] = f"⚠️ Exception: {e}"
+
+                        completed_chunks += 1
+                        progress = (completed_chunks / len(docs)) * 100
+                        print(f"Progress: {completed_chunks}/{len(docs)} ({progress:.1f}%)")
+
+                except TimeoutError:
+                    print(f"⚠️ Global timeout ({global_timeout}s) reached — cancelling remaining tasks...")
+                    for f in future_to_index:
+                        if not f.done():
+                            f.cancel()
+
+                    executor.shutdown(wait=False, cancel_futures=True)
+
+            # ---------- reduce phase ----------
+            successful_chunks = len([s for s in summaries if not s.startswith("❌")])
+            print(f"✅ Map phase done: {successful_chunks}/{len(docs)} chunks successful")
+
+            combined_summary = "\n\n".join(
+                [f"Part {i+1}: {summary}" for i, summary in enumerate(summaries)]
+            )
+            combine_prompt = self._create_combine_prompt(combined_summary, jira_ticket, work_hours)

-
-
+            def _combine_call():
+                return self.llm.invoke(combine_prompt)

-
-
-
-
-
+            print("🔄 Combining summaries...")
+            if self.rate_limiter:
+                final_response = self.rate_limiter.retry_with_backoff(
+                    lambda: safe_invoke(_combine_call, 120)
+                )
+            else:
+                final_response = safe_invoke(_combine_call, 120)

-
+            print("✅ Final commit message generated")

-
-            if hasattr(final_response, 'content'):
+            if hasattr(final_response, "content"):
                 return final_response.content.strip()
             elif isinstance(final_response, str):
                 return final_response.strip()
@@ -456,9 +445,8 @@ class LangChainProvider(LlmProvider):
                 return str(final_response).strip()

         except Exception as e:
-            if
+            if verbose:
                 print(f"❌ Parallel processing failed ({type(e).__name__}: {e}), falling back to sequential...")
-            # Clear any partial results and use sequential fallback
             return self._sequential_map_reduce(docs, jira_ticket, work_hours, **kwargs)

     def _sequential_map_reduce(