lm-deluge 0.0.53__tar.gz → 0.0.54__tar.gz
This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of lm-deluge might be problematic.
- {lm_deluge-0.0.53/src/lm_deluge.egg-info → lm_deluge-0.0.54}/PKG-INFO +1 -1
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/pyproject.toml +1 -1
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/__init__.py +3 -4
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/client.py +54 -124
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/llm_tools/extract.py +7 -5
- {lm_deluge-0.0.53 → lm_deluge-0.0.54/src/lm_deluge.egg-info}/PKG-INFO +1 -1
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/LICENSE +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/README.md +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/setup.cfg +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/agent.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/api_requests/__init__.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/api_requests/anthropic.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/api_requests/base.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/api_requests/bedrock.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/api_requests/common.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/api_requests/deprecated/bedrock.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/api_requests/deprecated/cohere.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/api_requests/deprecated/deepseek.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/api_requests/deprecated/mistral.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/api_requests/deprecated/vertex.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/api_requests/gemini.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/api_requests/mistral.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/api_requests/openai.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/api_requests/response.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/batches.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/built_in_tools/anthropic/__init__.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/built_in_tools/anthropic/bash.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/built_in_tools/anthropic/computer_use.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/built_in_tools/anthropic/editor.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/built_in_tools/base.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/built_in_tools/openai.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/cache.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/cli.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/config.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/embed.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/errors.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/file.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/gemini_limits.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/image.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/llm_tools/__init__.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/llm_tools/classify.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/llm_tools/locate.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/llm_tools/ocr.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/llm_tools/score.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/llm_tools/translate.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/__init__.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/anthropic.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/bedrock.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/cerebras.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/cohere.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/deepseek.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/fireworks.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/google.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/grok.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/groq.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/meta.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/mistral.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/openai.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/openrouter.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/models/together.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/presets/cerebras.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/presets/meta.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/prompt.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/request_context.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/rerank.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/tool.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/tracker.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/usage.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/util/harmony.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/util/json.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/util/logprobs.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/util/spatial.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/util/validation.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge/util/xml.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge.egg-info/SOURCES.txt +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge.egg-info/dependency_links.txt +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge.egg-info/requires.txt +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/src/lm_deluge.egg-info/top_level.txt +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/tests/test_builtin_tools.py +0 -0
- {lm_deluge-0.0.53 → lm_deluge-0.0.54}/tests/test_native_mcp_server.py +0 -0
src/lm_deluge/__init__.py (+3 -4)

@@ -1,10 +1,9 @@
-from .client import LLMClient, SamplingParams
+from .client import APIResponse, LLMClient, SamplingParams
+from .file import File
 from .prompt import Conversation, Message
 from .tool import Tool
-from .file import File
-import dotenv
 
-dotenv.load_dotenv()
+# dotenv.load_dotenv() - don't do this, fucks with other packages
 
 __all__ = [
     "LLMClient",
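
Note the behavioral change here: importing lm_deluge no longer calls dotenv.load_dotenv() as a side effect. A minimal sketch of what callers that relied on the implicit load now need to do (the client construction is illustrative, not taken from this diff):

    import dotenv

    # 0.0.54 no longer loads .env at import time, so load environment
    # variables (API keys, etc.) yourself before using the client.
    dotenv.load_dotenv()

    from lm_deluge import LLMClient

    client = LLMClient()  # construction details assumed for illustration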
src/lm_deluge/client.py (+54 -124)

@@ -80,6 +80,22 @@ class _LLMClient(BaseModel):
         self._tracker.log_final_status()
         self._tracker = None
 
+    def reset_tracker(self):
+        """Reset tracker by closing and reopening with fresh state.
+
+        Useful when reusing a client across multiple batches and you want
+        the progress bar to start from 0 instead of showing cumulative totals.
+        """
+        if self._tracker is None:
+            return
+
+        # Close existing tracker (including progress bar)
+        show_progress = self._tracker.use_progress_bar
+        self.close()
+
+        # Create fresh tracker
+        self.open(total=0, show_progress=show_progress)
+
     # NEW! Builder methods
     def with_model(self, model: str):
         self.model_names = [model]
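
A sketch of how the new reset_tracker method might be used when reusing one client across batches (open, close, and reset_tracker appear in the diff; the constructor arguments and the batch method name are assumptions for illustration):

    import asyncio
    from lm_deluge import LLMClient

    async def main():
        client = LLMClient()  # construction details assumed
        client.open(total=0, show_progress=True)

        await client.process_prompts_async(["first batch prompt"])
        client.reset_tracker()  # progress bar restarts from 0
        await client.process_prompts_async(["second batch prompt"])

        client.close()

    asyncio.run(main())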
@@ -353,147 +369,61 @@ class _LLMClient(BaseModel):
         cache: CachePattern | None = None,
         use_responses_api: bool = False,
     ) -> list[APIResponse | None] | list[str | None] | dict[str, int]:
-
-        prompts = prompts_to_conversations(prompts)
-        ids = list(range(len(prompts)))
-        results: list[APIResponse | None] = [None for _ in range(len(prompts))]
-        contexts: list[RequestContext | None] = [None for _ in range(len(prompts))]
-        inflight_tasks: set[asyncio.Task[None]] = set()
-        # Use existing tracker if client has been opened; otherwise open/close automatically
-        tracker: StatusTracker
-        tracker_preopened = self._tracker is not None
-        if tracker_preopened:
-            tracker = self._tracker  # type: ignore[assignment]
-            tracker.add_to_total(len(prompts))
-        else:
-            self.open(total=len(prompts), show_progress=show_progress)
-            tracker = self._tracker  # type: ignore[assignment]
-        assert tracker is not None
+        """Process multiple prompts asynchronously using the start_nowait/wait_for_all backend.
 
-
-
-
-        #
-
-
-        # Main dispatch loop - using original pattern but with all prompts
-        next_context = None  # Persist across iterations like original
-        next_is_retry = False  # Track whether next_context is a retry
-        prompts_not_finished = True
-        prompts_iter = iter(zip(ids, prompts))
-
-        while True:
-            # Get next context (retry or new) - only if we don't already have one waiting
-            if next_context is None:
-                if not retry_queue.empty():
-                    next_context = retry_queue.get_nowait()
-                    next_is_retry = True
-                    print(f"Retrying request {next_context.task_id}.")
-                elif prompts_not_finished:
-                    try:
-                        task_id, prompt = next(prompts_iter)
-                        model, sampling_params = self._select_model()
-                        assert isinstance(prompt, Conversation)
-                        next_context = RequestContext(
-                            task_id=task_id,
-                            model_name=model,
-                            prompt=prompt,
-                            sampling_params=sampling_params,
-                            attempts_left=self.max_attempts,
-                            request_timeout=self.request_timeout,
-                            status_tracker=tracker,
-                            tools=tools,
-                            cache=cache,
-                            use_responses_api=use_responses_api,
-                            extra_headers=self.extra_headers,
-                            force_local_mcp=self.force_local_mcp,
-                        )
-
-                        next_is_retry = False
-                    except StopIteration:
-                        prompts_not_finished = False
-
-            # Dispatch using shared capacity gate (consistent with start_nowait)
-            if next_context:
-                # Wait here until we have capacity to launch this context
-                await self._wait_for_capacity(
-                    next_context.num_tokens, tracker, retry=next_is_retry
-                )
-
-                # Launch simplified request processing
-                contexts[next_context.task_id] = next_context
-
-                async def process_and_store(ctx: RequestContext):
-                    try:
-                        response = await self.process_single_request(ctx, retry_queue)
-                        results[ctx.task_id] = response
-                    except BaseException as exc:
-                        # Capture cancellations and other BaseExceptions before fallback response fires.
-                        error_response = APIResponse(
-                            id=ctx.task_id,
-                            model_internal=ctx.model_name,
-                            prompt=ctx.prompt,
-                            sampling_params=ctx.sampling_params,
-                            status_code=None,
-                            is_error=True,
-                            error_message=f"{type(exc).__name__}: {exc}",
-                            raw_response={"exception_repr": repr(exc)},
-                        )
-                        results[ctx.task_id] = error_response
-                        if ctx.status_tracker:
-                            ctx.status_tracker.task_failed(ctx.task_id)
-                        raise
-
-                task = asyncio.create_task(process_and_store(next_context))
-                inflight_tasks.add(task)
-                task.add_done_callback(inflight_tasks.discard)
-                next_context = None  # Reset after successful dispatch
-                next_is_retry = False
-
-            # Update progress - original logic
-            tracker.update_pbar()
-
-            # Check completion: consider final outcomes, not in-progress count
-            # This avoids rare hangs if in-progress is miscounted (e.g., double-increment).
-            if (tracker.num_tasks_succeeded + tracker.num_tasks_failed) >= len(
-                prompts
-            ) and retry_queue.empty():
-                break
+        This implementation creates all tasks upfront and waits for them to complete,
+        avoiding issues with tracker state accumulating across multiple calls.
+        """
+        # Convert prompts to Conversations
+        prompts = prompts_to_conversations(prompts)
 
-
-
+        # Ensure tracker exists (start_nowait will call add_to_total for each task)
+        if self._tracker is None:
+            self.open(total=0, show_progress=show_progress)
+            tracker_preopened = False
+        else:
+            tracker_preopened = True
+
+        # Start all tasks using start_nowait - tasks will coordinate via shared capacity lock
+        task_ids = []
+        for prompt in prompts:
+            assert isinstance(prompt, Conversation)
+            task_id = self.start_nowait(
+                prompt,
+                tools=tools,
+                cache=cache,
+                use_responses_api=use_responses_api,
+            )
+            task_ids.append(task_id)
 
-
-
+        # Wait for all tasks to complete
+        results = await self.wait_for_all(task_ids)
 
+        # Close tracker if we opened it
         if not tracker_preopened:
             self.close()
 
+        # Defensive check: This should rarely happen, but provides a safety net
         for idx, response in enumerate(results):
             if response is None:
-
-
-
-
-                    if ctx
-                    else self.sampling_params[0]
-                    if self.sampling_params
-                    else SamplingParams()
+                # This should only happen if there's a bug in _run_context
+                print(
+                    f"WARNING: result[{idx}] is None! Creating defensive error response. "
+                    f"Please report this bug."
                 )
-                model_name = ctx.model_name if ctx else self.model_names[0]
-                assert isinstance(
-                    prompt, Conversation
-                ), "expected prompt to be a conversation"
                 results[idx] = APIResponse(
                     id=idx,
-                    model_internal=
-                    prompt=
-                    sampling_params=sampling_params
+                    model_internal=self.model_names[0],
+                    prompt=prompts[idx],  # type: ignore
+                    sampling_params=self.sampling_params[0]
+                    if self.sampling_params
+                    else SamplingParams(),
                     status_code=None,
                     is_error=True,
                     error_message="Internal error: no response produced.",
                 )
 
+        # Handle return format
         if return_completions_only:
             return [r.completion if r is not None else None for r in results]
 
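
The rewritten batch method now just enqueues every prompt and awaits the shared task set instead of running its own dispatch/retry loop. From the caller's side, the same start_nowait/wait_for_all pattern looks roughly like this (start_nowait and wait_for_all are the methods visible in the diff; client construction and prompts are illustrative):

    import asyncio
    from lm_deluge import LLMClient

    async def main():
        client = LLMClient()  # construction details assumed

        # Enqueue without awaiting; tasks coordinate through the client's
        # shared capacity gate and tracker (add_to_total is called per task).
        task_ids = [client.start_nowait(p) for p in ["prompt one", "prompt two"]]

        # Results come back aligned with task_ids once everything completes.
        results = await client.wait_for_all(task_ids)
        for r in results:
            print(r.completion if r else None)

    asyncio.run(main())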
src/lm_deluge/llm_tools/extract.py (+7 -5)

@@ -1,11 +1,12 @@
 import asyncio
 import io
 import json
+import os
 from typing import Any
 
+from lm_deluge.client import _LLMClient
 from lm_deluge.file import File
 
-from ..client import LLMClient
 from ..prompt import Conversation
 from ..util.json import load_json
 
@@ -18,7 +19,7 @@ except ImportError:
 async def extract_async(
     inputs: list[str | Any],
     schema: Any,
-    client: LLMClient,
+    client: _LLMClient,
     document_name: str | None = None,
     object_name: str | None = None,
     show_progress: bool = True,
@@ -32,12 +33,13 @@ async def extract_async(
         raise ValueError("schema must be a pydantic model or a dict.")
 
     # warn if json_mode is not True
+    has_warned = os.environ.get("LM_DELUGE_WARN_JSON_MODE", False)
     for sp in client.sampling_params:
-        if sp.json_mode is False:
+        if sp.json_mode is False and not has_warned:
             print(
                 "Warning: json_mode is False for one or more sampling params. You may get invalid output."
             )
-
+            os.environ["LM_DELUGE_WARN_JSON_MODE"] = "True"
     # check_schema(schema_dict) -- figure out later
     if document_name is None:
         document_name = "text"
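
This works as a warn-once gate because os.environ values are strings: once the variable is set, os.environ.get returns a truthy value and the warning is skipped on later calls (including in child processes that inherit the environment). Note that has_warned is read once before the loop, so a single call can still print the warning for each offending sampling param. A standalone sketch of the same pattern (the helper name warn_once is hypothetical; the variable name is the one used in the diff):

    import os

    def warn_once(message: str, flag: str = "LM_DELUGE_WARN_JSON_MODE") -> None:
        # Any non-empty string is truthy, so after the first call the
        # flag suppresses the warning for the rest of the process.
        if not os.environ.get(flag, False):
            print(message)
            os.environ[flag] = "True"

    warn_once("Warning: json_mode is False for one or more sampling params.")
    warn_once("Warning: json_mode is False for one or more sampling params.")  # suppressed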
@@ -111,7 +113,7 @@
 def extract(
     inputs: list[str | Any],
     schema: Any,
-    client: LLMClient,
+    client: _LLMClient,
     document_name: str | None = None,
     object_name: str | None = None,
     show_progress: bool = True,
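
Both extract and extract_async now annotate client as _LLMClient (the concrete pydantic model) rather than the removed LLMClient import. A sketch of a call under the new signature (the schema and client construction are illustrative; the keyword names match the parameters shown in the diff):

    from pydantic import BaseModel
    from lm_deluge import LLMClient
    from lm_deluge.llm_tools.extract import extract

    class Invoice(BaseModel):
        vendor: str
        total: float

    client = LLMClient()  # construction details assumed
    results = extract(
        inputs=["ACME Corp invoice, total due $1,250.00"],
        schema=Invoice,
        client=client,
        document_name="invoice",
    )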
All other files listed above are unchanged between 0.0.53 and 0.0.54 (+0 -0).