code-puppy 0.0.376__py3-none-any.whl → 0.0.378__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- code_puppy/command_line/add_model_menu.py +2 -0
- code_puppy/command_line/autosave_menu.py +2 -0
- code_puppy/command_line/colors_menu.py +2 -0
- code_puppy/command_line/diff_menu.py +2 -0
- code_puppy/command_line/model_settings_menu.py +15 -0
- code_puppy/command_line/onboarding_wizard.py +2 -0
- code_puppy/gemini_model.py +63 -22
- code_puppy/model_factory.py +13 -0
- code_puppy/models.json +2 -2
- code_puppy/tools/common.py +10 -2
- {code_puppy-0.0.376.data → code_puppy-0.0.378.data}/data/code_puppy/models.json +2 -2
- {code_puppy-0.0.376.dist-info → code_puppy-0.0.378.dist-info}/METADATA +1 -1
- {code_puppy-0.0.376.dist-info → code_puppy-0.0.378.dist-info}/RECORD +17 -17
- {code_puppy-0.0.376.data → code_puppy-0.0.378.data}/data/code_puppy/models_dev_api.json +0 -0
- {code_puppy-0.0.376.dist-info → code_puppy-0.0.378.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.376.dist-info → code_puppy-0.0.378.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.376.dist-info → code_puppy-0.0.378.dist-info}/licenses/LICENSE +0 -0
code_puppy/command_line/add_model_menu.py
CHANGED

@@ -893,6 +893,7 @@ class AddModelMenu:
         kb = KeyBindings()

         @kb.add("up")
+        @kb.add("c-p")  # Ctrl+P = previous (Emacs-style)
         def _(event):
             if self.view_mode == "providers":
                 if self.selected_provider_idx > 0:

@@ -905,6 +906,7 @@ class AddModelMenu:
                 self.update_display()

         @kb.add("down")
+        @kb.add("c-n")  # Ctrl+N = next (Emacs-style)
         def _(event):
             if self.view_mode == "providers":
                 if self.selected_provider_idx < len(self.providers) - 1:

code_puppy/command_line/autosave_menu.py
CHANGED

@@ -470,6 +470,7 @@ async def interactive_autosave_picker() -> Optional[str]:
     kb = KeyBindings()

     @kb.add("up")
+    @kb.add("c-p")  # Ctrl+P = previous (Emacs-style)
     def _(event):
         if browse_mode[0]:
             # In browse mode: go to older message

@@ -485,6 +486,7 @@ async def interactive_autosave_picker() -> Optional[str]:
             update_display()

     @kb.add("down")
+    @kb.add("c-n")  # Ctrl+N = next (Emacs-style)
     def _(event):
         if browse_mode[0]:
             # In browse mode: go to newer message

code_puppy/command_line/colors_menu.py
CHANGED

@@ -299,6 +299,7 @@ async def _split_panel_selector(
     kb = KeyBindings()

     @kb.add("up")
+    @kb.add("c-p")  # Ctrl+P = previous (Emacs-style)
     def move_up(event):
         if choices:
             # Skip separator lines

@@ -310,6 +311,7 @@ async def _split_panel_selector(
             event.app.invalidate()

     @kb.add("down")
+    @kb.add("c-n")  # Ctrl+N = next (Emacs-style)
     def move_down(event):
         if choices:
             # Skip separator lines

code_puppy/command_line/diff_menu.py
CHANGED

@@ -533,6 +533,7 @@ async def _split_panel_selector(
     kb = KeyBindings()

     @kb.add("up")
+    @kb.add("c-p")  # Ctrl+P = previous (Emacs-style)
     def move_up(event):
         if choices:
             selected_index[0] = (selected_index[0] - 1) % len(choices)

@@ -540,6 +541,7 @@ async def _split_panel_selector(
             event.app.invalidate()

     @kb.add("down")
+    @kb.add("c-n")  # Ctrl+N = next (Emacs-style)
     def move_down(event):
         if choices:
             selected_index[0] = (selected_index[0] + 1) % len(choices)

code_puppy/command_line/model_settings_menu.py
CHANGED

@@ -106,6 +106,19 @@ SETTING_DEFINITIONS: Dict[str, Dict] = {
         "type": "boolean",
         "default": False,
     },
+    "thinking_enabled": {
+        "name": "Thinking Enabled",
+        "description": "Enable thinking mode for Gemini 3 Pro models. When enabled, the model will show its reasoning process.",
+        "type": "boolean",
+        "default": True,
+    },
+    "thinking_level": {
+        "name": "Thinking Level",
+        "description": "Controls the depth of thinking for Gemini 3 Pro models. Low = faster responses, High = more thorough reasoning.",
+        "type": "choice",
+        "choices": ["low", "high"],
+        "default": "low",
+    },
 }

@@ -730,6 +743,7 @@ class ModelSettingsMenu:
         kb = KeyBindings()

         @kb.add("up")
+        @kb.add("c-p")  # Ctrl+P = previous (Emacs-style)
         def _(event):
             if self.view_mode == "models":
                 if self.model_index > 0:

@@ -742,6 +756,7 @@ class ModelSettingsMenu:
                 self.update_display()

         @kb.add("down")
+        @kb.add("c-n")  # Ctrl+N = next (Emacs-style)
         def _(event):
             if self.view_mode == "models":
                 if self.model_index < len(self.all_models) - 1:

code_puppy/command_line/onboarding_wizard.py
CHANGED

@@ -249,12 +249,14 @@ async def run_onboarding_wizard() -> Optional[str]:

     @kb.add("down")
     @kb.add("j")
+    @kb.add("c-n")  # Ctrl+N = next (Emacs-style)
     def next_option(event):
         wizard.next_option()
         event.app.invalidate()

     @kb.add("up")
     @kb.add("k")
+    @kb.add("c-p")  # Ctrl+P = previous (Emacs-style)
     def prev_option(event):
         wizard.prev_option()
         event.app.invalidate()
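Every menu touched above uses the same prompt_toolkit idiom: a second @kb.add(...) decorator is stacked onto the existing arrow-key handler so Ctrl+P/Ctrl+N fire the same callback as Up/Down. A minimal standalone sketch of that pattern (the choices list and selection state are invented for illustration; only the KeyBindings API is from prompt_toolkit):

from prompt_toolkit.key_binding import KeyBindings

kb = KeyBindings()
choices = ["gpt-5.1", "gemini-3-pro-preview"]  # invented menu entries
selected_index = [0]                           # mutable selection state

@kb.add("up")
@kb.add("c-p")  # both keys dispatch to the same handler
def move_up(event):
    selected_index[0] = (selected_index[0] - 1) % len(choices)
    event.app.invalidate()  # force a redraw

@kb.add("down")
@kb.add("c-n")
def move_down(event):
    selected_index[0] = (selected_index[0] + 1) % len(choices)
    event.app.invalidate()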
code_puppy/gemini_model.py
CHANGED
@@ -40,6 +40,10 @@ from pydantic_ai.usage import RequestUsage

 logger = logging.getLogger(__name__)

+# Bypass thought signature for Gemini when no pending signature is available
+# This allows function calls to work with thinking models
+BYPASS_THOUGHT_SIGNATURE = "context_engineering_is_the_way_to_go"
+

 def generate_tool_call_id() -> str:
     """Generate a unique tool call ID."""
@@ -410,27 +414,48 @@ class GeminiModel(Model):
         return system_instruction, contents

     def _map_model_response(self, m: ModelResponse) -> dict[str, Any] | None:
-        """Map a ModelResponse to Gemini content format."""
+        """Map a ModelResponse to Gemini content format.
+
+        For Gemini thinking models, we need to track thought signatures from
+        ThinkingParts and apply them to subsequent function_call parts.
+        """
         parts: list[dict[str, Any]] = []
+        pending_signature: str | None = None

         for item in m.parts:
             if isinstance(item, ToolCallPart):
-                parts.append(
-                    {
-                        "function_call": {
-                            "name": item.tool_name,
-                            "args": item.args_as_dict(),
-                            "id": item.tool_call_id,
-                        }
+                part_dict: dict[str, Any] = {
+                    "function_call": {
+                        "name": item.tool_name,
+                        "args": item.args_as_dict(),
+                        "id": item.tool_call_id,
                     }
+                }
+                # Gemini thinking models REQUIRE thoughtSignature on function calls
+                # Use pending signature from thinking or bypass signature
+                part_dict["thoughtSignature"] = (
+                    pending_signature
+                    if pending_signature is not None
+                    else BYPASS_THOUGHT_SIGNATURE
                 )
+                parts.append(part_dict)
             elif isinstance(item, TextPart):
-                parts.append({"text": item.content})
+                part_dict = {"text": item.content}
+                # Apply pending signature to text parts too if present
+                if pending_signature is not None:
+                    part_dict["thoughtSignature"] = pending_signature
+                    pending_signature = None
+                parts.append(part_dict)
             elif isinstance(item, ThinkingPart):
                 if item.content:
-                    part_dict: dict[str, Any] = {"text": item.content, "thought": True}
+                    part_dict = {"text": item.content, "thought": True}
                     if item.signature:
                         part_dict["thoughtSignature"] = item.signature
+                        # Store signature for subsequent parts
+                        pending_signature = item.signature
+                    else:
+                        # No signature on thinking part, use bypass
+                        pending_signature = BYPASS_THOUGHT_SIGNATURE
                     parts.append(part_dict)

         if not parts:
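The pending_signature bookkeeping above has one net effect: a signed ThinkingPart's signature is carried forward onto the text or function_call parts that follow it, and a function call with no preceding signed thought falls back to BYPASS_THOUGHT_SIGNATURE. A hypothetical input/output pair (part values invented; the dict shapes follow the hunk above):

# ModelResponse.parts, conceptually:
#   ThinkingPart(content="plan the edit", signature="sig-123")
#   ToolCallPart(tool_name="edit_file", args={"path": "a.py"}, tool_call_id="call-1")
#
# _map_model_response turns them into Gemini content parts roughly like:
mapped_parts = [
    {"text": "plan the edit", "thought": True, "thoughtSignature": "sig-123"},
    {
        "function_call": {"name": "edit_file", "args": {"path": "a.py"}, "id": "call-1"},
        "thoughtSignature": "sig-123",  # inherited from the preceding ThinkingPart
    },
]

# A tool call with no signed thought before it still gets a signature:
fallback_part = {
    "function_call": {"name": "run_tests", "args": {}, "id": "call-2"},
    "thoughtSignature": "context_engineering_is_the_way_to_go",  # BYPASS_THOUGHT_SIGNATURE
}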
@@ -462,18 +487,34 @@ class GeminiModel(Model):
         config: dict[str, Any] = {}

         if model_settings:
-
-
-
-
-
-
-
-
-
-
-
-            config["maxOutputTokens"] =
+            # ModelSettings is a TypedDict, so use .get() for all access
+            temperature = model_settings.get("temperature")
+            if temperature is not None:
+                config["temperature"] = temperature
+
+            top_p = model_settings.get("top_p")
+            if top_p is not None:
+                config["topP"] = top_p
+
+            max_tokens = model_settings.get("max_tokens")
+            if max_tokens is not None:
+                config["maxOutputTokens"] = max_tokens
+
+            # Handle Gemini 3 Pro thinking settings
+            thinking_enabled = model_settings.get("thinking_enabled")
+            thinking_level = model_settings.get("thinking_level")
+
+            # Build thinkingConfig if thinking settings are present
+            if thinking_enabled is False:
+                # Disable thinking by not including thinkingConfig
+                pass
+            elif thinking_level is not None:
+                # Gemini 3 Pro uses thinkingLevel with values "low" or "high"
+                # includeThoughts=True is required to surface the thinking in the response
+                config["thinkingConfig"] = {
+                    "thinkingLevel": thinking_level,
+                    "includeThoughts": True,
+                }

         return config

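Taken together, the settings mapping above produces a Gemini generation config shaped roughly like the dict below (example values invented; the key names are the ones written by the hunk):

config = {
    "temperature": 0.2,
    "topP": 0.95,
    "maxOutputTokens": 2048,
    "thinkingConfig": {
        "thinkingLevel": "low",   # "low" or "high" for Gemini 3 Pro
        "includeThoughts": True,  # surfaces ThinkingParts in the response
    },
}
# With thinking_enabled=False, thinkingConfig is simply omitted.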
code_puppy/model_factory.py
CHANGED
@@ -74,6 +74,7 @@ def make_model_settings(
         get_effective_model_settings,
         get_openai_reasoning_effort,
         get_openai_verbosity,
+        model_supports_setting,
     )

     model_settings_dict: dict = {}

@@ -131,6 +132,18 @@
         }
         model_settings = AnthropicModelSettings(**model_settings_dict)

+    # Handle Gemini thinking models (Gemini-3)
+    # Check if model supports thinking settings and apply defaults
+    if model_supports_setting(model_name, "thinking_level"):
+        # Apply defaults if not explicitly set by user
+        # Default: thinking_enabled=True, thinking_level="low"
+        if "thinking_enabled" not in model_settings_dict:
+            model_settings_dict["thinking_enabled"] = True
+        if "thinking_level" not in model_settings_dict:
+            model_settings_dict["thinking_level"] = "low"
+        # Recreate settings with Gemini thinking config
+        model_settings = ModelSettings(**model_settings_dict)
+
     return model_settings

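In effect, make_model_settings now back-fills thinking defaults for any model whose supported_settings include thinking_level. A condensed sketch of that behaviour, written with setdefault, which is equivalent to the explicit "not in" checks in the hunk (dict contents invented, names taken from the diff):

model_settings_dict = {"temperature": 0.3}  # user configured temperature only
if model_supports_setting("Gemini-3-Long-Context", "thinking_level"):
    model_settings_dict.setdefault("thinking_enabled", True)
    model_settings_dict.setdefault("thinking_level", "low")
# -> {"temperature": 0.3, "thinking_enabled": True, "thinking_level": "low"}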
code_puppy/models.json
CHANGED
@@ -33,13 +33,13 @@
     "type": "gemini",
     "name": "gemini-3-pro-preview",
     "context_length": 200000,
-    "supported_settings": ["temperature", "top_p"]
+    "supported_settings": ["temperature", "top_p", "thinking_enabled", "thinking_level"]
   },
   "Gemini-3-Long-Context": {
     "type": "gemini",
     "name": "gemini-3-pro-preview",
     "context_length": 1000000,
-    "supported_settings": ["temperature", "top_p"]
+    "supported_settings": ["temperature", "top_p", "thinking_enabled", "thinking_level"]
   },
   "gpt-5.1": {
     "type": "openai",
code_puppy/tools/common.py
CHANGED
@@ -880,18 +880,22 @@ async def arrow_select_async(
         lines.append(border_bottom)
         lines.append("")

-        lines.append(
+        lines.append(
+            "<ansicyan>(Use ↑↓ or Ctrl+P/N to select, Enter to confirm)</ansicyan>"
+        )
         return HTML("\n".join(lines))

     # Key bindings
     kb = KeyBindings()

     @kb.add("up")
+    @kb.add("c-p")  # Ctrl+P = previous (Emacs-style)
     def move_up(event):
         selected_index[0] = (selected_index[0] - 1) % len(choices)
         event.app.invalidate()  # Force redraw to update preview

     @kb.add("down")
+    @kb.add("c-n")  # Ctrl+N = next (Emacs-style)
     def move_down(event):
         selected_index[0] = (selected_index[0] + 1) % len(choices)
         event.app.invalidate()  # Force redraw to update preview

@@ -957,18 +961,22 @@ def arrow_select(message: str, choices: list[str]) -> str:
             else:
                 lines.append(f" {choice}")
         lines.append("")
-        lines.append(
+        lines.append(
+            "<ansicyan>(Use ↑↓ or Ctrl+P/N to select, Enter to confirm)</ansicyan>"
+        )
         return HTML("\n".join(lines))

     # Key bindings
     kb = KeyBindings()

     @kb.add("up")
+    @kb.add("c-p")  # Ctrl+P = previous (Emacs-style)
     def move_up(event):
         selected_index[0] = (selected_index[0] - 1) % len(choices)
         event.app.invalidate()  # Force redraw to update preview

     @kb.add("down")
+    @kb.add("c-n")  # Ctrl+N = next (Emacs-style)
     def move_down(event):
         selected_index[0] = (selected_index[0] + 1) % len(choices)
         event.app.invalidate()  # Force redraw to update preview
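The new hint line in tools/common.py is plain prompt_toolkit formatted text. A minimal sketch of how such a hint is built (the surrounding menu rows are invented; HTML and the <ansicyan> tag are from prompt_toolkit):

from prompt_toolkit.formatted_text import HTML

lines = ["  gpt-5.1", "* gemini-3-pro-preview", ""]  # invented menu rows
lines.append(
    "<ansicyan>(Use ↑↓ or Ctrl+P/N to select, Enter to confirm)</ansicyan>"
)
formatted = HTML("\n".join(lines))  # handed to the prompt_toolkit layout to render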
{code_puppy-0.0.376.data → code_puppy-0.0.378.data}/data/code_puppy/models.json
CHANGED

@@ -33,13 +33,13 @@
     "type": "gemini",
     "name": "gemini-3-pro-preview",
     "context_length": 200000,
-    "supported_settings": ["temperature", "top_p"]
+    "supported_settings": ["temperature", "top_p", "thinking_enabled", "thinking_level"]
   },
   "Gemini-3-Long-Context": {
     "type": "gemini",
     "name": "gemini-3-pro-preview",
     "context_length": 1000000,
-    "supported_settings": ["temperature", "top_p"]
+    "supported_settings": ["temperature", "top_p", "thinking_enabled", "thinking_level"]
   },
   "gpt-5.1": {
     "type": "openai",
{code_puppy-0.0.376.dist-info → code_puppy-0.0.378.dist-info}/RECORD
CHANGED

@@ -7,14 +7,14 @@ code_puppy/cli_runner.py,sha256=w5CLKgQYYaT7My3Cga2StXYol-u6DBxNzzUuhhsfhsA,3495
 code_puppy/config.py,sha256=eAyVqiu8SwzJQpaJu80rlJvd8XXY51DqafMsP5lBRI4,55827
 code_puppy/error_logging.py,sha256=a80OILCUtJhexI6a9GM-r5LqIdjvSRzggfgPp2jv1X0,3297
 code_puppy/gemini_code_assist.py,sha256=KGS7sO5OLc83nDF3xxS-QiU6vxW9vcm6hmzilu79Ef8,13867
-code_puppy/gemini_model.py,sha256=
+code_puppy/gemini_model.py,sha256=UHb5vFC9zrEdFJ-yCN3vNCdp0UxP156BL_fwbnEhaw8,27988
 code_puppy/http_utils.py,sha256=SAH6EOdbR6Cbfmi-4EtHDqRDBUV5bWtGc-5nr44F0Is,10418
 code_puppy/keymap.py,sha256=IvMkTlB_bIqOWpbTpmftkdyjhtD5todXuEIw1zCZ4u0,3584
 code_puppy/main.py,sha256=82r3vZy_XcyEsenLn82BnUusaoyL3Bpm_Th_jKgqecE,273
-code_puppy/model_factory.py,sha256=
+code_puppy/model_factory.py,sha256=VFMUY0u8oqHn9f1QqpWsxvXujlK0Ku5NEwfuaTcv2rs,32060
 code_puppy/model_switching.py,sha256=3IsnSWKHLWzI5d2WDYNg0Xr78BeYNN1WrZuzas-lYJ4,2064
 code_puppy/model_utils.py,sha256=cG1V4fsIOEQIb0W88FyGcsMWNv8SpmRAXN3A7LBEoyE,5116
-code_puppy/models.json,sha256=
+code_puppy/models.json,sha256=SC7N2lV1Q8ikXlalRNqABkNvuuL_8fIIk638739-gGY,3319
 code_puppy/models_dev_api.json,sha256=wHjkj-IM_fx1oHki6-GqtOoCrRMR0ScK0f-Iz0UEcy8,548187
 code_puppy/models_dev_parser.py,sha256=8ndmWrsSyKbXXpRZPXc0w6TfWMuCcgaHiMifmlaBaPc,20611
 code_puppy/pydantic_patches.py,sha256=-tYaQW8FMAbxuKGbsM09pvjCBqwk67mf7GekXzGKf14,6444

@@ -69,25 +69,25 @@ code_puppy/api/routers/config.py,sha256=uDUFYZqki0fQd0U5EHpfTgqlZaRFfmhPyWrIHXNB
 code_puppy/api/routers/sessions.py,sha256=GqYRT7IJYPpEdTseLF3FIpbvvD86lIqwwPswL31D9Wc,6786
 code_puppy/api/templates/terminal.html,sha256=9alh6tTbLyXPDjBvkXw8nEWPXB-m_LIceGGRYpSLuyo,13125
 code_puppy/command_line/__init__.py,sha256=y7WeRemfYppk8KVbCGeAIiTuiOszIURCDjOMZv_YRmU,45
-code_puppy/command_line/add_model_menu.py,sha256=
+code_puppy/command_line/add_model_menu.py,sha256=xsPhJYpqHv6mbNc5NVz46N5XzdJz1wbtDg3LaP2KRK0,43591
 code_puppy/command_line/agent_menu.py,sha256=4SVPS0eA7YfpxacNk0Kel16bzqQ3bBGe8dqCCOI2A8s,20915
 code_puppy/command_line/attachments.py,sha256=4Q5I2Es4j0ltnz5wjw2z0QXMsiMJvEfWRkPf_lJeITM,13093
-code_puppy/command_line/autosave_menu.py,sha256=
+code_puppy/command_line/autosave_menu.py,sha256=41YBToiWGglTKEdEBeNTqI2D3wO4BCBfv0WXZILOhnw,19928
 code_puppy/command_line/clipboard.py,sha256=oe9bfAX5RnT81FiYrDmhvHaePS1tAT-NFG1fSXubSD4,16869
-code_puppy/command_line/colors_menu.py,sha256=
+code_puppy/command_line/colors_menu.py,sha256=AI36z4vicbhwDQkziQZXCfy_F59bJLYs-1p9-N7Enn0,17535
 code_puppy/command_line/command_handler.py,sha256=yDR_NAvgwjFKxUuQMY5yzkkihFovDiT24wyKALmJSRU,10969
 code_puppy/command_line/command_registry.py,sha256=qFySsw1g8dol3kgi0p6cXrIDlP11_OhOoaQ5nAadWXg,4416
 code_puppy/command_line/config_commands.py,sha256=uwS7ln0I0w0A3tSSWZEtq7Xgj-VOaGEYLE1CihdQ8ic,25759
 code_puppy/command_line/core_commands.py,sha256=QTQt2CS9_6ExcgS6BLgRZWkXDaSb-KC_tWplUkOGaMA,27133
-code_puppy/command_line/diff_menu.py,sha256=
+code_puppy/command_line/diff_menu.py,sha256=tokSiQXv9cWDd7HTBEkKKnxZjwSNyVjpIReXe-K2vL4,24245
 code_puppy/command_line/file_path_completion.py,sha256=gw8NpIxa6GOpczUJRyh7VNZwoXKKn-yvCqit7h2y6Gg,2931
 code_puppy/command_line/load_context_completion.py,sha256=a3JvLDeLLSYxVgTjAdqWzS4spjv6ccCrK2LKZgVJ1IM,2202
 code_puppy/command_line/mcp_completion.py,sha256=eKzW2O7gun7HoHekOW0XVXhNS5J2xCtK7aaWyA8bkZk,6952
 code_puppy/command_line/model_picker_completion.py,sha256=YRudzwGVtIjr02MyeIdmbkDhS00ENjCt9k3nATT3KdM,6143
-code_puppy/command_line/model_settings_menu.py,sha256=
+code_puppy/command_line/model_settings_menu.py,sha256=1SrhwmiL8fTqaM-frV0mSnssiVvjKfjMNoZAm1ofiYo,33708
 code_puppy/command_line/motd.py,sha256=XuIk3UTLawwVFM-NfoaJGU5F2hPLASTFXq84UdDMT0Q,2408
 code_puppy/command_line/onboarding_slides.py,sha256=itqAsuHzjHpD_XNz6FniBIYr6dNyP1AW_XQZQ6SbVek,7125
-code_puppy/command_line/onboarding_wizard.py,sha256=
+code_puppy/command_line/onboarding_wizard.py,sha256=mAFmQsPU8N48MGxHXOPBFRz7zeC4otdlqM6MpYVmrrc,10353
 code_puppy/command_line/pin_command_completion.py,sha256=juSvdqRpk7AdfkPy1DJx5NzfEUU5KYGlChvP0hisM18,11667
 code_puppy/command_line/prompt_toolkit_completion.py,sha256=49GM3jVE89G1M3XroMZk2LhGgXpOO8XZ0Sg8h4a6LLw,32806
 code_puppy/command_line/session_commands.py,sha256=Jh8GGfhlfBAEVfucKLbcZjNaXYd0twImiOwq2ZnGdQQ,9902

@@ -205,7 +205,7 @@ code_puppy/prompts/antigravity_system_prompt.md,sha256=ZaTfRyY57ttROyZMmOBtqZQu1
 code_puppy/tools/__init__.py,sha256=VsXs27kCtgq63LnkHMrtH39q13zjezvayjdiyges_Io,13919
 code_puppy/tools/agent_tools.py,sha256=xDO0LPc0YUQHSqA29SEQH4t9hcbyApaiUWWgSdx6wE4,25511
 code_puppy/tools/command_runner.py,sha256=C3wGcxt6slC1d5X4twKSoUjGdGYi02k2sAQSygoOt54,51518
-code_puppy/tools/common.py,sha256=
+code_puppy/tools/common.py,sha256=wNogZr0glUxKhJoF1S2iVsT8SAXjn_rXYIyY8__l8YI,46673
 code_puppy/tools/display.py,sha256=-ulDyq55178f8O_TAEmnxGoy_ZdFkbHBw-W4ul851GM,2675
 code_puppy/tools/file_modifications.py,sha256=vz9n7R0AGDSdLUArZr_55yJLkyI30M8zreAppxIx02M,29380
 code_puppy/tools/file_operations.py,sha256=CqhpuBnOFOcQCIYXOujskxq2VMLWYJhibYrH0YcPSfA,35692

@@ -225,10 +225,10 @@ code_puppy/tools/browser/chromium_terminal_manager.py,sha256=w1thQ_ACb6oV45L93TS
 code_puppy/tools/browser/terminal_command_tools.py,sha256=9byOZku-dwvTtCl532xt7Lumed_jTn0sLvUe_X75XCQ,19068
 code_puppy/tools/browser/terminal_screenshot_tools.py,sha256=J_21YO_495NvYgNFu9KQP6VYg2K_f8CtSdZuF94Yhnw,18448
 code_puppy/tools/browser/terminal_tools.py,sha256=F5LjVH3udSCFHmqC3O1UJLoLozZFZsEdX42jOmkqkW0,17853
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
-code_puppy-0.0.
+code_puppy-0.0.378.data/data/code_puppy/models.json,sha256=SC7N2lV1Q8ikXlalRNqABkNvuuL_8fIIk638739-gGY,3319
+code_puppy-0.0.378.data/data/code_puppy/models_dev_api.json,sha256=wHjkj-IM_fx1oHki6-GqtOoCrRMR0ScK0f-Iz0UEcy8,548187
+code_puppy-0.0.378.dist-info/METADATA,sha256=qZ-JMYspwKU2-m7ccFxWT7A4hm4XJh1helqEooIl2KY,27604
+code_puppy-0.0.378.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+code_puppy-0.0.378.dist-info/entry_points.txt,sha256=Tp4eQC99WY3HOKd3sdvb22vZODRq0XkZVNpXOag_KdI,91
+code_puppy-0.0.378.dist-info/licenses/LICENSE,sha256=31u8x0SPgdOq3izJX41kgFazWsM43zPEF9eskzqbJMY,1075
+code_puppy-0.0.378.dist-info/RECORD,,
{code_puppy-0.0.376.data → code_puppy-0.0.378.data}/data/code_puppy/models_dev_api.json
File without changes

{code_puppy-0.0.376.dist-info → code_puppy-0.0.378.dist-info}/WHEEL
File without changes

{code_puppy-0.0.376.dist-info → code_puppy-0.0.378.dist-info}/entry_points.txt
File without changes

{code_puppy-0.0.376.dist-info → code_puppy-0.0.378.dist-info}/licenses/LICENSE
File without changes