code-puppy 0.0.376__py3-none-any.whl → 0.0.377__py3-none-any.whl

This diff compares the published contents of two package versions released to a supported public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
@@ -106,6 +106,19 @@ SETTING_DEFINITIONS: Dict[str, Dict] = {
         "type": "boolean",
         "default": False,
     },
+    "thinking_enabled": {
+        "name": "Thinking Enabled",
+        "description": "Enable thinking mode for Gemini 3 Pro models. When enabled, the model will show its reasoning process.",
+        "type": "boolean",
+        "default": True,
+    },
+    "thinking_level": {
+        "name": "Thinking Level",
+        "description": "Controls the depth of thinking for Gemini 3 Pro models. Low = faster responses, High = more thorough reasoning.",
+        "type": "choice",
+        "choices": ["low", "high"],
+        "default": "low",
+    },
 }


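The two new entries follow the existing SETTING_DEFINITIONS shape: a display name, a description, a type, a default, and (for choice settings) the allowed values. As a rough illustration of how a value could be resolved against a definition of this shape, here is a standalone sketch using a hypothetical resolve_setting helper, not code-puppy's actual config API:

from typing import Any, Dict

# Trimmed copy of the two new definitions, for the sketch only.
DEFS: Dict[str, Dict] = {
    "thinking_enabled": {"type": "boolean", "default": True},
    "thinking_level": {"type": "choice", "choices": ["low", "high"], "default": "low"},
}

def resolve_setting(name: str, raw: Any = None) -> Any:
    """Fall back to the default, and reject values outside a choice list."""
    definition = DEFS[name]
    if raw is None:
        return definition["default"]
    if definition["type"] == "choice" and raw not in definition["choices"]:
        raise ValueError(f"{name} must be one of {definition['choices']}")
    return raw

assert resolve_setting("thinking_level") == "low"
assert resolve_setting("thinking_level", "high") == "high"
assert resolve_setting("thinking_enabled") is True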
@@ -40,6 +40,10 @@ from pydantic_ai.usage import RequestUsage

 logger = logging.getLogger(__name__)

+# Bypass thought signature for Gemini when no pending signature is available
+# This allows function calls to work with thinking models
+BYPASS_THOUGHT_SIGNATURE = "context_engineering_is_the_way_to_go"
+

 def generate_tool_call_id() -> str:
     """Generate a unique tool call ID."""
@@ -410,27 +414,48 @@ class GeminiModel(Model):
         return system_instruction, contents

     def _map_model_response(self, m: ModelResponse) -> dict[str, Any] | None:
-        """Map a ModelResponse to Gemini content format."""
+        """Map a ModelResponse to Gemini content format.
+
+        For Gemini thinking models, we need to track thought signatures from
+        ThinkingParts and apply them to subsequent function_call parts.
+        """
         parts: list[dict[str, Any]] = []
+        pending_signature: str | None = None

         for item in m.parts:
             if isinstance(item, ToolCallPart):
-                parts.append(
-                    {
-                        "function_call": {
-                            "name": item.tool_name,
-                            "args": item.args_as_dict(),
-                            "id": item.tool_call_id,
-                        }
+                part_dict: dict[str, Any] = {
+                    "function_call": {
+                        "name": item.tool_name,
+                        "args": item.args_as_dict(),
+                        "id": item.tool_call_id,
                     }
+                }
+                # Gemini thinking models REQUIRE thoughtSignature on function calls
+                # Use pending signature from thinking or bypass signature
+                part_dict["thoughtSignature"] = (
+                    pending_signature
+                    if pending_signature is not None
+                    else BYPASS_THOUGHT_SIGNATURE
                 )
+                parts.append(part_dict)
             elif isinstance(item, TextPart):
-                parts.append({"text": item.content})
+                part_dict = {"text": item.content}
+                # Apply pending signature to text parts too if present
+                if pending_signature is not None:
+                    part_dict["thoughtSignature"] = pending_signature
+                    pending_signature = None
+                parts.append(part_dict)
             elif isinstance(item, ThinkingPart):
                 if item.content:
-                    part_dict: dict[str, Any] = {"text": item.content, "thought": True}
+                    part_dict = {"text": item.content, "thought": True}
                     if item.signature:
                         part_dict["thoughtSignature"] = item.signature
+                        # Store signature for subsequent parts
+                        pending_signature = item.signature
+                    else:
+                        # No signature on thinking part, use bypass
+                        pending_signature = BYPASS_THOUGHT_SIGNATURE
                     parts.append(part_dict)

         if not parts:
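The effect of the pending-signature bookkeeping is easiest to see on a small message: a ThinkingPart's signature (or the bypass constant when there is none) ends up on the function_call part that follows it. The sketch below re-implements just that rule on plain dicts; it is illustrative only and does not use the real pydantic_ai part classes.

BYPASS_THOUGHT_SIGNATURE = "context_engineering_is_the_way_to_go"

def map_parts(items: list[dict]) -> list[dict]:
    """Apply a thinking part's signature to the tool calls that follow it."""
    parts: list[dict] = []
    pending: str | None = None
    for item in items:
        if item["kind"] == "thinking":
            part = {"text": item["content"], "thought": True}
            if item.get("signature"):
                part["thoughtSignature"] = item["signature"]
            pending = item.get("signature") or BYPASS_THOUGHT_SIGNATURE
            parts.append(part)
        elif item["kind"] == "tool_call":
            parts.append({
                "function_call": {"name": item["name"], "args": item["args"]},
                "thoughtSignature": pending or BYPASS_THOUGHT_SIGNATURE,
            })
    return parts

mapped = map_parts([
    {"kind": "thinking", "content": "plan the edit", "signature": "sig-123"},
    {"kind": "tool_call", "name": "edit_file", "args": {"path": "README.md"}},
])
assert mapped[1]["thoughtSignature"] == "sig-123"  # signed thinking propagates
assert map_parts([{"kind": "tool_call", "name": "ls", "args": {}}])[0][
    "thoughtSignature"] == BYPASS_THOUGHT_SIGNATURE  # no thinking -> bypass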
@@ -462,18 +487,34 @@ class GeminiModel(Model):
         config: dict[str, Any] = {}

         if model_settings:
-            if (
-                hasattr(model_settings, "temperature")
-                and model_settings.temperature is not None
-            ):
-                config["temperature"] = model_settings.temperature
-            if hasattr(model_settings, "top_p") and model_settings.top_p is not None:
-                config["topP"] = model_settings.top_p
-            if (
-                hasattr(model_settings, "max_tokens")
-                and model_settings.max_tokens is not None
-            ):
-                config["maxOutputTokens"] = model_settings.max_tokens
+            # ModelSettings is a TypedDict, so use .get() for all access
+            temperature = model_settings.get("temperature")
+            if temperature is not None:
+                config["temperature"] = temperature
+
+            top_p = model_settings.get("top_p")
+            if top_p is not None:
+                config["topP"] = top_p
+
+            max_tokens = model_settings.get("max_tokens")
+            if max_tokens is not None:
+                config["maxOutputTokens"] = max_tokens
+
+            # Handle Gemini 3 Pro thinking settings
+            thinking_enabled = model_settings.get("thinking_enabled")
+            thinking_level = model_settings.get("thinking_level")
+
+            # Build thinkingConfig if thinking settings are present
+            if thinking_enabled is False:
+                # Disable thinking by not including thinkingConfig
+                pass
+            elif thinking_level is not None:
+                # Gemini 3 Pro uses thinkingLevel with values "low" or "high"
+                # includeThoughts=True is required to surface the thinking in the response
+                config["thinkingConfig"] = {
+                    "thinkingLevel": thinking_level,
+                    "includeThoughts": True,
+                }

         return config

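For concreteness, with thinking enabled at the default level the branch above yields a generation config like the following (the values in the example settings dict are invented for illustration):

# Example input and the config the branch above would produce for it.
model_settings = {"temperature": 0.2, "thinking_enabled": True, "thinking_level": "low"}

expected_config = {
    "temperature": 0.2,
    "thinkingConfig": {"thinkingLevel": "low", "includeThoughts": True},
}
# With thinking_enabled=False the thinkingConfig key is simply left out,
# and top_p / max_tokens map to topP / maxOutputTokens when present.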
@@ -74,6 +74,7 @@ def make_model_settings(
         get_effective_model_settings,
         get_openai_reasoning_effort,
         get_openai_verbosity,
+        model_supports_setting,
     )

     model_settings_dict: dict = {}
@@ -131,6 +132,18 @@ def make_model_settings(
         }
         model_settings = AnthropicModelSettings(**model_settings_dict)

+    # Handle Gemini thinking models (Gemini-3)
+    # Check if model supports thinking settings and apply defaults
+    if model_supports_setting(model_name, "thinking_level"):
+        # Apply defaults if not explicitly set by user
+        # Default: thinking_enabled=True, thinking_level="low"
+        if "thinking_enabled" not in model_settings_dict:
+            model_settings_dict["thinking_enabled"] = True
+        if "thinking_level" not in model_settings_dict:
+            model_settings_dict["thinking_level"] = "low"
+        # Recreate settings with Gemini thinking config
+        model_settings = ModelSettings(**model_settings_dict)
+
     return model_settings


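The defaults only fill gaps; anything the caller already placed in model_settings_dict wins. A minimal standalone illustration of that branch (apply_thinking_defaults is a hypothetical helper, not part of code-puppy):

def apply_thinking_defaults(settings: dict) -> dict:
    """Mirror the branch above: set defaults only for missing keys."""
    settings.setdefault("thinking_enabled", True)
    settings.setdefault("thinking_level", "low")
    return settings

assert apply_thinking_defaults({}) == {"thinking_enabled": True, "thinking_level": "low"}
assert apply_thinking_defaults({"thinking_level": "high"})["thinking_level"] == "high"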
code_puppy/models.json CHANGED
@@ -33,13 +33,13 @@
     "type": "gemini",
     "name": "gemini-3-pro-preview",
     "context_length": 200000,
-    "supported_settings": ["temperature", "top_p"]
+    "supported_settings": ["temperature", "top_p", "thinking_enabled", "thinking_level"]
   },
   "Gemini-3-Long-Context": {
     "type": "gemini",
     "name": "gemini-3-pro-preview",
     "context_length": 1000000,
-    "supported_settings": ["temperature", "top_p"]
+    "supported_settings": ["temperature", "top_p", "thinking_enabled", "thinking_level"]
   },
   "gpt-5.1": {
     "type": "openai",
@@ -33,13 +33,13 @@
     "type": "gemini",
     "name": "gemini-3-pro-preview",
     "context_length": 200000,
-    "supported_settings": ["temperature", "top_p"]
+    "supported_settings": ["temperature", "top_p", "thinking_enabled", "thinking_level"]
   },
   "Gemini-3-Long-Context": {
     "type": "gemini",
     "name": "gemini-3-pro-preview",
     "context_length": 1000000,
-    "supported_settings": ["temperature", "top_p"]
+    "supported_settings": ["temperature", "top_p", "thinking_enabled", "thinking_level"]
   },
   "gpt-5.1": {
     "type": "openai",
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: code-puppy
-Version: 0.0.376
+Version: 0.0.377
 Summary: Code generation agent
 Project-URL: repository, https://github.com/mpfaffenberger/code_puppy
 Project-URL: HomePage, https://github.com/mpfaffenberger/code_puppy
@@ -7,14 +7,14 @@ code_puppy/cli_runner.py,sha256=w5CLKgQYYaT7My3Cga2StXYol-u6DBxNzzUuhhsfhsA,3495
 code_puppy/config.py,sha256=eAyVqiu8SwzJQpaJu80rlJvd8XXY51DqafMsP5lBRI4,55827
 code_puppy/error_logging.py,sha256=a80OILCUtJhexI6a9GM-r5LqIdjvSRzggfgPp2jv1X0,3297
 code_puppy/gemini_code_assist.py,sha256=KGS7sO5OLc83nDF3xxS-QiU6vxW9vcm6hmzilu79Ef8,13867
-code_puppy/gemini_model.py,sha256=i8XXmx9s1eWEXpJ8U288w0yayTt6Nq8V-hxpUHhti4s,25984
+code_puppy/gemini_model.py,sha256=UHb5vFC9zrEdFJ-yCN3vNCdp0UxP156BL_fwbnEhaw8,27988
 code_puppy/http_utils.py,sha256=SAH6EOdbR6Cbfmi-4EtHDqRDBUV5bWtGc-5nr44F0Is,10418
 code_puppy/keymap.py,sha256=IvMkTlB_bIqOWpbTpmftkdyjhtD5todXuEIw1zCZ4u0,3584
 code_puppy/main.py,sha256=82r3vZy_XcyEsenLn82BnUusaoyL3Bpm_Th_jKgqecE,273
-code_puppy/model_factory.py,sha256=tFQ_DQxYiK6iieHkywsbvNXBFrY-vaECyv_v0QTQxaI,31385
+code_puppy/model_factory.py,sha256=VFMUY0u8oqHn9f1QqpWsxvXujlK0Ku5NEwfuaTcv2rs,32060
 code_puppy/model_switching.py,sha256=3IsnSWKHLWzI5d2WDYNg0Xr78BeYNN1WrZuzas-lYJ4,2064
 code_puppy/model_utils.py,sha256=cG1V4fsIOEQIb0W88FyGcsMWNv8SpmRAXN3A7LBEoyE,5116
-code_puppy/models.json,sha256=jAHRsCl3trysP4vU_k_ltA8GcFU2APd4lxFl8-4Jnvc,3243
+code_puppy/models.json,sha256=SC7N2lV1Q8ikXlalRNqABkNvuuL_8fIIk638739-gGY,3319
 code_puppy/models_dev_api.json,sha256=wHjkj-IM_fx1oHki6-GqtOoCrRMR0ScK0f-Iz0UEcy8,548187
 code_puppy/models_dev_parser.py,sha256=8ndmWrsSyKbXXpRZPXc0w6TfWMuCcgaHiMifmlaBaPc,20611
 code_puppy/pydantic_patches.py,sha256=-tYaQW8FMAbxuKGbsM09pvjCBqwk67mf7GekXzGKf14,6444
@@ -84,7 +84,7 @@ code_puppy/command_line/file_path_completion.py,sha256=gw8NpIxa6GOpczUJRyh7VNZwo
 code_puppy/command_line/load_context_completion.py,sha256=a3JvLDeLLSYxVgTjAdqWzS4spjv6ccCrK2LKZgVJ1IM,2202
 code_puppy/command_line/mcp_completion.py,sha256=eKzW2O7gun7HoHekOW0XVXhNS5J2xCtK7aaWyA8bkZk,6952
 code_puppy/command_line/model_picker_completion.py,sha256=YRudzwGVtIjr02MyeIdmbkDhS00ENjCt9k3nATT3KdM,6143
-code_puppy/command_line/model_settings_menu.py,sha256=VelHDj1kD_vzGzKfqJ16n-iNEMz4yBAx5TkLH0YTwC8,33055
+code_puppy/command_line/model_settings_menu.py,sha256=RFdYe0CGApu-BE5lubRfZ5lBaAjt_MeX5AUmOdaAt-s,33596
 code_puppy/command_line/motd.py,sha256=XuIk3UTLawwVFM-NfoaJGU5F2hPLASTFXq84UdDMT0Q,2408
 code_puppy/command_line/onboarding_slides.py,sha256=itqAsuHzjHpD_XNz6FniBIYr6dNyP1AW_XQZQ6SbVek,7125
 code_puppy/command_line/onboarding_wizard.py,sha256=U5lV_1P3IwDYZUHar0zKgdp121zzkvOwwORvdCZwFcw,10241
@@ -225,10 +225,10 @@ code_puppy/tools/browser/chromium_terminal_manager.py,sha256=w1thQ_ACb6oV45L93TS
 code_puppy/tools/browser/terminal_command_tools.py,sha256=9byOZku-dwvTtCl532xt7Lumed_jTn0sLvUe_X75XCQ,19068
 code_puppy/tools/browser/terminal_screenshot_tools.py,sha256=J_21YO_495NvYgNFu9KQP6VYg2K_f8CtSdZuF94Yhnw,18448
 code_puppy/tools/browser/terminal_tools.py,sha256=F5LjVH3udSCFHmqC3O1UJLoLozZFZsEdX42jOmkqkW0,17853
-code_puppy-0.0.376.data/data/code_puppy/models.json,sha256=jAHRsCl3trysP4vU_k_ltA8GcFU2APd4lxFl8-4Jnvc,3243
-code_puppy-0.0.376.data/data/code_puppy/models_dev_api.json,sha256=wHjkj-IM_fx1oHki6-GqtOoCrRMR0ScK0f-Iz0UEcy8,548187
-code_puppy-0.0.376.dist-info/METADATA,sha256=8zBZ-_vl5I-bVjco6ZxTH1Vsiz7NQQO1AqtYAiisweA,27604
-code_puppy-0.0.376.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-code_puppy-0.0.376.dist-info/entry_points.txt,sha256=Tp4eQC99WY3HOKd3sdvb22vZODRq0XkZVNpXOag_KdI,91
-code_puppy-0.0.376.dist-info/licenses/LICENSE,sha256=31u8x0SPgdOq3izJX41kgFazWsM43zPEF9eskzqbJMY,1075
-code_puppy-0.0.376.dist-info/RECORD,,
+code_puppy-0.0.377.data/data/code_puppy/models.json,sha256=SC7N2lV1Q8ikXlalRNqABkNvuuL_8fIIk638739-gGY,3319
+code_puppy-0.0.377.data/data/code_puppy/models_dev_api.json,sha256=wHjkj-IM_fx1oHki6-GqtOoCrRMR0ScK0f-Iz0UEcy8,548187
+code_puppy-0.0.377.dist-info/METADATA,sha256=h4-FOgFscu0NlRuJK2QQS5-zzmCVUzx8wod813P7sPU,27604
+code_puppy-0.0.377.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+code_puppy-0.0.377.dist-info/entry_points.txt,sha256=Tp4eQC99WY3HOKd3sdvb22vZODRq0XkZVNpXOag_KdI,91
+code_puppy-0.0.377.dist-info/licenses/LICENSE,sha256=31u8x0SPgdOq3izJX41kgFazWsM43zPEF9eskzqbJMY,1075
+code_puppy-0.0.377.dist-info/RECORD,,