vibesurf 0.1.23__py3-none-any.whl → 0.1.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

vibe_surf/_version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.1.23'
-__version_tuple__ = version_tuple = (0, 1, 23)
+__version__ = version = '0.1.25'
+__version_tuple__ = version_tuple = (0, 1, 25)
 
 __commit_id__ = commit_id = None
@@ -479,7 +479,7 @@ async def _vibesurf_agent_node_impl(state: VibeSurfState) -> VibeSurfState:
     llm=vibesurf_agent.llm,
     file_system=vibesurf_agent.file_system,
 )
-if "skill_search" in action_name or "skill_crawl" in action_name or "skill_summary" in action_name:
+if action_name in ["skill_search", "skill_crawl", "skill_summary", "skill_finance"]:
     state.current_step = "END"
     # Format final response
     final_response = f"{result.extracted_content}"
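
Note: the termination check changes from substring matching to exact list membership, and skill_finance joins the set of terminal skills. A hypothetical action name such as skill_search_v2 would have matched the old check but no longer does. A minimal sketch of the behavioral difference:

    terminal_skills = ["skill_search", "skill_crawl", "skill_summary", "skill_finance"]
    action_name = "skill_search_v2"   # hypothetical action name

    old_match = any(s in action_name for s in ["skill_search", "skill_crawl", "skill_summary"])
    new_match = action_name in terminal_skills
    print(old_match, new_match)       # True False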
@@ -234,7 +234,9 @@ async def update_llm_profile(
 
     # Return updated profile
     updated_profile = await LLMProfileQueries.get_profile(db, profile_name)
-
+    from ..shared_state import current_llm_profile_name
+    if current_llm_profile_name != profile_name:
+        current_llm_profile_name = None
     # Use safe extraction to avoid greenlet issues
     return LLMProfileResponse(**_profile_to_response_dict(updated_profile))
 
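Note: as written, `from ..shared_state import current_llm_profile_name` binds the module-level value to a local name, so the following `current_llm_profile_name = None` rebinds only that local and leaves `shared_state.current_llm_profile_name` untouched. If the intent is to invalidate the cached profile name, assigning through the module object would do it. A sketch mirroring the diff's condition (the import path is assumed):

    from vibe_surf.backend import shared_state  # assumed module path

    def invalidate_profile_cache(profile_name: str) -> None:
        # Attribute assignment mutates the shared module state;
        # a bare `name = None` after `from ... import name` would not.
        if shared_state.current_llm_profile_name != profile_name:
            shared_state.current_llm_profile_name = None
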
@@ -142,8 +142,9 @@ def create_llm_from_profile(llm_profile) -> BaseChatModel:
         )
 
     elif provider == "deepseek":
-        return ChatDeepSeek(
+        return ChatOpenAICompatible(
             model=model,
+            base_url="https://api.deepseek.com",
             api_key=api_key,
             **common_params
         )
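
Note: the deepseek provider now routes through ChatOpenAICompatible instead of a dedicated ChatDeepSeek client, pointing the OpenAI-compatible client at DeepSeek's endpoint (DeepSeek's API follows the OpenAI wire format). A minimal usage sketch, assuming the constructor mirrors ChatOpenAI (model, base_url, api_key) and using placeholder values:

    llm = ChatOpenAICompatible(
        model="deepseek-chat",                 # example model name
        base_url="https://api.deepseek.com",
        api_key="sk-...",                      # placeholder key
    )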
@@ -51,6 +51,8 @@ from browser_use.llm.openai.serializer import OpenAIMessageSerializer
 from browser_use.llm.schema import SchemaOptimizer
 from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
 
+from json_repair import repair_json
+
 T = TypeVar('T', bound=BaseModel)
 
 from vibe_surf.logger import get_logger
@@ -74,6 +76,8 @@ class ChatOpenAICompatible(ChatOpenAI):
     The class automatically detects the model type and applies appropriate fixes.
     """
 
+    max_completion_tokens: int | None = 16000
+
     def _is_gemini_model(self) -> bool:
         """Check if the current model is a Gemini model."""
         return str(self.model).lower().startswith('gemini')
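
Note: the new class-level field gives every ChatOpenAICompatible instance a default completion cap of 16000 tokens; callers can still override it per instance. A sketch, assuming field initialization is inherited from ChatOpenAI's dataclass-style constructor:

    llm = ChatOpenAICompatible(model="qwen-max", max_completion_tokens=4096)  # example values
    print(llm.max_completion_tokens)  # 4096 rather than the 16000 default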
@@ -82,6 +86,11 @@ class ChatOpenAICompatible(ChatOpenAI):
         """Check if the current model is a Kimi/Moonshot model."""
         model_str = str(self.model).lower()
         return 'kimi' in model_str or 'moonshot' in model_str
+
+    def _is_deepseek_model(self) -> bool:
+        """Check if the current model is a DeepSeek model."""
+        model_str = str(self.model).lower()
+        return 'deepseek' in model_str
 
     def _is_qwen_model(self) -> bool:
         """Check if the current model is a Qwen model."""
@@ -223,10 +232,10 @@ class ChatOpenAICompatible(ChatOpenAI):
         """
         # If this is not a special model or no structured output is requested,
         # use the parent implementation directly
-        if self._is_qwen_model() or self._is_kimi_model():
+        if self._is_qwen_model() or self._is_kimi_model() or self._is_deepseek_model():
             self.add_schema_to_system_prompt = True
 
-        if not (self._is_gemini_model() or self._is_kimi_model() or self._is_qwen_model()) or output_format is None:
+        if not (self._is_gemini_model() or self._is_kimi_model() or self._is_qwen_model() or self._is_deepseek_model()) or output_format is None:
             return await super().ainvoke(messages, output_format)
         openai_messages = OpenAIMessageSerializer.serialize_messages(messages)
 
@@ -241,6 +250,7 @@ class ChatOpenAICompatible(ChatOpenAI):
 
         if self.max_completion_tokens is not None:
             model_params['max_completion_tokens'] = self.max_completion_tokens
+            model_params['max_tokens'] = self.max_completion_tokens
 
         if self.top_p is not None:
             model_params['top_p'] = self.top_p
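
Note: max_completion_tokens is the newer OpenAI parameter name and max_tokens the legacy one; OpenAI-compatible backends differ in which they honor, so the request now carries both, set to the same value. Effective request params for the 16k default, sketched:

    model_params = {
        'max_completion_tokens': 16000,  # honored by newer OpenAI endpoints
        'max_tokens': 16000,             # honored by many OpenAI-compatible servers
    }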
@@ -298,12 +308,22 @@ class ChatOpenAICompatible(ChatOpenAI):
         ]
 
         # Return structured response
-        response = await self.get_client().chat.completions.create(
-            model=self.model,
-            messages=openai_messages,
-            response_format=ResponseFormatJSONSchema(json_schema=response_format, type='json_schema'),
-            **model_params,
-        )
+        if self.add_schema_to_system_prompt:
+            response = await self.get_client().chat.completions.create(
+                model=self.model,
+                messages=openai_messages,
+                response_format={
+                    'type': 'json_object'
+                },
+                **model_params,
+            )
+        else:
+            response = await self.get_client().chat.completions.create(
+                model=self.model,
+                messages=openai_messages,
+                response_format=ResponseFormatJSONSchema(json_schema=response_format, type='json_schema'),
+                **model_params,
+            )
 
         if response.choices[0].message.content is None:
             raise ModelProviderError(
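
Note: when add_schema_to_system_prompt is set (Qwen, Kimi, and now DeepSeek, per the earlier hunk), the request falls back to the widely supported json_object response format, with the schema conveyed via the system prompt, rather than the strict json_schema structured-output mode that many OpenAI-compatible endpoints reject. The two request shapes, sketched:

    # Loose mode: endpoint only guarantees syntactically valid JSON.
    loose = {'type': 'json_object'}

    # Strict mode: endpoint validates output against the supplied schema.
    strict = {'type': 'json_schema', 'json_schema': response_format}  # response_format built earlier in ainvoke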
@@ -313,8 +333,12 @@ class ChatOpenAICompatible(ChatOpenAI):
         )
 
         usage = self._get_usage(response)
-
-        parsed = output_format.model_validate_json(response.choices[0].message.content)
+        output_content = response.choices[0].message.content
+        try:
+            parsed = output_format.model_validate_json(output_content)
+        except Exception as e:
+            repair_content = repair_json(output_content)
+            parsed = output_format.model_validate_json(repair_content)
 
         return ChatInvokeCompletion(
             completion=parsed,
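
Note: model output that fails Pydantic's JSON validation is now passed through the json-repair package, which fixes the malformed JSON LLMs commonly emit (trailing commas, unquoted keys, truncated output) before a second parse attempt. A minimal demonstration, assuming json-repair's default string-in/string-out behavior:

    import json
    from json_repair import repair_json

    broken = '{"answer": "ok", }'   # trailing comma: json.loads would raise
    fixed = repair_json(broken)     # returns a repaired JSON string
    print(json.loads(fixed))        # {'answer': 'ok'}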
@@ -224,9 +224,9 @@ class CustomFileSystem(FileSystem):
     def _is_valid_filename(self, file_name: str) -> bool:
         """Check if filename matches the required pattern: name.extension"""
         # Build extensions pattern from _file_types
-        file_name = os.path.basename(file_name)
+        file_name = os.path.splitext(file_name)[1]
         extensions = '|'.join(self._file_types.keys())
-        pattern = rf'^[a-zA-Z0-9_\-]+\.({extensions})$'
+        pattern = rf'\.({extensions})$'
         return bool(re.match(pattern, file_name))
 
     async def append_file(self, full_filename: str, content: str) -> str:
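
Note: validation now inspects only the extension returned by os.path.splitext, so any basename passes (spaces, extra dots, unicode) as long as the suffix is a known file type; the old pattern also constrained the name part to [a-zA-Z0-9_-]. A sketch of the difference, with a hypothetical extension table standing in for _file_types:

    import os, re

    file_types = {'md': None, 'txt': None}    # hypothetical _file_types keys
    name = "notes v2.final.md"

    old_ok = bool(re.match(rf'^[a-zA-Z0-9_\-]+\.({"|".join(file_types)})$', os.path.basename(name)))
    new_ok = bool(re.match(rf'\.({"|".join(file_types)})$', os.path.splitext(name)[1]))
    print(old_ok, new_ok)                     # False True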