sentienceapi 0.90.12__py3-none-any.whl → 0.92.2__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of sentienceapi might be problematic.

Files changed (63)
  1. sentience/__init__.py +14 -5
  2. sentience/_extension_loader.py +40 -0
  3. sentience/action_executor.py +215 -0
  4. sentience/actions.py +408 -25
  5. sentience/agent.py +804 -310
  6. sentience/agent_config.py +3 -0
  7. sentience/async_api.py +101 -0
  8. sentience/base_agent.py +95 -0
  9. sentience/browser.py +594 -25
  10. sentience/browser_evaluator.py +299 -0
  11. sentience/cloud_tracing.py +458 -36
  12. sentience/conversational_agent.py +79 -45
  13. sentience/element_filter.py +136 -0
  14. sentience/expect.py +98 -2
  15. sentience/extension/background.js +56 -185
  16. sentience/extension/content.js +117 -289
  17. sentience/extension/injected_api.js +799 -1374
  18. sentience/extension/manifest.json +1 -1
  19. sentience/extension/pkg/sentience_core.js +190 -396
  20. sentience/extension/pkg/sentience_core_bg.wasm +0 -0
  21. sentience/extension/release.json +47 -47
  22. sentience/formatting.py +9 -53
  23. sentience/inspector.py +183 -1
  24. sentience/llm_interaction_handler.py +191 -0
  25. sentience/llm_provider.py +256 -28
  26. sentience/llm_provider_utils.py +120 -0
  27. sentience/llm_response_builder.py +153 -0
  28. sentience/models.py +66 -1
  29. sentience/overlay.py +109 -2
  30. sentience/protocols.py +228 -0
  31. sentience/query.py +1 -1
  32. sentience/read.py +95 -3
  33. sentience/recorder.py +223 -3
  34. sentience/schemas/trace_v1.json +102 -9
  35. sentience/screenshot.py +48 -2
  36. sentience/sentience_methods.py +86 -0
  37. sentience/snapshot.py +309 -64
  38. sentience/snapshot_diff.py +141 -0
  39. sentience/text_search.py +119 -5
  40. sentience/trace_event_builder.py +129 -0
  41. sentience/trace_file_manager.py +197 -0
  42. sentience/trace_indexing/index_schema.py +95 -7
  43. sentience/trace_indexing/indexer.py +117 -14
  44. sentience/tracer_factory.py +119 -6
  45. sentience/tracing.py +172 -8
  46. sentience/utils/__init__.py +40 -0
  47. sentience/utils/browser.py +46 -0
  48. sentience/utils/element.py +257 -0
  49. sentience/utils/formatting.py +59 -0
  50. sentience/utils.py +1 -1
  51. sentience/visual_agent.py +2056 -0
  52. sentience/wait.py +70 -4
  53. {sentienceapi-0.90.12.dist-info → sentienceapi-0.92.2.dist-info}/METADATA +61 -22
  54. sentienceapi-0.92.2.dist-info/RECORD +65 -0
  55. sentienceapi-0.92.2.dist-info/licenses/LICENSE +24 -0
  56. sentienceapi-0.92.2.dist-info/licenses/LICENSE-APACHE +201 -0
  57. sentienceapi-0.92.2.dist-info/licenses/LICENSE-MIT +21 -0
  58. sentience/extension/test-content.js +0 -4
  59. sentienceapi-0.90.12.dist-info/RECORD +0 -46
  60. sentienceapi-0.90.12.dist-info/licenses/LICENSE.md +0 -43
  61. {sentienceapi-0.90.12.dist-info → sentienceapi-0.92.2.dist-info}/WHEEL +0 -0
  62. {sentienceapi-0.90.12.dist-info → sentienceapi-0.92.2.dist-info}/entry_points.txt +0 -0
  63. {sentienceapi-0.90.12.dist-info → sentienceapi-0.92.2.dist-info}/top_level.txt +0 -0
sentience/llm_provider.py CHANGED
@@ -1,3 +1,5 @@
+from typing import Optional
+
 """
 LLM Provider abstraction layer for Sentience SDK
 Enables "Bring Your Own Brain" (BYOB) pattern - plug in any LLM provider
@@ -6,6 +8,9 @@ Enables "Bring Your Own Brain" (BYOB) pattern - plug in any LLM provider
 from abc import ABC, abstractmethod
 from dataclasses import dataclass
 
+from .llm_provider_utils import get_api_key_from_env, handle_provider_error, require_package
+from .llm_response_builder import LLMResponseBuilder
+
 
 @dataclass
 class LLMResponse:
@@ -31,6 +36,15 @@ class LLMProvider(ABC):
     - Any other completion API
     """
 
+    def __init__(self, model: str):
+        """
+        Initialize LLM provider with model name.
+
+        Args:
+            model: Model identifier (e.g., "gpt-4o", "claude-3-sonnet")
+        """
+        self._model_name = model
+
     @abstractmethod
     def generate(self, system_prompt: str, user_prompt: str, **kwargs) -> LLMResponse:
         """
@@ -95,13 +109,16 @@ class OpenAIProvider(LLMProvider):
             base_url: Custom API base URL (for compatible APIs)
             organization: OpenAI organization ID
         """
-        try:
-            from openai import OpenAI
-        except ImportError:
-            raise ImportError("OpenAI package not installed. Install with: pip install openai")
+        super().__init__(model)  # Initialize base class with model name
+
+        OpenAI = require_package(
+            "openai",
+            "openai",
+            "OpenAI",
+            "pip install openai",
+        )
 
         self.client = OpenAI(api_key=api_key, base_url=base_url, organization=organization)
-        self._model_name = model
 
     def generate(
         self,
@@ -148,12 +165,15 @@ class OpenAIProvider(LLMProvider):
         api_params.update(kwargs)
 
         # Call OpenAI API
-        response = self.client.chat.completions.create(**api_params)
+        try:
+            response = self.client.chat.completions.create(**api_params)
+        except Exception as e:
+            handle_provider_error(e, "OpenAI", "generate response")
 
         choice = response.choices[0]
         usage = response.usage
 
-        return LLMResponse(
+        return LLMResponseBuilder.from_openai_format(
             content=choice.message.content,
             prompt_tokens=usage.prompt_tokens if usage else None,
             completion_tokens=usage.completion_tokens if usage else None,
@@ -191,15 +211,16 @@ class AnthropicProvider(LLMProvider):
             api_key: Anthropic API key (or set ANTHROPIC_API_KEY env var)
             model: Model name (claude-3-opus, claude-3-sonnet, claude-3-haiku, etc.)
         """
-        try:
-            from anthropic import Anthropic
-        except ImportError:
-            raise ImportError(
-                "Anthropic package not installed. Install with: pip install anthropic"
-            )
+        super().__init__(model)  # Initialize base class with model name
+
+        Anthropic = require_package(
+            "anthropic",
+            "anthropic",
+            "Anthropic",
+            "pip install anthropic",
+        )
 
         self.client = Anthropic(api_key=api_key)
-        self._model_name = model
 
     def generate(
         self,
@@ -237,21 +258,19 @@ class AnthropicProvider(LLMProvider):
         api_params.update(kwargs)
 
         # Call Anthropic API
-        response = self.client.messages.create(**api_params)
+        try:
+            response = self.client.messages.create(**api_params)
+        except Exception as e:
+            handle_provider_error(e, "Anthropic", "generate response")
 
         content = response.content[0].text if response.content else ""
 
-        return LLMResponse(
+        return LLMResponseBuilder.from_anthropic_format(
             content=content,
-            prompt_tokens=response.usage.input_tokens if hasattr(response, "usage") else None,
-            completion_tokens=response.usage.output_tokens if hasattr(response, "usage") else None,
-            total_tokens=(
-                (response.usage.input_tokens + response.usage.output_tokens)
-                if hasattr(response, "usage")
-                else None
-            ),
+            input_tokens=response.usage.input_tokens if hasattr(response, "usage") else None,
+            output_tokens=response.usage.output_tokens if hasattr(response, "usage") else None,
             model_name=response.model,
-            finish_reason=response.stop_reason,
+            stop_reason=response.stop_reason,
         )
 
     def supports_json_mode(self) -> bool:
@@ -263,6 +282,215 @@ class AnthropicProvider(LLMProvider):
         return self._model_name
 
 
+class GLMProvider(LLMProvider):
+    """
+    Zhipu AI GLM provider implementation (GLM-4, GLM-4-Plus, etc.)
+
+    Requirements:
+        pip install zhipuai
+
+    Example:
+        >>> from sentience.llm_provider import GLMProvider
+        >>> llm = GLMProvider(api_key="your-api-key", model="glm-4-plus")
+        >>> response = llm.generate("You are a helpful assistant", "Hello!")
+        >>> print(response.content)
+    """
+
+    def __init__(self, api_key: str | None = None, model: str = "glm-4-plus"):
+        """
+        Initialize GLM provider
+
+        Args:
+            api_key: Zhipu AI API key (or set GLM_API_KEY env var)
+            model: Model name (glm-4-plus, glm-4, glm-4-air, glm-4-flash, etc.)
+        """
+        super().__init__(model)  # Initialize base class with model name
+
+        ZhipuAI = require_package(
+            "zhipuai",
+            "zhipuai",
+            "ZhipuAI",
+            "pip install zhipuai",
+        )
+
+        self.client = ZhipuAI(api_key=api_key)
+
+    def generate(
+        self,
+        system_prompt: str,
+        user_prompt: str,
+        temperature: float = 0.0,
+        max_tokens: int | None = None,
+        **kwargs,
+    ) -> LLMResponse:
+        """
+        Generate response using GLM API
+
+        Args:
+            system_prompt: System instruction
+            user_prompt: User query
+            temperature: Sampling temperature (0.0 = deterministic, 1.0 = creative)
+            max_tokens: Maximum tokens to generate
+            **kwargs: Additional GLM API parameters
+
+        Returns:
+            LLMResponse object
+        """
+        messages = []
+        if system_prompt:
+            messages.append({"role": "system", "content": system_prompt})
+        messages.append({"role": "user", "content": user_prompt})
+
+        # Build API parameters
+        api_params = {
+            "model": self._model_name,
+            "messages": messages,
+            "temperature": temperature,
+        }
+
+        if max_tokens:
+            api_params["max_tokens"] = max_tokens
+
+        # Merge additional parameters
+        api_params.update(kwargs)
+
+        # Call GLM API
+        try:
+            response = self.client.chat.completions.create(**api_params)
+        except Exception as e:
+            handle_provider_error(e, "GLM", "generate response")
+
+        choice = response.choices[0]
+        usage = response.usage
+
+        return LLMResponseBuilder.from_openai_format(
+            content=choice.message.content,
+            prompt_tokens=usage.prompt_tokens if usage else None,
+            completion_tokens=usage.completion_tokens if usage else None,
+            total_tokens=usage.total_tokens if usage else None,
+            model_name=response.model,
+            finish_reason=choice.finish_reason,
+        )
+
+    def supports_json_mode(self) -> bool:
+        """GLM-4 models support JSON mode"""
+        return "glm-4" in self._model_name.lower()
+
+    @property
+    def model_name(self) -> str:
+        return self._model_name
+
+
+class GeminiProvider(LLMProvider):
+    """
+    Google Gemini provider implementation (Gemini 2.0, Gemini 1.5 Pro, etc.)
+
+    Requirements:
+        pip install google-generativeai
+
+    Example:
+        >>> from sentience.llm_provider import GeminiProvider
+        >>> llm = GeminiProvider(api_key="your-api-key", model="gemini-2.0-flash-exp")
+        >>> response = llm.generate("You are a helpful assistant", "Hello!")
+        >>> print(response.content)
+    """
+
+    def __init__(self, api_key: str | None = None, model: str = "gemini-2.0-flash-exp"):
+        """
+        Initialize Gemini provider
+
+        Args:
+            api_key: Google API key (or set GEMINI_API_KEY or GOOGLE_API_KEY env var)
+            model: Model name (gemini-2.0-flash-exp, gemini-1.5-pro, gemini-1.5-flash, etc.)
+        """
+        super().__init__(model)  # Initialize base class with model name
+
+        genai = require_package(
+            "google-generativeai",
+            "google.generativeai",
+            install_command="pip install google-generativeai",
+        )
+
+        # Configure API key (check parameter first, then environment variables)
+        api_key = get_api_key_from_env(["GEMINI_API_KEY", "GOOGLE_API_KEY"], api_key)
+        if api_key:
+            genai.configure(api_key=api_key)
+
+        self.genai = genai
+        self.model = genai.GenerativeModel(model)
+
+    def generate(
+        self,
+        system_prompt: str,
+        user_prompt: str,
+        temperature: float = 0.0,
+        max_tokens: int | None = None,
+        **kwargs,
+    ) -> LLMResponse:
+        """
+        Generate response using Gemini API
+
+        Args:
+            system_prompt: System instruction
+            user_prompt: User query
+            temperature: Sampling temperature (0.0 = deterministic, 2.0 = very creative)
+            max_tokens: Maximum tokens to generate
+            **kwargs: Additional Gemini API parameters
+
+        Returns:
+            LLMResponse object
+        """
+        # Combine system and user prompts (Gemini doesn't have separate system role in all versions)
+        full_prompt = f"{system_prompt}\n\n{user_prompt}" if system_prompt else user_prompt
+
+        # Build generation config
+        generation_config = {
+            "temperature": temperature,
+        }
+
+        if max_tokens:
+            generation_config["max_output_tokens"] = max_tokens
+
+        # Merge additional parameters
+        generation_config.update(kwargs)
+
+        # Call Gemini API
+        try:
+            response = self.model.generate_content(full_prompt, generation_config=generation_config)
+        except Exception as e:
+            handle_provider_error(e, "Gemini", "generate response")
+
+        # Extract content
+        content = response.text if response.text else ""
+
+        # Token usage (if available)
+        prompt_tokens = None
+        completion_tokens = None
+        total_tokens = None
+
+        if hasattr(response, "usage_metadata") and response.usage_metadata:
+            prompt_tokens = response.usage_metadata.prompt_token_count
+            completion_tokens = response.usage_metadata.candidates_token_count
+            total_tokens = response.usage_metadata.total_token_count
+
+        return LLMResponseBuilder.from_gemini_format(
+            content=content,
+            prompt_tokens=prompt_tokens,
+            completion_tokens=completion_tokens,
+            total_tokens=total_tokens,
+            model_name=self._model_name,
+        )
+
+    def supports_json_mode(self) -> bool:
+        """Gemini 1.5+ models support JSON mode via response_mime_type"""
+        model_lower = self._model_name.lower()
+        return any(x in model_lower for x in ["gemini-1.5", "gemini-2.0"])
+
+    @property
+    def model_name(self) -> str:
+        return self._model_name
+
+
 class LocalLLMProvider(LLMProvider):
     """
     Local LLM provider using HuggingFace Transformers
@@ -297,6 +525,9 @@ class LocalLLMProvider(LLMProvider):
             load_in_8bit: Use 8-bit quantization (saves 50% memory)
             torch_dtype: Data type ("auto", "float16", "bfloat16", "float32")
         """
+        super().__init__(model_name)  # Initialize base class with model name
+
+        # Import required packages with consistent error handling
         try:
             import torch
             from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
@@ -306,8 +537,6 @@ class LocalLLMProvider(LLMProvider):
                 "Install with: pip install transformers torch"
             )
 
-        self._model_name = model_name
-
         # Load tokenizer
         self.tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 
@@ -414,11 +643,10 @@ class LocalLLMProvider(LLMProvider):
         generated_tokens = outputs[0][input_length:]
         response_text = self.tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
 
-        return LLMResponse(
+        return LLMResponseBuilder.from_local_format(
            content=response_text,
            prompt_tokens=input_length,
            completion_tokens=len(generated_tokens),
-           total_tokens=input_length + len(generated_tokens),
            model_name=self._model_name,
        )
 
sentience/llm_provider_utils.py ADDED
@@ -0,0 +1,120 @@
+"""
+LLM Provider utility functions for common initialization and error handling.
+
+This module provides helper functions to reduce duplication across LLM provider implementations.
+"""
+
+import os
+from collections.abc import Callable
+from typing import Any, Optional, TypeVar
+
+T = TypeVar("T")
+
+
+def require_package(
+    package_name: str,
+    module_name: str,
+    class_name: str | None = None,
+    install_command: str | None = None,
+) -> Any:
+    """
+    Import a package with consistent error handling.
+
+    Args:
+        package_name: Name of the package (for error messages)
+        module_name: Module name to import (e.g., "openai", "google.generativeai")
+        class_name: Optional class name to import from module (e.g., "OpenAI")
+        install_command: Installation command (defaults to "pip install {package_name}")
+
+    Returns:
+        Imported module or class
+
+    Raises:
+        ImportError: If package is not installed, with helpful message
+
+    Example:
+        >>> OpenAI = require_package("openai", "openai", "OpenAI", "pip install openai")
+        >>> genai = require_package("google-generativeai", "google.generativeai", install_command="pip install google-generativeai")
+    """
+    if install_command is None:
+        install_command = f"pip install {package_name}"
+
+    try:
+        if class_name:
+            # Import specific class: from module import class
+            module = __import__(module_name, fromlist=[class_name])
+            return getattr(module, class_name)
+        else:
+            # Import entire module
+            return __import__(module_name)
+    except ImportError:
+        raise ImportError(f"{package_name} package not installed. Install with: {install_command}")
+
+
+def get_api_key_from_env(
+    env_vars: list[str],
+    api_key: str | None = None,
+) -> str | None:
+    """
+    Get API key from parameter or environment variables.
+
+    Args:
+        env_vars: List of environment variable names to check (in order)
+        api_key: Optional API key parameter (takes precedence)
+
+    Returns:
+        API key string or None if not found
+
+    Example:
+        >>> key = get_api_key_from_env(["OPENAI_API_KEY"], api_key="sk-...")
+        >>> # Returns "sk-..." if provided, otherwise checks OPENAI_API_KEY env var
+    """
+    if api_key:
+        return api_key
+
+    for env_var in env_vars:
+        value = os.getenv(env_var)
+        if value:
+            return value
+
+    return None
+
+
+def handle_provider_error(
+    error: Exception,
+    provider_name: str,
+    operation: str = "operation",
+) -> None:
+    """
+    Standardize error handling for LLM provider operations.
+
+    Args:
+        error: Exception that occurred
+        provider_name: Name of the provider (e.g., "OpenAI", "Anthropic")
+        operation: Description of the operation that failed
+
+    Raises:
+        RuntimeError: With standardized error message
+
+    Example:
+        >>> try:
+        ...     response = client.chat.completions.create(...)
+        ... except Exception as e:
+        ...     handle_provider_error(e, "OpenAI", "generate response")
+    """
+    error_msg = str(error)
+    if "api key" in error_msg.lower() or "authentication" in error_msg.lower():
+        raise RuntimeError(
+            f"{provider_name} API key is invalid or missing. "
+            f"Please check your API key configuration."
+        ) from error
+    elif "rate limit" in error_msg.lower() or "429" in error_msg:
+        raise RuntimeError(
+            f"{provider_name} rate limit exceeded. Please try again later."
+        ) from error
+    elif "model" in error_msg.lower() and "not found" in error_msg.lower():
+        raise RuntimeError(
+            f"{provider_name} model not found. Please check the model name."
+        ) from error
+    else:
+        raise RuntimeError(f"{provider_name} {operation} failed: {error_msg}") from error
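Note: these helpers replace the per-provider try/except blocks removed above. A short sketch of how a new provider could reuse them; the "acme-llm" package, AcmeClient class, and ACME_API_KEY variable are illustrative placeholders, not part of this release:

    from sentience.llm_provider_utils import (
        get_api_key_from_env,
        handle_provider_error,
        require_package,
    )

    # Raises ImportError with an install hint if the package is missing.
    AcmeClient = require_package("acme-llm", "acme_llm", "AcmeClient", "pip install acme-llm")

    # Explicit argument wins; otherwise the env vars are checked in order.
    api_key = get_api_key_from_env(["ACME_API_KEY"], api_key=None)

    try:
        client = AcmeClient(api_key=api_key)
    except Exception as e:
        # Maps auth, rate-limit, and model-not-found errors onto uniform RuntimeError messages.
        handle_provider_error(e, "Acme", "initialize client")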
sentience/llm_response_builder.py ADDED
@@ -0,0 +1,153 @@
+"""
+LLM Response building utilities for consistent response construction.
+
+This module provides helper functions for building LLMResponse objects
+from various provider API responses.
+"""
+
+from typing import Any, Optional
+
+# Import LLMResponse here to avoid circular dependency
+# We import it inside functions to break the cycle
+
+
+class LLMResponseBuilder:
+    """
+    Helper for building LLMResponse objects with consistent structure.
+
+    Provides static methods for building responses from different provider formats.
+    """
+
+    @staticmethod
+    def from_openai_format(
+        content: str,
+        prompt_tokens: int | None = None,
+        completion_tokens: int | None = None,
+        total_tokens: int | None = None,
+        model_name: str | None = None,
+        finish_reason: str | None = None,
+    ) -> "LLMResponse":
+        """
+        Build LLMResponse from OpenAI-style API response.
+
+        Args:
+            content: Response text content
+            prompt_tokens: Number of prompt tokens
+            completion_tokens: Number of completion tokens
+            total_tokens: Total tokens (or sum of prompt + completion)
+            model_name: Model identifier
+            finish_reason: Finish reason (stop, length, etc.)
+
+        Returns:
+            LLMResponse object
+        """
+        from .llm_provider import LLMResponse  # Import here to avoid circular dependency
+
+        return LLMResponse(
+            content=content,
+            prompt_tokens=prompt_tokens,
+            completion_tokens=completion_tokens,
+            total_tokens=total_tokens
+            or (
+                (prompt_tokens + completion_tokens) if prompt_tokens and completion_tokens else None
+            ),
+            model_name=model_name,
+            finish_reason=finish_reason,
+        )
+
+    @staticmethod
+    def from_anthropic_format(
+        content: str,
+        input_tokens: int | None = None,
+        output_tokens: int | None = None,
+        model_name: str | None = None,
+        stop_reason: str | None = None,
+    ) -> "LLMResponse":
+        """
+        Build LLMResponse from Anthropic-style API response.
+
+        Args:
+            content: Response text content
+            input_tokens: Number of input tokens
+            output_tokens: Number of output tokens
+            model_name: Model identifier
+            stop_reason: Stop reason (end_turn, max_tokens, etc.)
+
+        Returns:
+            LLMResponse object
+        """
+        from .llm_provider import LLMResponse  # Import here to avoid circular dependency
+
+        return LLMResponse(
+            content=content,
+            prompt_tokens=input_tokens,
+            completion_tokens=output_tokens,
+            total_tokens=(input_tokens + output_tokens) if input_tokens and output_tokens else None,
+            model_name=model_name,
+            finish_reason=stop_reason,
+        )
+
+    @staticmethod
+    def from_gemini_format(
+        content: str,
+        prompt_tokens: int | None = None,
+        completion_tokens: int | None = None,
+        total_tokens: int | None = None,
+        model_name: str | None = None,
+    ) -> "LLMResponse":
+        """
+        Build LLMResponse from Gemini-style API response.
+
+        Args:
+            content: Response text content
+            prompt_tokens: Number of prompt tokens
+            completion_tokens: Number of completion tokens
+            total_tokens: Total tokens
+            model_name: Model identifier
+
+        Returns:
+            LLMResponse object
+        """
+        from .llm_provider import LLMResponse  # Import here to avoid circular dependency
+
+        return LLMResponse(
+            content=content,
+            prompt_tokens=prompt_tokens,
+            completion_tokens=completion_tokens,
+            total_tokens=total_tokens
+            or (
+                (prompt_tokens + completion_tokens) if prompt_tokens and completion_tokens else None
+            ),
+            model_name=model_name,
+            finish_reason=None,  # Gemini uses different finish reason format
+        )
+
+    @staticmethod
+    def from_local_format(
+        content: str,
+        prompt_tokens: int,
+        completion_tokens: int,
+        model_name: str,
+    ) -> "LLMResponse":
+        """
+        Build LLMResponse from local model generation.
+
+        Args:
+            content: Response text content
+            prompt_tokens: Number of prompt tokens
+            completion_tokens: Number of completion tokens
+            model_name: Model identifier
+
+        Returns:
+            LLMResponse object
+        """
+        from .llm_provider import LLMResponse  # Import here to avoid circular dependency
+
+        return LLMResponse(
+            content=content,
+            prompt_tokens=prompt_tokens,
+            completion_tokens=completion_tokens,
+            total_tokens=prompt_tokens + completion_tokens,
+            model_name=model_name,
+            finish_reason=None,
+        )
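Note: the builder normalizes token accounting across providers; when a backend omits total_tokens, it is derived from the prompt and completion counts. A small sketch (values invented for illustration):

    from sentience.llm_response_builder import LLMResponseBuilder

    resp = LLMResponseBuilder.from_openai_format(
        content="Hello!",
        prompt_tokens=12,
        completion_tokens=5,
        total_tokens=None,  # omitted by the API in this example
        model_name="gpt-4o",
        finish_reason="stop",
    )
    assert resp.total_tokens == 17  # filled in from 12 + 5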