camel-ai 0.1.5.1__py3-none-any.whl → 0.1.5.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.

Files changed (86)
  1. camel/agents/__init__.py +2 -0
  2. camel/agents/chat_agent.py +237 -52
  3. camel/agents/critic_agent.py +6 -9
  4. camel/agents/deductive_reasoner_agent.py +93 -40
  5. camel/agents/embodied_agent.py +6 -9
  6. camel/agents/knowledge_graph_agent.py +49 -27
  7. camel/agents/role_assignment_agent.py +14 -12
  8. camel/agents/search_agent.py +122 -0
  9. camel/agents/task_agent.py +26 -38
  10. camel/bots/__init__.py +20 -0
  11. camel/bots/discord_bot.py +103 -0
  12. camel/bots/telegram_bot.py +84 -0
  13. camel/configs/__init__.py +3 -0
  14. camel/configs/anthropic_config.py +1 -1
  15. camel/configs/litellm_config.py +113 -0
  16. camel/configs/openai_config.py +14 -0
  17. camel/embeddings/__init__.py +2 -0
  18. camel/embeddings/openai_embedding.py +2 -2
  19. camel/embeddings/sentence_transformers_embeddings.py +6 -5
  20. camel/embeddings/vlm_embedding.py +146 -0
  21. camel/functions/__init__.py +9 -0
  22. camel/functions/open_api_function.py +161 -33
  23. camel/functions/open_api_specs/biztoc/__init__.py +13 -0
  24. camel/functions/open_api_specs/biztoc/ai-plugin.json +34 -0
  25. camel/functions/open_api_specs/biztoc/openapi.yaml +21 -0
  26. camel/functions/open_api_specs/create_qr_code/__init__.py +13 -0
  27. camel/functions/open_api_specs/create_qr_code/openapi.yaml +44 -0
  28. camel/functions/open_api_specs/nasa_apod/__init__.py +13 -0
  29. camel/functions/open_api_specs/nasa_apod/openapi.yaml +72 -0
  30. camel/functions/open_api_specs/outschool/__init__.py +13 -0
  31. camel/functions/open_api_specs/outschool/ai-plugin.json +34 -0
  32. camel/functions/open_api_specs/outschool/openapi.yaml +1 -0
  33. camel/functions/open_api_specs/outschool/paths/__init__.py +14 -0
  34. camel/functions/open_api_specs/outschool/paths/get_classes.py +29 -0
  35. camel/functions/open_api_specs/outschool/paths/search_teachers.py +29 -0
  36. camel/functions/open_api_specs/security_config.py +21 -0
  37. camel/functions/open_api_specs/web_scraper/__init__.py +13 -0
  38. camel/functions/open_api_specs/web_scraper/ai-plugin.json +34 -0
  39. camel/functions/open_api_specs/web_scraper/openapi.yaml +71 -0
  40. camel/functions/open_api_specs/web_scraper/paths/__init__.py +13 -0
  41. camel/functions/open_api_specs/web_scraper/paths/scraper.py +29 -0
  42. camel/functions/openai_function.py +3 -1
  43. camel/functions/search_functions.py +104 -171
  44. camel/functions/slack_functions.py +16 -3
  45. camel/human.py +3 -1
  46. camel/loaders/base_io.py +3 -1
  47. camel/loaders/unstructured_io.py +16 -22
  48. camel/messages/base.py +135 -46
  49. camel/models/__init__.py +8 -0
  50. camel/models/anthropic_model.py +24 -16
  51. camel/models/base_model.py +6 -1
  52. camel/models/litellm_model.py +112 -0
  53. camel/models/model_factory.py +44 -16
  54. camel/models/nemotron_model.py +71 -0
  55. camel/models/ollama_model.py +121 -0
  56. camel/models/open_source_model.py +8 -2
  57. camel/models/openai_model.py +14 -5
  58. camel/models/stub_model.py +3 -1
  59. camel/models/zhipuai_model.py +125 -0
  60. camel/prompts/__init__.py +6 -0
  61. camel/prompts/base.py +2 -1
  62. camel/prompts/descripte_video_prompt.py +33 -0
  63. camel/prompts/generate_text_embedding_data.py +79 -0
  64. camel/prompts/task_prompt_template.py +13 -3
  65. camel/retrievers/auto_retriever.py +20 -11
  66. camel/retrievers/base.py +4 -2
  67. camel/retrievers/bm25_retriever.py +2 -1
  68. camel/retrievers/cohere_rerank_retriever.py +2 -1
  69. camel/retrievers/vector_retriever.py +10 -4
  70. camel/societies/babyagi_playing.py +2 -1
  71. camel/societies/role_playing.py +18 -20
  72. camel/storages/graph_storages/base.py +1 -0
  73. camel/storages/graph_storages/neo4j_graph.py +5 -3
  74. camel/storages/vectordb_storages/base.py +2 -1
  75. camel/storages/vectordb_storages/milvus.py +5 -2
  76. camel/toolkits/github_toolkit.py +120 -26
  77. camel/types/__init__.py +5 -2
  78. camel/types/enums.py +95 -4
  79. camel/utils/__init__.py +11 -2
  80. camel/utils/commons.py +78 -4
  81. camel/utils/constants.py +26 -0
  82. camel/utils/token_counting.py +62 -7
  83. {camel_ai-0.1.5.1.dist-info → camel_ai-0.1.5.3.dist-info}/METADATA +82 -53
  84. camel_ai-0.1.5.3.dist-info/RECORD +151 -0
  85. camel_ai-0.1.5.1.dist-info/RECORD +0 -119
  86. {camel_ai-0.1.5.1.dist-info → camel_ai-0.1.5.3.dist-info}/WHEEL +0 -0
camel/types/enums.py CHANGED
@@ -29,6 +29,9 @@ class ModelType(Enum):
     GPT_4_32K = "gpt-4-32k"
     GPT_4_TURBO = "gpt-4-turbo"
     GPT_4O = "gpt-4o"
+    GLM_4 = "glm-4"
+    GLM_4V = 'glm-4v'
+    GLM_3_TURBO = "glm-3-turbo"
 
     STUB = "stub"
 
@@ -42,14 +45,21 @@ class ModelType(Enum):
     CLAUDE_2_0 = "claude-2.0"
     CLAUDE_INSTANT_1_2 = "claude-instant-1.2"
 
-    # 3 models
+    # Claude3 models
     CLAUDE_3_OPUS = "claude-3-opus-20240229"
     CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
     CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
 
+    # Nvidia models
+    NEMOTRON_4_REWARD = "nvidia/nemotron-4-340b-reward"
+
     @property
     def value_for_tiktoken(self) -> str:
-        return self.value if self is not ModelType.STUB else "gpt-3.5-turbo"
+        return (
+            self.value
+            if self is not ModelType.STUB and not isinstance(self, str)
+            else "gpt-3.5-turbo"
+        )
 
     @property
     def is_openai(self) -> bool:
@@ -62,6 +72,15 @@ class ModelType(Enum):
             ModelType.GPT_4O,
         }
 
+    @property
+    def is_zhipuai(self) -> bool:
+        r"""Returns whether this type of models is an ZhipuAI model."""
+        return self in {
+            ModelType.GLM_3_TURBO,
+            ModelType.GLM_4,
+            ModelType.GLM_4V,
+        }
+
     @property
     def is_open_source(self) -> bool:
         r"""Returns whether this type of models is open-source."""
@@ -87,6 +106,17 @@ class ModelType(Enum):
             ModelType.CLAUDE_3_HAIKU,
         }
 
+    @property
+    def is_nvidia(self) -> bool:
+        r"""Returns whether this type of models is Nvidia-released model.
+
+        Returns:
+            bool: Whether this type of models is nvidia.
+        """
+        return self in {
+            ModelType.NEMOTRON_4_REWARD,
+        }
+
     @property
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.
@@ -103,6 +133,12 @@ class ModelType(Enum):
             return 128000
         elif self is ModelType.GPT_4O:
             return 128000
+        elif self == ModelType.GLM_4:
+            return 8192
+        elif self == ModelType.GLM_3_TURBO:
+            return 8192
+        elif self == ModelType.GLM_4V:
+            return 1024
         elif self is ModelType.STUB:
             return 4096
         elif self is ModelType.LLAMA_2:
@@ -112,7 +148,7 @@ class ModelType(Enum):
             return 2048
         elif self is ModelType.VICUNA_16K:
             return 16384
-        if self in {ModelType.CLAUDE_2_0, ModelType.CLAUDE_INSTANT_1_2}:
+        elif self in {ModelType.CLAUDE_2_0, ModelType.CLAUDE_INSTANT_1_2}:
             return 100_000
         elif self in {
             ModelType.CLAUDE_2_1,
@@ -121,6 +157,8 @@ class ModelType(Enum):
             ModelType.CLAUDE_3_HAIKU,
         }:
             return 200_000
+        elif self is ModelType.NEMOTRON_4_REWARD:
+            return 4096
         else:
             raise ValueError("Unknown model type")
 
@@ -189,8 +227,10 @@ class TaskType(Enum):
     EVALUATION = "evaluation"
     SOLUTION_EXTRACTION = "solution_extraction"
     ROLE_DESCRIPTION = "role_description"
+    GENERATE_TEXT_EMBEDDING_DATA = "generate_text_embedding_data"
     OBJECT_RECOGNITION = "object_recognition"
     DEFAULT = "default"
+    VIDEO_DESCRIPTION = "video_description"
 
 
 class VectorDistance(Enum):
@@ -238,7 +278,7 @@ class OpenAIImageType(Enum, metaclass=OpenAIImageTypeMeta):
     GIF = "gif"
 
 
-class OpenAIImageDetailType(Enum):
+class OpenAIVisionDetailType(Enum):
     AUTO = "auto"
     LOW = "low"
     HIGH = "high"
@@ -253,6 +293,57 @@ class OpenAPIName(Enum):
     COURSERA = "coursera"
     KLARNA = "klarna"
     SPEAK = "speak"
+    NASA_APOD = "nasa_apod"
+    BIZTOC = "biztoc"
+    CREATE_QR_CODE = "create_qr_code"
+    OUTSCHOOL = "outschool"
+    WEB_SCRAPER = "web_scraper"
+
+
+class ModelPlatformType(Enum):
+    OPENAI = "openai"
+    AZURE = "azure"
+    ANTHROPIC = "anthropic"
+    OPENSOURCE = "opensource"
+    OLLAMA = "ollama"
+    LITELLM = "litellm"
+    ZHIPU = "zhipuai"
+    DEFAULT = "default"
+
+    @property
+    def is_openai(self) -> bool:
+        r"""Returns whether this platform is openai."""
+        return self is ModelPlatformType.OPENAI
+
+    @property
+    def is_azure(self) -> bool:
+        r"""Returns whether this platform is azure."""
+        return self is ModelPlatformType.AZURE
+
+    @property
+    def is_anthropic(self) -> bool:
+        r"""Returns whether this platform is anthropic."""
+        return self is ModelPlatformType.ANTHROPIC
+
+    @property
+    def is_ollama(self) -> bool:
+        r"""Returns whether this platform is ollama."""
+        return self is ModelPlatformType.OLLAMA
+
+    @property
+    def is_litellm(self) -> bool:
+        r"""Returns whether this platform is litellm."""
+        return self is ModelPlatformType.LITELLM
+
+    @property
+    def is_zhipuai(self) -> bool:
+        r"""Returns whether this platform is zhipu."""
+        return self is ModelPlatformType.ZHIPU
+
+    @property
+    def is_open_source(self) -> bool:
+        r"""Returns whether this platform is opensource."""
+        return self is ModelPlatformType.OPENSOURCE
 
 
 class AudioModelType(Enum):
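
For orientation, a minimal sketch of how the additions above could be exercised. It assumes only what the diff shows, plus the assumption that camel/types/__init__.py (also changed in this release, +5 -2) re-exports ModelPlatformType alongside ModelType:

from camel.types import ModelPlatformType, ModelType

# New ZhipuAI model types and their context windows, per the diff above.
assert ModelType.GLM_4.is_zhipuai
assert ModelType.GLM_4.token_limit == 8192
assert ModelType.GLM_4V.token_limit == 1024

# New Nvidia reward model and the new platform enum.
assert ModelType.NEMOTRON_4_REWARD.is_nvidia
assert ModelPlatformType.ZHIPU.is_zhipuai
assert not ModelPlatformType.OLLAMA.is_openai
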
camel/utils/__init__.py CHANGED
@@ -11,30 +11,35 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
 from .commons import (
     PYDANTIC_V2,
-    api_key_required,
     api_keys_required,
     check_server_running,
+    create_chunks,
     dependencies_required,
     download_tasks,
     get_first_int,
     get_prompt_template_key_words,
     get_system_information,
     get_task_list,
+    model_api_key_required,
     print_text_animated,
+    text_extract_from_web,
     to_pascal,
 )
+from .constants import Constants
 from .token_counting import (
     AnthropicTokenCounter,
     BaseTokenCounter,
+    LiteLLMTokenCounter,
     OpenAITokenCounter,
     OpenSourceTokenCounter,
     get_model_encoding,
 )
 
 __all__ = [
-    'api_key_required',
+    'model_api_key_required',
     'print_text_animated',
     'get_prompt_template_key_words',
     'get_first_int',
@@ -49,6 +54,10 @@ __all__ = [
     'BaseTokenCounter',
     'OpenAITokenCounter',
     'OpenSourceTokenCounter',
+    'LiteLLMTokenCounter',
+    'Constants',
+    'text_extract_from_web',
+    'create_chunks',
     'dependencies_required',
     'api_keys_required',
 ]
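
Downstream code that imported the removed api_key_required name must switch to the new exports. A hedged sketch of the updated imports, using only names present in the __all__ list above:

from camel.utils import (
    Constants,
    LiteLLMTokenCounter,
    create_chunks,
    model_api_key_required,
    text_extract_from_web,
)

The old spelling, from camel.utils import api_key_required, now raises an ImportError.
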
camel/utils/commons.py CHANGED
@@ -30,8 +30,9 @@ from camel.types import TaskType
 F = TypeVar('F', bound=Callable[..., Any])
 
 
-def api_key_required(func: F) -> F:
-    r"""Decorator that checks if the API key is available either as an environment variable or passed directly.
+def model_api_key_required(func: F) -> F:
+    r"""Decorator that checks if the API key is available either as an
+    environment variable or passed directly for a model.
 
     Args:
         func (callable): The function to be wrapped.
@@ -42,6 +43,9 @@ def api_key_required(func: F) -> F:
     Raises:
         ValueError: If the API key is not found, either as an environment
             variable or directly passed.
+
+    Note:
+        Supported model type: `OpenAI` and `Anthropic`.
     """
 
     @wraps(func)
@@ -50,10 +54,18 @@ def api_key_required(func: F) -> F:
             if not self._api_key and 'OPENAI_API_KEY' not in os.environ:
                 raise ValueError('OpenAI API key not found.')
             return func(self, *args, **kwargs)
+        elif self.model_type.is_zhipuai:
+            if 'ZHIPUAI_API_KEY' not in os.environ:
+                raise ValueError('ZhiPuAI API key not found.')
+            return func(self, *args, **kwargs)
         elif self.model_type.is_anthropic:
-            if 'ANTHROPIC_API_KEY' not in os.environ:
+            if not self._api_key and 'ANTHROPIC_API_KEY' not in os.environ:
                 raise ValueError('Anthropic API key not found.')
             return func(self, *args, **kwargs)
+        elif self.model_type.is_nvidia:
+            if not self._api_key and 'NVIDIA_API_KEY' not in os.environ:
+                raise ValueError('NVIDIA API key not found.')
+            return func(self, *args, **kwargs)
         else:
             raise ValueError('Unsupported model type.')
 
@@ -274,7 +286,9 @@ def api_keys_required(*required_keys: str) -> Callable[[F], F]:
         def wrapper(*args: Any, **kwargs: Any) -> Any:
             missing_keys = [k for k in required_keys if k not in os.environ]
             if missing_keys:
-                raise ValueError(f"Missing API keys: {', '.join(missing_keys)}")
+                raise ValueError(
+                    f"Missing API keys: {', '.join(missing_keys)}"
+                )
             return func(*args, **kwargs)
 
         return cast(F, wrapper)
@@ -326,3 +340,63 @@ def to_pascal(snake: str) -> str:
 
 
 PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
+
+
+def text_extract_from_web(url: str) -> str:
+    r"""Get the text information from given url.
+
+    Args:
+        url (str): The website you want to search.
+
+    Returns:
+        str: All texts extract from the web.
+    """
+    try:
+        import requests
+        from newspaper import Article
+
+        # Request the target page
+        article = Article(url)
+        article.download()
+        article.parse()
+        text = article.text
+
+    except requests.RequestException as e:
+        text = f"Can't access {url}, error: {e}"
+
+    except Exception as e:
+        text = f"Can't extract text from {url}, error: {e}"
+
+    return text
+
+
+def create_chunks(text: str, n: int) -> List[str]:
+    r"""Returns successive n-sized chunks from provided text. Split a text
+    into smaller chunks of size n".
+
+    Args:
+        text (str): The text to be split.
+        n (int): The max length of a single chunk.
+
+    Returns:
+        List[str]: A list of split texts.
+    """
+
+    chunks = []
+    i = 0
+    while i < len(text):
+        # Find the nearest end of sentence within a range of 0.5 * n
+        # and 1.5 * n tokens
+        j = min(i + int(1.2 * n), len(text))
+        while j > i + int(0.8 * n):
+            # Decode the tokens and check for full stop or newline
+            chunk = text[i:j]
+            if chunk.endswith(".") or chunk.endswith("\n"):
+                break
+            j -= 1
+        # If no end of sentence found, use n tokens as the chunk size
+        if j == i + int(0.8 * n):
+            j = min(i + n, len(text))
+        chunks.append(text[i:j])
+        i = j
+    return chunks
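
Note that despite the "tokens" wording in its comments, create_chunks slices the raw string, so n is a character budget: a chunk is at most about 1.2 * n characters, and the function prefers to end a chunk at a period or newline once it is longer than 0.8 * n. A small illustrative call (the sample text is made up):

from camel.utils import create_chunks

sample = "First sentence. Second sentence. A third, somewhat longer sentence."
for chunk in create_chunks(sample, n=30):
    print(repr(chunk))
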
camel/utils/constants.py ADDED
@@ -0,0 +1,26 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the “License”);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an “AS IS” BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+
+class Constants:
+    # This value defines the default size (both width and height) for images
+    # extracted from a video.
+    VIDEO_DEFAULT_IMAGE_SIZE = 768
+
+    # This value defines the interval (in number of frames) at which images
+    # are extracted from the video.
+    VIDEO_IMAGE_EXTRACTION_INTERVAL = 50
+
+    # default plug of imageio to read video
+    VIDEO_DEFAULT_PLUG_PYAV = "pyav"
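
A brief sketch of how these constants might be consumed when sampling frames from a video. The imageio call and the file name are illustrative assumptions; only the constant names and values come from the file above:

import imageio.v3 as iio

from camel.utils import Constants

# Hypothetical frame-sampling loop driven by the new constants.
for index, frame in enumerate(
    iio.imiter("clip.mp4", plugin=Constants.VIDEO_DEFAULT_PLUG_PYAV)
):
    if index % Constants.VIDEO_IMAGE_EXTRACTION_INTERVAL == 0:
        # e.g. resize the frame to VIDEO_DEFAULT_IMAGE_SIZE x VIDEO_DEFAULT_IMAGE_SIZE
        pass
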
camel/utils/token_counting.py CHANGED
@@ -23,7 +23,7 @@ from typing import TYPE_CHECKING, List, Optional
 from anthropic import Anthropic
 from PIL import Image
 
-from camel.types import ModelType, OpenAIImageDetailType, OpenAIImageType
+from camel.types import ModelType, OpenAIImageType, OpenAIVisionDetailType
 
 if TYPE_CHECKING:
     from camel.messages import OpenAIMessage
@@ -245,6 +245,7 @@ class OpenAITokenCounter(BaseTokenCounter):
                         elif item["type"] == "image_url":
                             image_str: str = item["image_url"]["url"]
                             detail = item["image_url"]["detail"]
+
                             image_prefix_format = "data:image/{};base64,"
                             image_prefix: Optional[str] = None
                             for image_type in list(OpenAIImageType):
@@ -261,7 +262,7 @@
                             )
                             image = Image.open(image_bytes)
                             num_tokens += count_tokens_from_image(
-                                image, OpenAIImageDetailType(detail)
+                                image, OpenAIVisionDetailType(detail)
                             )
                 if key == "name":
                     num_tokens += self.tokens_per_name
@@ -295,13 +296,67 @@ class AnthropicTokenCounter(BaseTokenCounter):
         Returns:
             int: Number of tokens in the messages.
         """
-        prompt = messages_to_prompt(messages, self.model_type)
+        num_tokens = 0
+        for message in messages:
+            content = str(message["content"])
+            num_tokens += self.client.count_tokens(content)
+        return num_tokens
+
+
+class LiteLLMTokenCounter:
+    def __init__(self, model_type: str):
+        r"""Constructor for the token counter for LiteLLM models.
+
+        Args:
+            model_type (str): Model type for which tokens will be counted.
+        """
+        self.model_type = model_type
+        self._token_counter = None
+        self._completion_cost = None
+
+    @property
+    def token_counter(self):
+        if self._token_counter is None:
+            from litellm import token_counter
+
+            self._token_counter = token_counter
+        return self._token_counter
+
+    @property
+    def completion_cost(self):
+        if self._completion_cost is None:
+            from litellm import completion_cost
+
+            self._completion_cost = completion_cost
+        return self._completion_cost
 
-        return self.client.count_tokens(prompt)
+    def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
+        r"""Count number of tokens in the provided message list using
+        the tokenizer specific to this type of model.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in LiteLLM API format.
+
+        Returns:
+            int: Number of tokens in the messages.
+        """
+        return self.token_counter(model=self.model_type, messages=messages)
+
+    def calculate_cost_from_response(self, response: dict) -> float:
+        r"""Calculate the cost of the given completion response.
+
+        Args:
+            response (dict): The completion response from LiteLLM.
+
+        Returns:
+            float: The cost of the completion call in USD.
+        """
+        return self.completion_cost(completion_response=response)
 
 
 def count_tokens_from_image(
-    image: Image.Image, detail: OpenAIImageDetailType
+    image: Image.Image, detail: OpenAIVisionDetailType
 ) -> int:
     r"""Count image tokens for OpenAI vision model. An :obj:`"auto"`
     resolution model will be treated as :obj:`"high"`. All images with
@@ -315,13 +370,13 @@ def count_tokens_from_image(
 
     Args:
         image (PIL.Image.Image): Image to count number of tokens.
-        detail (OpenAIImageDetailType): Image detail type to count
+        detail (OpenAIVisionDetailType): Image detail type to count
            number of tokens.
 
    Returns:
        int: Number of tokens for the image given a detail type.
    """
-    if detail == OpenAIImageDetailType.LOW:
+    if detail == OpenAIVisionDetailType.LOW:
        return LOW_DETAIL_TOKENS
 
    width, height = image.size
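
A hedged usage sketch for the new LiteLLMTokenCounter shown above. The model name and messages are placeholders, and litellm must be installed since the class imports it lazily on first use:

from camel.utils import LiteLLMTokenCounter

counter = LiteLLMTokenCounter("gpt-3.5-turbo")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
print(counter.count_tokens_from_messages(messages))

# After a completion call, the same object can price the response:
# cost_in_usd = counter.calculate_cost_from_response(response)
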