chatlas 0.9.2__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


chatlas/_provider_ollama.py CHANGED
@@ -7,7 +7,7 @@ from typing import TYPE_CHECKING, Optional
 import orjson
 
 from ._chat import Chat
-from ._provider_openai import OpenAIProvider
+from ._provider_openai import ModelInfo, OpenAIProvider
 from ._utils import MISSING_TYPE, is_testing
 
 if TYPE_CHECKING:
@@ -90,18 +90,19 @@ def ChatOllama(
         raise RuntimeError("Can't find locally running ollama.")
 
     if model is None:
-        models = ollama_models(base_url)
+        models = ollama_model_info(base_url)
+        model_ids = [m["id"] for m in models]
         raise ValueError(
-            f"Must specify model. Locally installed models: {', '.join(models)}"
+            f"Must specify model. Locally installed models: {', '.join(model_ids)}"
         )
     if isinstance(seed, MISSING_TYPE):
         seed = 1014 if is_testing() else None
 
     return Chat(
-        provider=OpenAIProvider(
+        provider=OllamaProvider(
            api_key="ollama",  # ignored
            model=model,
-           base_url=f"{base_url}/v1",
+           base_url=base_url,
            seed=seed,
            name="Ollama",
            kwargs=kwargs,
@@ -110,10 +111,40 @@ def ChatOllama(
     )
 
 
-def ollama_models(base_url: str) -> list[str]:
-    res = urllib.request.urlopen(url=f"{base_url}/api/tags")
-    data = orjson.loads(res.read())
-    return [re.sub(":latest$", "", x["name"]) for x in data["models"]]
+class OllamaProvider(OpenAIProvider):
+    def __init__(self, *, api_key, model, base_url, seed, name, kwargs):
+        super().__init__(
+            api_key=api_key,
+            model=model,
+            base_url=f"{base_url}/v1",
+            seed=seed,
+            name=name,
+            kwargs=kwargs,
+        )
+        self.base_url = base_url
+
+    def list_models(self):
+        return ollama_model_info(self.base_url)
+
+
+def ollama_model_info(base_url: str) -> list[ModelInfo]:
+    response = urllib.request.urlopen(url=f"{base_url}/api/tags")
+    data = orjson.loads(response.read())
+    models = data.get("models", [])
+    if not models:
+        return []
+
+    res: list[ModelInfo] = []
+    for model in models:
+        # TODO: add capabilities
+        info: ModelInfo = {
+            "id": re.sub(":latest$", "", model["name"]),
+            "created_at": model["modified_at"],
+            "size": model["size"],
+        }
+        res.append(info)
+
+    return res
 
 
 def has_ollama(base_url):
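
The new `ollama_model_info()` is a thin wrapper over Ollama's public `GET /api/tags` endpoint. A minimal standalone sketch of the same call, assuming an Ollama server on the default local port (stdlib `json` stands in for `orjson`):

```python
# Sketch: list locally installed Ollama models via /api/tags,
# mirroring ollama_model_info() above. Assumes a local Ollama server.
import json
import re
import urllib.request

base_url = "http://localhost:11434"
with urllib.request.urlopen(f"{base_url}/api/tags") as response:
    data = json.loads(response.read())

for m in data.get("models", []):
    # Strip the ":latest" suffix, as the provider does
    print(re.sub(":latest$", "", m["name"]), m["modified_at"], m["size"])
```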
chatlas/_provider_openai.py CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import base64
+from datetime import datetime
 from typing import TYPE_CHECKING, Any, Literal, Optional, cast, overload
 
 import orjson
@@ -23,8 +24,8 @@ from ._content import (
 )
 from ._logging import log_model_default
 from ._merge import merge_dicts
-from ._provider import Provider, StandardModelParamNames, StandardModelParams
-from ._tokens import tokens_log
+from ._provider import ModelInfo, Provider, StandardModelParamNames, StandardModelParams
+from ._tokens import get_token_pricing, tokens_log
 from ._tools import Tool, basemodel_to_param_schema
 from ._turn import Turn, user_turn
 from ._utils import MISSING, MISSING_TYPE, is_testing, split_http_client_kwargs
@@ -200,6 +201,32 @@ class OpenAIProvider(
         self._client = OpenAI(**sync_kwargs)  # type: ignore
         self._async_client = AsyncOpenAI(**async_kwargs)
 
+    def list_models(self):
+        models = self._client.models.list()
+
+        res: list[ModelInfo] = []
+        for m in models:
+            pricing = get_token_pricing(self.name, m.id) or {}
+            info: ModelInfo = {
+                "id": m.id,
+                "owned_by": m.owned_by,
+                "input": pricing.get("input"),
+                "output": pricing.get("output"),
+                "cached_input": pricing.get("cached_input"),
+            }
+            # DeepSeek compatibility
+            if m.created is not None:
+                info["created_at"] = datetime.fromtimestamp(m.created).date()
+            res.append(info)
+
+        # More recent models first
+        res.sort(
+            key=lambda x: x.get("created_at", 0),
+            reverse=True,
+        )
+
+        return res
+
     @overload
     def chat_perform(
         self,
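
With `list_models()` on the provider, installed models and any known pricing become inspectable from user code. A usage sketch, assuming `OPENAI_API_KEY` is set and that the `Chat` object exposes the provider passed to its constructor as `chat.provider`:

```python
# Sketch: enumerate available models and pricing via the new method.
# Assumes OPENAI_API_KEY is set in the environment.
from chatlas import ChatOpenAI

chat = ChatOpenAI(model="gpt-4.1")
for info in chat.provider.list_models():
    # Pricing keys are None for models missing from the pricing table.
    print(info["id"], info.get("created_at"), info.get("input"))
```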
@@ -310,8 +337,7 @@
             del kwargs_full["tools"]
 
         if stream and "stream_options" not in kwargs_full:
-            if self.__class__.__name__ != "DatabricksProvider":
-                kwargs_full["stream_options"] = {"include_usage": True}
+            kwargs_full["stream_options"] = {"include_usage": True}
 
         return kwargs_full
 
@@ -411,7 +437,9 @@
             if isinstance(x, ContentText):
                 content_parts.append({"type": "text", "text": x.text})
             elif isinstance(x, ContentJson):
-                content_parts.append({"type": "text", "text": ""})
+                content_parts.append(
+                    {"type": "text", "text": "<structured data/>"}
+                )
             elif isinstance(x, ContentToolRequest):
                 tool_calls.append(
                     {
@@ -450,7 +478,7 @@
             if isinstance(x, ContentText):
                 contents.append({"type": "text", "text": x.text})
             elif isinstance(x, ContentJson):
-                contents.append({"type": "text", "text": ""})
+                contents.append({"type": "text", "text": "<structured data/>"})
             elif isinstance(x, ContentPDF):
                 contents.append(
                     {
@@ -522,7 +550,10 @@
         contents: list[Content] = []
         if message.content is not None:
             if has_data_model:
-                data = orjson.loads(message.content)
+                data = message.content
+                # Some providers (e.g., Cloudflare) may already provide a dict
+                if not isinstance(data, dict):
+                    data = orjson.loads(data)
                 contents = [ContentJson(value=data)]
             else:
                 contents = [ContentText(text=message.content)]
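
The `has_data_model` branch now tolerates providers that hand back structured output as an already-decoded dict instead of a JSON string. The guard reduces to a small generic pattern (a sketch, not chatlas API; stdlib `json` stands in for `orjson`):

```python
# Sketch of the defensive-parse pattern used above: accept either a
# JSON string or an already-decoded dict.
import json
from typing import Any, Union


def ensure_dict(content: Union[str, dict[str, Any]]) -> dict[str, Any]:
    if isinstance(content, dict):  # e.g., Cloudflare returns a dict directly
        return content
    return json.loads(content)


assert ensure_dict('{"a": 1}') == {"a": 1}
assert ensure_dict({"a": 1}) == {"a": 1}
```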
chatlas/_provider_openrouter.py ADDED
@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+import os
+from typing import TYPE_CHECKING, Optional
+
+from ._chat import Chat
+from ._logging import log_model_default
+from ._provider_openai import OpenAIProvider
+from ._utils import MISSING, MISSING_TYPE, is_testing
+
+if TYPE_CHECKING:
+    from ._provider_openai import ChatCompletion
+    from .types.openai import ChatClientArgs, SubmitInputArgs
+
+
+def ChatOpenRouter(
+    *,
+    system_prompt: Optional[str] = None,
+    model: Optional[str] = None,
+    api_key: Optional[str] = None,
+    base_url: str = "https://openrouter.ai/api/v1",
+    seed: Optional[int] | MISSING_TYPE = MISSING,
+    kwargs: Optional["ChatClientArgs"] = None,
+) -> Chat["SubmitInputArgs", ChatCompletion]:
+    """
+    Chat with one of the many models hosted on OpenRouter.
+
+    OpenRouter provides access to a wide variety of language models from different providers
+    through a unified API. Support for features depends on the underlying model that you use.
+
+    Prerequisites
+    -------------
+
+    ::: {.callout-note}
+    ## API key
+
+    Sign up at <https://openrouter.ai> to get an API key.
+    :::
+
+    Examples
+    --------
+
+    ```python
+    import os
+    from chatlas import ChatOpenRouter
+
+    chat = ChatOpenRouter(api_key=os.getenv("OPENROUTER_API_KEY"))
+    chat.chat("What is the capital of France?")
+    ```
+
+    Parameters
+    ----------
+    system_prompt
+        A system prompt to set the behavior of the assistant.
+    model
+        The model to use for the chat. The default, None, will pick a reasonable
+        default, and warn you about it. We strongly recommend explicitly choosing
+        a model for all but the most casual use. See <https://openrouter.ai/models>
+        for available models.
+    api_key
+        The API key to use for authentication. You generally should not supply
+        this directly, but instead set the `OPENROUTER_API_KEY` environment variable.
+    base_url
+        The base URL to the endpoint; the default uses OpenRouter's API.
+    seed
+        Optional integer seed that the model uses to try and make output more
+        reproducible.
+    kwargs
+        Additional arguments to pass to the `openai.OpenAI()` client constructor.
+
+    Returns
+    -------
+    Chat
+        A chat object that retains the state of the conversation.
+
+    Note
+    ----
+    This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`) with
+    the defaults tweaked for OpenRouter.
+
+    Note
+    ----
+    Pasting an API key into a chat constructor (e.g., `ChatOpenRouter(api_key="...")`)
+    is the simplest way to get started, and is fine for interactive use, but is
+    problematic for code that may be shared with others.
+
+    Instead, consider using environment variables or a configuration file to manage
+    your credentials. One popular way to manage credentials is to use a `.env` file
+    to store your credentials, and then use the `python-dotenv` package to load them
+    into your environment.
+
+    ```shell
+    pip install python-dotenv
+    ```
+
+    ```shell
+    # .env
+    OPENROUTER_API_KEY=...
+    ```
+
+    ```python
+    from chatlas import ChatOpenRouter
+    from dotenv import load_dotenv
+
+    load_dotenv()
+    chat = ChatOpenRouter()
+    chat.console()
+    ```
+
+    Another, more general, solution is to load your environment variables into the shell
+    before starting Python (maybe in a `.bashrc`, `.zshrc`, etc. file):
+
+    ```shell
+    export OPENROUTER_API_KEY=...
+    ```
+    """
+    if model is None:
+        model = log_model_default("gpt-4.1")
+
+    if api_key is None:
+        api_key = os.getenv("OPENROUTER_API_KEY")
+
+    if isinstance(seed, MISSING_TYPE):
+        seed = 1014 if is_testing() else None
+
+    kwargs2 = add_default_headers(kwargs or {})
+
+    return Chat(
+        provider=OpenAIProvider(
+            api_key=api_key,
+            model=model,
+            base_url=base_url,
+            seed=seed,
+            name="OpenRouter",
+            kwargs=kwargs2,
+        ),
+        system_prompt=system_prompt,
+    )
+
+
+def add_default_headers(kwargs: "ChatClientArgs") -> "ChatClientArgs":
+    headers = kwargs.get("default_headers", None)
+    # https://openrouter.ai/docs/api-keys
+    default_headers = {
+        "HTTP-Referer": "https://posit-dev.github.io/chatlas",
+        "X-Title": "chatlas",
+        **(headers or {}),
+    }
+    return {"default_headers": default_headers, **kwargs}
chatlas/_provider_perplexity.py CHANGED
@@ -126,7 +126,7 @@ def ChatPerplexity(
         seed = 1014 if is_testing() else None
 
     return Chat(
-        provider=OpenAIProvider(
+        provider=PerplexityProvider(
             api_key=api_key,
             model=model,
             base_url=base_url,
@@ -136,3 +136,11 @@ def ChatPerplexity(
         ),
         system_prompt=system_prompt,
     )
+
+
+class PerplexityProvider(OpenAIProvider):
+    def list_models(self):
+        raise NotImplementedError(
+            ".list_models() is not yet implemented for Perplexity."
+            " To view available models online, see https://docs.perplexity.ai/getting-started/models"
+        )
chatlas/_provider_portkey.py ADDED
@@ -0,0 +1,131 @@
+from __future__ import annotations
+
+import os
+from typing import TYPE_CHECKING, Optional
+
+from ._chat import Chat
+from ._logging import log_model_default
+from ._provider_openai import OpenAIProvider
+from ._utils import drop_none
+
+if TYPE_CHECKING:
+    from ._provider_openai import ChatCompletion
+    from .types.openai import ChatClientArgs, SubmitInputArgs
+
+
+def ChatPortkey(
+    *,
+    system_prompt: Optional[str] = None,
+    model: Optional[str] = None,
+    api_key: Optional[str] = None,
+    virtual_key: Optional[str] = None,
+    base_url: str = "https://api.portkey.ai/v1",
+    kwargs: Optional["ChatClientArgs"] = None,
+) -> Chat["SubmitInputArgs", ChatCompletion]:
+    """
+    Chat with a model hosted on PortkeyAI
+
+    [PortkeyAI](https://portkey.ai/docs/product/ai-gateway/universal-api)
+    provides an interface (AI Gateway) to connect through its Universal API to a
+    variety of LLM providers with a single endpoint.
+
+    Prerequisites
+    -------------
+
+    ::: {.callout-note}
+    ## Portkey credentials
+
+    Follow the instructions at <https://portkey.ai/docs/introduction/make-your-first-request>
+    to get started making requests to PortkeyAI. You will need to set the
+    `PORTKEY_API_KEY` environment variable to your Portkey API key, and optionally
+    the `PORTKEY_VIRTUAL_KEY` environment variable to your virtual key.
+    :::
+
+    Examples
+    --------
+    ```python
+    import os
+    from chatlas import ChatPortkey
+
+    chat = ChatPortkey(api_key=os.getenv("PORTKEY_API_KEY"))
+    chat.chat("What is the capital of France?")
+    ```
+
+    Parameters
+    ----------
+    system_prompt
+        A system prompt to set the behavior of the assistant.
+    model
+        The model to use for the chat. The default, None, will pick a reasonable
+        default, and warn you about it. We strongly recommend explicitly
+        choosing a model for all but the most casual use.
+    api_key
+        The API key to use for authentication. You generally should not supply
+        this directly, but instead set the `PORTKEY_API_KEY` environment variable.
+    virtual_key
+        An (optional) virtual identifier, storing the LLM provider's API key. See
+        [documentation](https://portkey.ai/docs/product/ai-gateway/virtual-keys).
+        You generally should not supply this directly, but instead set the
+        `PORTKEY_VIRTUAL_KEY` environment variable.
+    base_url
+        The base URL for the Portkey API. The default is suitable for most users.
+    kwargs
+        Additional arguments to pass to the OpenAIProvider, such as headers or
+        other client configuration options.
+
+    Returns
+    -------
+    Chat
+        A chat object that retains the state of the conversation.
+
+    Notes
+    -----
+    This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`) with
+    the defaults tweaked for PortkeyAI.
+
+    """
+    if model is None:
+        model = log_model_default("gpt-4.1")
+    if api_key is None:
+        api_key = os.getenv("PORTKEY_API_KEY")
+
+    kwargs2 = add_default_headers(
+        kwargs or {},
+        api_key=api_key,
+        virtual_key=virtual_key,
+    )
+
+    return Chat(
+        provider=OpenAIProvider(
+            api_key=api_key,
+            model=model,
+            base_url=base_url,
+            name="Portkey",
+            kwargs=kwargs2,
+        ),
+        system_prompt=system_prompt,
+    )
+
+
+def add_default_headers(
+    kwargs: "ChatClientArgs",
+    api_key: Optional[str] = None,
+    virtual_key: Optional[str] = None,
+) -> "ChatClientArgs":
+    headers = kwargs.get("default_headers", None)
+    default_headers = drop_none(
+        {
+            "x-portkey-api-key": api_key,
+            "x-portkey-virtual-key": virtual_key,
+            **(headers or {}),
+        }
+    )
+    return {"default_headers": default_headers, **kwargs}
+
+
+class PortkeyProvider(OpenAIProvider):
+    def list_models(self):
+        raise NotImplementedError(
+            ".list_models() is not yet implemented for Portkey. "
+            "To view model availability online, see https://portkey.ai/docs/product/model-catalog"
+        )
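
Unlike the OpenRouter helper, the Portkey variant routes the merged dict through `drop_none()`, so an unset `api_key` or `virtual_key` never becomes a `None`-valued header. A sketch of that behavior, with a stand-in for the internal `chatlas._utils.drop_none` helper:

```python
# Sketch: None-valued Portkey headers are dropped before being sent.
def drop_none(d: dict) -> dict:
    # Stand-in for chatlas' internal helper of the same name
    return {k: v for k, v in d.items() if v is not None}


headers = drop_none(
    {
        "x-portkey-api-key": "pk-123",
        "x-portkey-virtual-key": None,  # PORTKEY_VIRTUAL_KEY not set
    }
)
assert headers == {"x-portkey-api-key": "pk-123"}
```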
chatlas/_provider_snowflake.py CHANGED
@@ -194,6 +194,12 @@ class SnowflakeProvider(
         session = Session.builder.configs(configs).create()
         self._cortex_service = Root(session).cortex_inference_service
 
+    def list_models(self):
+        raise NotImplementedError(
+            ".list_models() is not yet implemented for Snowflake. "
+            "To view model availability online, see https://docs.snowflake.com/user-guide/snowflake-cortex/aisql#availability"
+        )
+
     @overload
     def chat_perform(
         self,
chatlas/_tokens.py CHANGED
@@ -8,7 +8,7 @@ from typing import TYPE_CHECKING
 import orjson
 
 from ._logging import logger
-from ._typing_extensions import TypedDict
+from ._typing_extensions import NotRequired, TypedDict
 
 if TYPE_CHECKING:
     from ._provider import Provider
@@ -109,11 +109,11 @@ class TokenPrice(TypedDict):
     """The provider name (e.g., "OpenAI", "Anthropic", etc.)"""
     model: str
     """The model name (e.g., "gpt-3.5-turbo", "claude-2", etc.)"""
-    cached_input: float
+    cached_input: NotRequired[float]
     """The cost per user token in USD per million tokens for cached input"""
     input: float
     """The cost per user token in USD per million tokens"""
-    output: float
+    output: NotRequired[float]
     """The cost per assistant token in USD per million tokens"""
 
 
@@ -160,8 +160,8 @@ def compute_cost(
     if price is None:
         return None
     input_price = input_tokens * (price["input"] / 1e6)
-    output_price = output_tokens * (price["output"] / 1e6)
-    cached_price = cached_tokens * (price["cached_input"] / 1e6)
+    output_price = output_tokens * (price.get("output", 0) / 1e6)
+    cached_price = cached_tokens * (price.get("cached_input", 0) / 1e6)
     return input_price + output_price + cached_price
 
 
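Marking `cached_input` and `output` as `NotRequired` lets a `TokenPrice` entry omit them, and `compute_cost()` now defaults missing fields to zero via `.get()`. A self-contained sketch of the same pattern:

```python
# Sketch: NotRequired fields may be absent; cost math defaults them to 0.
import sys

if sys.version_info >= (3, 11):
    from typing import NotRequired, TypedDict
else:
    from typing_extensions import NotRequired, TypedDict


class TokenPrice(TypedDict):
    provider: str
    model: str
    input: float
    output: NotRequired[float]
    cached_input: NotRequired[float]


price: TokenPrice = {"provider": "X", "model": "m", "input": 2.0}

# 1000 input tokens at $2 per million; no output/cached pricing known
cost = 1000 * (price["input"] / 1e6) + 500 * (price.get("output", 0) / 1e6)
assert abs(cost - 0.002) < 1e-12
```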
chatlas/_tools.py CHANGED
@@ -22,6 +22,7 @@ __all__ = (
 if TYPE_CHECKING:
     from mcp import ClientSession as MCPClientSession
     from mcp import Tool as MCPTool
+    from mcp.types import ToolAnnotations
     from openai.types.chat import ChatCompletionToolParam
 
 
@@ -42,6 +43,9 @@ class Tool:
         A description of what the tool does.
     parameters
         A dictionary describing the input parameters and their types.
+    annotations
+        Additional properties that describe the tool and its behavior. Should be
+        a `from mcp.types import ToolAnnotations` instance.
     """
 
     func: Callable[..., Any] | Callable[..., Awaitable[Any]]
@@ -53,9 +57,11 @@
         name: str,
         description: str,
         parameters: dict[str, Any],
+        annotations: "Optional[ToolAnnotations]" = None,
     ):
         self.name = name
         self.func = func
+        self.annotations = annotations
         self._is_async = _utils.is_async_callable(func)
         self.schema: "ChatCompletionToolParam" = {
             "type": "function",
@@ -72,6 +78,7 @@
         func: Callable[..., Any] | Callable[..., Awaitable[Any]],
         *,
         model: Optional[type[BaseModel]] = None,
+        annotations: "Optional[ToolAnnotations]" = None,
     ) -> "Tool":
         """
         Create a Tool from a Python function
@@ -86,6 +93,9 @@
            The primary reason why you might want to provide a model in
            Note that the name and docstring of the model takes precedence over the
            name and docstring of the function.
+        annotations
+            Additional properties that describe the tool and its behavior. Should be
+            a `from mcp.types import ToolAnnotations` instance.
 
         Returns
         -------
@@ -118,6 +128,7 @@
             name=model.__name__ or func.__name__,
             description=model.__doc__ or func.__doc__ or "",
             parameters=params,
+            annotations=annotations,
         )
 
     @classmethod
@@ -197,6 +208,7 @@
             name=mcp_tool.name,
             description=mcp_tool.description or "",
             parameters=params,
+            annotations=mcp_tool.annotations,
         )
 
 
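Taken together, these changes let MCP tool metadata ride along from registration through to inspection. A usage sketch, assuming the `mcp` package is installed and that `Tool` is importable from the top-level `chatlas` namespace:

```python
# Sketch: attach MCP ToolAnnotations when building a Tool from a function.
from mcp.types import ToolAnnotations

from chatlas import Tool


def add(x: int, y: int) -> int:
    """Add two numbers."""
    return x + y


tool = Tool.from_func(
    add,
    annotations=ToolAnnotations(title="Adder", readOnlyHint=True),
)
print(tool.name, tool.annotations)
```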
chatlas/_typing_extensions.py CHANGED
@@ -14,13 +14,13 @@ else:
     # they should both come from the same typing module.
     # https://peps.python.org/pep-0655/#usage-in-python-3-11
     if sys.version_info >= (3, 11):
-        from typing import Required, TypedDict
+        from typing import NotRequired, Required, TypedDict
     else:
-        from typing_extensions import Required, TypedDict
+        from typing_extensions import NotRequired, Required, TypedDict
 
 
 # The only purpose of the following line is so that pyright will put all of the
 # conditional imports into the .pyi file when generating type stubs. Without this line,
 # pyright will not include the above imports in the generated .pyi file, and it will
 # result in a lot of red squiggles in user code.
-_: "ParamSpec | TypeGuard | is_typeddict | Required | TypedDict"  # type: ignore
+_: "ParamSpec | TypeGuard | is_typeddict | NotRequired | Required | TypedDict"  # type: ignore
chatlas/_version.py CHANGED
@@ -1,7 +1,14 @@
 # file generated by setuptools-scm
 # don't change, don't track in version control
 
-__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
+__all__ = [
+    "__version__",
+    "__version_tuple__",
+    "version",
+    "version_tuple",
+    "__commit_id__",
+    "commit_id",
+]
 
 TYPE_CHECKING = False
 if TYPE_CHECKING:
@@ -9,13 +16,19 @@ if TYPE_CHECKING:
     from typing import Union
 
     VERSION_TUPLE = Tuple[Union[int, str], ...]
+    COMMIT_ID = Union[str, None]
 else:
     VERSION_TUPLE = object
+    COMMIT_ID = object
 
 version: str
 __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
+commit_id: COMMIT_ID
+__commit_id__: COMMIT_ID
 
-__version__ = version = '0.9.2'
-__version_tuple__ = version_tuple = (0, 9, 2)
+__version__ = version = '0.11.0'
+__version_tuple__ = version_tuple = (0, 11, 0)
+
+__commit_id__ = commit_id = None