clai 0.4.3.tar.gz → 0.4.5.tar.gz

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of clai might be problematic.

@@ -1,8 +1,8 @@
 Metadata-Version: 2.4
 Name: clai
-Version: 0.4.3
-Summary: PydanticAI CLI: command line interface to chat to LLMs
-Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>
+Version: 0.4.5
+Summary: Pydantic AI CLI: command line interface to chat to LLMs
+Author-email: Samuel Colvin <samuel@pydantic.dev>, Marcelo Trylesinski <marcelotryle@gmail.com>, David Montague <david@pydantic.dev>, Alex Hall <alex@pydantic.dev>, Douwe Maan <douwe@pydantic.dev>
 License-Expression: MIT
 License-File: LICENSE
 Classifier: Development Status :: 4 - Beta
@@ -25,7 +25,7 @@ Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Internet
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
-Requires-Dist: pydantic-ai==0.4.3
+Requires-Dist: pydantic-ai==0.4.5
 Description-Content-Type: text/markdown
 
 # clai
@@ -38,7 +38,7 @@ Description-Content-Type: text/markdown
 
 (pronounced "clay")
 
-Command line interface to chat to LLMs, part of the [PydanticAI project](https://github.com/pydantic/pydantic-ai).
+Command line interface to chat to LLMs, part of the [Pydantic AI project](https://github.com/pydantic/pydantic-ai).
 
 ## Usage
 
@@ -85,7 +85,7 @@ Either way, running `clai` will start an interactive session where you can chat
 ```
 usage: clai [-h] [-m [MODEL]] [-a AGENT] [-l] [-t [CODE_THEME]] [--no-stream] [--version] [prompt]
 
-PydanticAI CLI v...
+Pydantic AI CLI v...
 
 Special prompts:
 * `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
@@ -98,7 +98,7 @@ positional arguments:
 options:
   -h, --help            show this help message and exit
   -m [MODEL], --model [MODEL]
-                        Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4o" or "anthropic:claude-3-7-sonnet-latest". Defaults to "openai:gpt-4o".
+                        Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4.1" or "anthropic:claude-sonnet-4-0". Defaults to "openai:gpt-4.1".
   -a AGENT, --agent AGENT
                        Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"
   -l, --list-models     List all available models and exit
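For context on the `--agent` flag shown in the help text above, a minimal custom agent module might look like the sketch below. This is an illustration, not code from the package: the module name `my_agents`, the variable `my_agent`, and the prompt text are hypothetical, and it assumes the `pydantic_ai.Agent` constructor accepts a `"<provider>:<model>"` string and a `system_prompt` argument.

```python
# my_agents.py -- hypothetical module, loadable via `clai --agent my_agents:my_agent`
from pydantic_ai import Agent

# The model string uses the same "<provider>:<model>" format as the -m/--model flag.
my_agent = Agent(
    'openai:gpt-4.1',
    system_prompt='You are a concise assistant for command-line questions.',
)
```

Running `clai --agent my_agents:my_agent "hello"` would then use this agent instead of the default one, assuming the module is importable from the current environment.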
@@ -8,7 +8,7 @@
 
 (pronounced "clay")
 
-Command line interface to chat to LLMs, part of the [PydanticAI project](https://github.com/pydantic/pydantic-ai).
+Command line interface to chat to LLMs, part of the [Pydantic AI project](https://github.com/pydantic/pydantic-ai).
 
 ## Usage
 
@@ -55,7 +55,7 @@ Either way, running `clai` will start an interactive session where you can chat
 ```
 usage: clai [-h] [-m [MODEL]] [-a AGENT] [-l] [-t [CODE_THEME]] [--no-stream] [--version] [prompt]
 
-PydanticAI CLI v...
+Pydantic AI CLI v...
 
 Special prompts:
 * `/exit` - exit the interactive mode (ctrl-c and ctrl-d also work)
@@ -68,7 +68,7 @@ positional arguments:
 options:
   -h, --help            show this help message and exit
   -m [MODEL], --model [MODEL]
-                        Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4o" or "anthropic:claude-3-7-sonnet-latest". Defaults to "openai:gpt-4o".
+                        Model to use, in format "<provider>:<model>" e.g. "openai:gpt-4.1" or "anthropic:claude-sonnet-4-0". Defaults to "openai:gpt-4.1".
   -a AGENT, --agent AGENT
                        Custom Agent to use, in format "module:variable", e.g. "mymodule.submodule:my_agent"
   -l, --list-models     List all available models and exit
@@ -13,12 +13,13 @@ bump = true
 [project]
 name = "clai"
 dynamic = ["version", "dependencies"]
-description = "PydanticAI CLI: command line interface to chat to LLMs"
+description = "Pydantic AI CLI: command line interface to chat to LLMs"
 authors = [
-    { name = "Samuel Colvin", email = "samuel@pydantic.dev" },
-    { name = "Marcelo Trylesinski", email = "marcelotryle@gmail.com" },
-    { name = "David Montague", email = "david@pydantic.dev" },
-    { name = "Alex Hall", email = "alex@pydantic.dev" },
+    { name = "Samuel Colvin", email = "samuel@pydantic.dev" },
+    { name = "Marcelo Trylesinski", email = "marcelotryle@gmail.com" },
+    { name = "David Montague", email = "david@pydantic.dev" },
+    { name = "Alex Hall", email = "alex@pydantic.dev" },
+    { name = "Douwe Maan", email = "douwe@pydantic.dev" },
 ]
 license = "MIT"
 readme = "README.md"
@@ -17,7 +17,7 @@ def test_cli_help(capfd: pytest.CaptureFixture[str]):
 
     help_output = capfd.readouterr().out.strip()
     # TODO change when we reach v1
-    help_output = re.sub(r'(PydanticAI CLI v).+', r'\1...', help_output)
+    help_output = re.sub(r'(Pydantic AI CLI v).+', r'\1...', help_output)
 
     this_dir = Path(__file__).parent
     readme = this_dir / 'README.md'
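The substitution in this test keeps the README comparison stable across releases: everything after "Pydantic AI CLI v" is collapsed to "...". A small standalone sketch of that normalization, with an illustrative input string, follows.

```python
import re

# Collapse the concrete version number after "Pydantic AI CLI v" to "..."
# so the captured help text can be compared against the README verbatim.
help_output = 'Pydantic AI CLI v0.4.5'  # illustrative captured output
normalized = re.sub(r'(Pydantic AI CLI v).+', r'\1...', help_output)
print(normalized)  # Pydantic AI CLI v...
```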
4 files without changes