llcat 0.6.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of llcat might be problematic. Click here for more details.
- llcat-0.6.0/PKG-INFO +144 -0
- llcat-0.6.0/README.md +121 -0
- llcat-0.6.0/llcat.egg-info/PKG-INFO +144 -0
- llcat-0.6.0/llcat.egg-info/SOURCES.txt +9 -0
- llcat-0.6.0/llcat.egg-info/dependency_links.txt +1 -0
- llcat-0.6.0/llcat.egg-info/entry_points.txt +2 -0
- llcat-0.6.0/llcat.egg-info/requires.txt +1 -0
- llcat-0.6.0/llcat.egg-info/top_level.txt +1 -0
- llcat-0.6.0/llcat.py +164 -0
- llcat-0.6.0/pyproject.toml +41 -0
- llcat-0.6.0/setup.cfg +4 -0
llcat-0.6.0/PKG-INFO
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: llcat
|
|
3
|
+
Version: 0.6.0
|
|
4
|
+
Summary: /usr/bin/cat for the LLM era
|
|
5
|
+
Author-email: Chris McKenzie <kristopolous@yahoo.com>
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/day50-dev/llcat
|
|
8
|
+
Project-URL: Bug Tracker, https://github.com/day50-dev/llcat/issues
|
|
9
|
+
Keywords: markdown,terminal,renderer,cli,syntax-highlighting
|
|
10
|
+
Classifier: Development Status :: 4 - Beta
|
|
11
|
+
Classifier: Environment :: Console
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Topic :: Text Processing :: Markup
|
|
19
|
+
Classifier: Topic :: Utilities
|
|
20
|
+
Requires-Python: >=3.8
|
|
21
|
+
Description-Content-Type: text/markdown
|
|
22
|
+
Requires-Dist: requests
|
|
23
|
+
|
|
24
|
+
# /usr/bin/cat for LLMs
|
|
25
|
+
**llcat** is an LLM program with very little ambition.
|
|
26
|
+
|
|
27
|
+
That's why it's awesome.
|
|
28
|
+
|
|
29
|
+
<img width="670" height="592" alt="llcat" src="https://github.com/user-attachments/assets/0fac2db4-3b2e-4639-b6b1-1b0a121a5744" />
|
|
30
|
+
|
|
31
|
+
You can handle this!
|
|
32
|
+
|
|
33
|
+
----
|
|
34
|
+
|
|
35
|
+
List the models on [OpenRouter](https://openrouter.ai):
|
|
36
|
+
|
|
37
|
+
`uvx llcat -s https://openrouter.ai/api -m`
|
|
38
|
+
|
|
39
|
+
Go ahead, do that one right now. I'll wait.
|
|
40
|
+
|
|
41
|
+
----
|
|
42
|
+
|
|
43
|
+
**llcat** solves all your problems.
|
|
44
|
+
|
|
45
|
+
Yes. Every one.
|
|
46
|
+
|
|
47
|
+
It can also:
|
|
48
|
+
|
|
49
|
+
* Pipe things from stdin and/or be prompted on the command line.
|
|
50
|
+
* Store **conversation history** optionally, as a boring JSON file.
|
|
51
|
+
* Do **tool calling** using the OpenAI spec. There's an example in this repository (and below).
|
|
52
|
+
* Use local or remote servers, authenticated or not.
|
|
53
|
+
* List **models** using `-m` without arguments. Specify a model with the argument.
|
|
54
|
+
|
|
55
|
+
Free Samples? Sure! It's Free Software.
|
|
56
|
+
|
|
57
|
+
* pipx install llcat
|
|
58
|
+
* uvx llcat
|
|
59
|
+
|
|
60
|
+
Dependencies? Just the requests library.
|
|
61
|
+
|
|
62
|
+
It's **llcat**, not **llmcat**. Let's keep it pronounceable.
|
|
63
|
+
|
|
64
|
+
Feels nice to be unambitious.
|
|
65
|
+
|
|
66
|
+
## Examples
|
|
67
|
+
|
|
68
|
+
Let's start with llama:
|
|
69
|
+
```
|
|
70
|
+
$ llcat -s https://openrouter.ai/api \
|
|
71
|
+
-m meta-llama/llama-3.2-3b-instruct:free \
|
|
72
|
+
-c /tmp/convo.txt \
|
|
73
|
+
-k $(cat openrouter.key) \
|
|
74
|
+
"What is the capital of France?"
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
Continue with Qwen:
|
|
78
|
+
```
|
|
79
|
+
$ llcat -s https://openrouter.ai/api \
|
|
80
|
+
-m qwen/qwen3-4b:free \
|
|
81
|
+
-c /tmp/convo.txt \
|
|
82
|
+
-k $(cat openrouter.key) \
|
|
83
|
+
"And what about Canada?"
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
And finish on the local network:
|
|
87
|
+
```
|
|
88
|
+
$ llcat -s http://192.168.1.21:8080 \
|
|
89
|
+
-c /tmp/convo.txt \
|
|
90
|
+
"And what about Japan?"
|
|
91
|
+
```
|
|
92
|
+
One conversation, hopping across models and servers.
|
|
93
|
+
|
|
94
|
+
Pure sorcery.
|
|
95
|
+
|
|
96
|
+
## Summon Some More
|
|
97
|
+
|
|
98
|
+
Want to store state? Let's go!
|
|
99
|
+
```shell
|
|
100
|
+
$ source fancy.sh
|
|
101
|
+
$ llc-server http://192.168.1.21:8080
|
|
102
|
+
$ llc "write a diss track where the knapsack problem hates on the towers of hanoi"
|
|
103
|
+
```
|
|
104
|
+
Now go [read the four lines of `fancy.sh`](https://github.com/day50-dev/llcat/blob/main/fancy.sh)
|
|
105
|
+
|
|
106
|
+
Surprise! It's just an example. Environment variables and a wrapper function. That's all you need.
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
## The Tool Call To Rule Them All
|
|
110
|
+
This example, a very strange way to play mp3s, uses the [sophisticated 21 line `example_tool_program.py`](https://github.com/day50-dev/llcat/blob/main/example_tool_program.py) included in this repository.
|
|
111
|
+
|
|
112
|
+
It also uses DA`/50's pretty little [streaming markdown renderer, streamdown](https://github.com/day50-dev/Streamdown).
|
|
113
|
+
|
|
114
|
+
<img width="1919" height="606" alt="tc" src="https://github.com/user-attachments/assets/a704ae5c-cfcb-4abc-b1a7-ad1290e60510" />
|
|
115
|
+
|
|
116
|
+
[Kablam!](https://frustratedfunk.bandcamp.com/track/photographic-photogenic) Alright **a16z** where's my $50 million?
|
|
117
|
+
|
|
118
|
+
The enterprise applications are limitless...
|
|
119
|
+
|
|
120
|
+
### Boring Documentation
|
|
121
|
+
|
|
122
|
+
```shell
|
|
123
|
+
usage: llcat [-h] [-c CONVERSATION] [-m [MODEL]] [-k KEY] [-s SERVER]
|
|
124
|
+
[-tf TOOL_FILE] [-tp TOOL_PROGRAM]
|
|
125
|
+
[prompt ...]
|
|
126
|
+
|
|
127
|
+
positional arguments:
|
|
128
|
+
prompt Your prompt
|
|
129
|
+
|
|
130
|
+
options:
|
|
131
|
+
-h, --help show this help message and exit
|
|
132
|
+
-c, --conversation CONVERSATION
|
|
133
|
+
Conversation history file
|
|
134
|
+
-m, --model [MODEL] Model to use (or list models if no value)
|
|
135
|
+
-k, --key KEY API key for authorization
|
|
136
|
+
-s, --server SERVER  Server URL (e.g., http://[::1]:8080)
|
|
137
|
+
-tf, --tool_file TOOL_FILE
|
|
138
|
+
JSON file with tool definitions
|
|
139
|
+
-tp, --tool_program TOOL_PROGRAM
|
|
140
|
+
Program to execute tool calls
|
|
141
|
+
|
|
142
|
+
```
|
|
143
|
+
|
|
144
|
+
Brought to you by **DA`/50**: Make the future obvious.
|
llcat-0.6.0/README.md
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
# /usr/bin/cat for LLMs
|
|
2
|
+
**llcat** is an LLM program with very little ambition.
|
|
3
|
+
|
|
4
|
+
That's why it's awesome.
|
|
5
|
+
|
|
6
|
+
<img width="670" height="592" alt="llcat" src="https://github.com/user-attachments/assets/0fac2db4-3b2e-4639-b6b1-1b0a121a5744" />
|
|
7
|
+
|
|
8
|
+
You can handle this!
|
|
9
|
+
|
|
10
|
+
----
|
|
11
|
+
|
|
12
|
+
List the models on [OpenRouter](https://openrouter.ai):
|
|
13
|
+
|
|
14
|
+
`uvx llcat -s https://openrouter.ai/api -m`
|
|
15
|
+
|
|
16
|
+
Go ahead, do that one right now. I'll wait.
|
|
17
|
+
|
|
18
|
+
----
|
|
19
|
+
|
|
20
|
+
**llcat** solves all your problems.
|
|
21
|
+
|
|
22
|
+
Yes. Every one.
|
|
23
|
+
|
|
24
|
+
It can also:
|
|
25
|
+
|
|
26
|
+
* Pipe things from stdin and/or be prompted on the command line.
|
|
27
|
+
* Store **conversation history** optionally, as a boring JSON file.
|
|
28
|
+
* Do **tool calling** using the OpenAI spec. There's an example in this repository (and below).
|
|
29
|
+
* Use local or remote servers, authenticated or not.
|
|
30
|
+
* List **models** using `-m` without arguments. Specify a model with the argument.
|
|
31
|
+
|
|
32
|
+
Free Samples? Sure! It's Free Software.
|
|
33
|
+
|
|
34
|
+
* pipx install llcat
|
|
35
|
+
* uvx llcat
|
|
36
|
+
|
|
37
|
+
Dependencies? Just the requests library.
|
|
38
|
+
|
|
39
|
+
It's **llcat**, not **llmcat**. Let's keep it pronounceable.
|
|
40
|
+
|
|
41
|
+
Feels nice to be unambitious.
|
|
42
|
+
|
|
43
|
+
## Examples
|
|
44
|
+
|
|
45
|
+
Let's start with llama:
|
|
46
|
+
```
|
|
47
|
+
$ llcat -s https://openrouter.ai/api \
|
|
48
|
+
-m meta-llama/llama-3.2-3b-instruct:free \
|
|
49
|
+
-c /tmp/convo.txt \
|
|
50
|
+
-k $(cat openrouter.key) \
|
|
51
|
+
"What is the capital of France?"
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
Continue with Qwen:
|
|
55
|
+
```
|
|
56
|
+
$ llcat -s https://openrouter.ai/api \
|
|
57
|
+
-m qwen/qwen3-4b:free \
|
|
58
|
+
-c /tmp/convo.txt \
|
|
59
|
+
-k $(cat openrouter.key) \
|
|
60
|
+
"And what about Canada?"
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
And finish on the local network:
|
|
64
|
+
```
|
|
65
|
+
$ llcat -s http://192.168.1.21:8080 \
|
|
66
|
+
-c /tmp/convo.txt \
|
|
67
|
+
"And what about Japan?"
|
|
68
|
+
```
|
|
69
|
+
One conversation, hopping across models and servers.
|
|
70
|
+
|
|
71
|
+
Pure sorcery.
|
|
72
|
+
|
|
73
|
+
## Summon Some More
|
|
74
|
+
|
|
75
|
+
Want to store state? Let's go!
|
|
76
|
+
```shell
|
|
77
|
+
$ source fancy.sh
|
|
78
|
+
$ llc-server http://192.168.1.21:8080
|
|
79
|
+
$ llc "write a diss track where the knapsack problem hates on the towers of hanoi"
|
|
80
|
+
```
|
|
81
|
+
Now go [read the four lines of `fancy.sh`](https://github.com/day50-dev/llcat/blob/main/fancy.sh)
|
|
82
|
+
|
|
83
|
+
Surprise! It's just an example. Environment variables and a wrapper function. That's all you need.
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
## The Tool Call To Rule Them All
|
|
87
|
+
This example, a very strange way to play mp3s, uses the [sophisticated 21 line `example_tool_program.py`](https://github.com/day50-dev/llcat/blob/main/example_tool_program.py) included in this repository.
|
|
88
|
+
|
|
89
|
+
It also uses DA`/50's pretty little [streaming markdown renderer, streamdown](https://github.com/day50-dev/Streamdown).
|
|
90
|
+
|
|
91
|
+
<img width="1919" height="606" alt="tc" src="https://github.com/user-attachments/assets/a704ae5c-cfcb-4abc-b1a7-ad1290e60510" />
|
|
92
|
+
|
|
93
|
+
[Kablam!](https://frustratedfunk.bandcamp.com/track/photographic-photogenic) Alright **a16z** where's my $50 million?
|
|
94
|
+
|
|
95
|
+
The enterprise applications are limitless...
|
|
96
|
+
|
|
97
|
+
### Boring Documentation
|
|
98
|
+
|
|
99
|
+
```shell
|
|
100
|
+
usage: llcat [-h] [-c CONVERSATION] [-m [MODEL]] [-k KEY] [-s SERVER]
|
|
101
|
+
[-tf TOOL_FILE] [-tp TOOL_PROGRAM]
|
|
102
|
+
[prompt ...]
|
|
103
|
+
|
|
104
|
+
positional arguments:
|
|
105
|
+
prompt Your prompt
|
|
106
|
+
|
|
107
|
+
options:
|
|
108
|
+
-h, --help show this help message and exit
|
|
109
|
+
-c, --conversation CONVERSATION
|
|
110
|
+
Conversation history file
|
|
111
|
+
-m, --model [MODEL] Model to use (or list models if no value)
|
|
112
|
+
-k, --key KEY API key for authorization
|
|
113
|
+
-s, --server SERVER  Server URL (e.g., http://[::1]:8080)
|
|
114
|
+
-tf, --tool_file TOOL_FILE
|
|
115
|
+
JSON file with tool definitions
|
|
116
|
+
-tp, --tool_program TOOL_PROGRAM
|
|
117
|
+
Program to execute tool calls
|
|
118
|
+
|
|
119
|
+
```
|
|
120
|
+
|
|
121
|
+
Brought to you by **DA`/50**: Make the future obvious.
|
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: llcat
|
|
3
|
+
Version: 0.6.0
|
|
4
|
+
Summary: /usr/bin/cat for the LLM era
|
|
5
|
+
Author-email: Chris McKenzie <kristopolous@yahoo.com>
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/day50-dev/llcat
|
|
8
|
+
Project-URL: Bug Tracker, https://github.com/day50-dev/llcat/issues
|
|
9
|
+
Keywords: markdown,terminal,renderer,cli,syntax-highlighting
|
|
10
|
+
Classifier: Development Status :: 4 - Beta
|
|
11
|
+
Classifier: Environment :: Console
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Topic :: Text Processing :: Markup
|
|
19
|
+
Classifier: Topic :: Utilities
|
|
20
|
+
Requires-Python: >=3.8
|
|
21
|
+
Description-Content-Type: text/markdown
|
|
22
|
+
Requires-Dist: requests
|
|
23
|
+
|
|
24
|
+
# /usr/bin/cat for LLMs
|
|
25
|
+
**llcat** is an LLM program with very little ambition.
|
|
26
|
+
|
|
27
|
+
That's why it's awesome.
|
|
28
|
+
|
|
29
|
+
<img width="670" height="592" alt="llcat" src="https://github.com/user-attachments/assets/0fac2db4-3b2e-4639-b6b1-1b0a121a5744" />
|
|
30
|
+
|
|
31
|
+
You can handle this!
|
|
32
|
+
|
|
33
|
+
----
|
|
34
|
+
|
|
35
|
+
List the models on [OpenRouter](https://openrouter.ai):
|
|
36
|
+
|
|
37
|
+
`uvx llcat -s https://openrouter.ai/api -m`
|
|
38
|
+
|
|
39
|
+
Go ahead, do that one right now. I'll wait.
|
|
40
|
+
|
|
41
|
+
----
|
|
42
|
+
|
|
43
|
+
**llcat** solves all your problems.
|
|
44
|
+
|
|
45
|
+
Yes. Every one.
|
|
46
|
+
|
|
47
|
+
It can also:
|
|
48
|
+
|
|
49
|
+
* Pipe things from stdin and/or be prompted on the command line.
|
|
50
|
+
* Store **conversation history** optionally, as a boring JSON file.
|
|
51
|
+
* Do **tool calling** using the OpenAI spec. There's an example in this repository (and below).
|
|
52
|
+
* Use local or remote servers, authenticated or not.
|
|
53
|
+
* List **models** using `-m` without arguments. Specify a model with the argument.
|
|
54
|
+
|
|
55
|
+
Free Samples? Sure! It's Free Software.
|
|
56
|
+
|
|
57
|
+
* pipx install llcat
|
|
58
|
+
* uvx llcat
|
|
59
|
+
|
|
60
|
+
Dependencies? Just the requests library.
|
|
61
|
+
|
|
62
|
+
It's **llcat**, not **llmcat**. Let's keep it pronounceable.
|
|
63
|
+
|
|
64
|
+
Feels nice to be unambitious.
|
|
65
|
+
|
|
66
|
+
## Examples
|
|
67
|
+
|
|
68
|
+
Let's start with llama:
|
|
69
|
+
```
|
|
70
|
+
$ llcat -s https://openrouter.ai/api \
|
|
71
|
+
-m meta-llama/llama-3.2-3b-instruct:free \
|
|
72
|
+
-c /tmp/convo.txt \
|
|
73
|
+
-k $(cat openrouter.key) \
|
|
74
|
+
"What is the capital of France?"
|
|
75
|
+
```
|
|
76
|
+
|
|
77
|
+
Continue with Qwen:
|
|
78
|
+
```
|
|
79
|
+
$ llcat -s https://openrouter.ai/api \
|
|
80
|
+
-m qwen/qwen3-4b:free \
|
|
81
|
+
-c /tmp/convo.txt \
|
|
82
|
+
-k $(cat openrouter.key) \
|
|
83
|
+
"And what about Canada?"
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
And finish on the local network:
|
|
87
|
+
```
|
|
88
|
+
$ llcat -s http://192.168.1.21:8080 \
|
|
89
|
+
-c /tmp/convo.txt \
|
|
90
|
+
"And what about Japan?"
|
|
91
|
+
```
|
|
92
|
+
One conversation, hopping across models and servers.
|
|
93
|
+
|
|
94
|
+
Pure sorcery.
|
|
95
|
+
|
|
96
|
+
## Summon Some More
|
|
97
|
+
|
|
98
|
+
Want to store state? Let's go!
|
|
99
|
+
```shell
|
|
100
|
+
$ source fancy.sh
|
|
101
|
+
$ llc-server http://192.168.1.21:8080
|
|
102
|
+
$ llc "write a diss track where the knapsack problem hates on the towers of hanoi"
|
|
103
|
+
```
|
|
104
|
+
Now go [read the four lines of `fancy.sh`](https://github.com/day50-dev/llcat/blob/main/fancy.sh)
|
|
105
|
+
|
|
106
|
+
Surprise! It's just an example. Environment variables and a wrapper function. That's all you need.
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
## The Tool Call To Rule Them All
|
|
110
|
+
This example, a very strange way to play mp3s, uses the [sophisticated 21 line `example_tool_program.py`](https://github.com/day50-dev/llcat/blob/main/example_tool_program.py) included in this repository.
|
|
111
|
+
|
|
112
|
+
It also uses DA`/50's pretty little [streaming markdown renderer, streamdown](https://github.com/day50-dev/Streamdown).
|
|
113
|
+
|
|
114
|
+
<img width="1919" height="606" alt="tc" src="https://github.com/user-attachments/assets/a704ae5c-cfcb-4abc-b1a7-ad1290e60510" />
|
|
115
|
+
|
|
116
|
+
[Kablam!](https://frustratedfunk.bandcamp.com/track/photographic-photogenic) Alright **a16z** where's my $50 million?
|
|
117
|
+
|
|
118
|
+
The enterprise applications are limitless...
|
|
119
|
+
|
|
120
|
+
### Boring Documentation
|
|
121
|
+
|
|
122
|
+
```shell
|
|
123
|
+
usage: llcat [-h] [-c CONVERSATION] [-m [MODEL]] [-k KEY] [-s SERVER]
|
|
124
|
+
[-tf TOOL_FILE] [-tp TOOL_PROGRAM]
|
|
125
|
+
[prompt ...]
|
|
126
|
+
|
|
127
|
+
positional arguments:
|
|
128
|
+
prompt Your prompt
|
|
129
|
+
|
|
130
|
+
options:
|
|
131
|
+
-h, --help show this help message and exit
|
|
132
|
+
-c, --conversation CONVERSATION
|
|
133
|
+
Conversation history file
|
|
134
|
+
-m, --model [MODEL] Model to use (or list models if no value)
|
|
135
|
+
-k, --key KEY API key for authorization
|
|
136
|
+
-s, --server SERVER  Server URL (e.g., http://[::1]:8080)
|
|
137
|
+
-tf, --tool_file TOOL_FILE
|
|
138
|
+
JSON file with tool definitions
|
|
139
|
+
-tp, --tool_program TOOL_PROGRAM
|
|
140
|
+
Program to execute tool calls
|
|
141
|
+
|
|
142
|
+
```
|
|
143
|
+
|
|
144
|
+
Brought to you by **DA`/50**: Make the future obvious.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
requests
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
llcat
|
llcat-0.6.0/llcat.py
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
import sys, requests, os, json, argparse, subprocess, select
|
|
3
|
+
|
|
4
|
+
def _resolve_base_url(server):
    """Normalize *server* into an OpenAI-style base URL ending in '/v1'.

    Accepts 'http://host:8080', 'http://host:8080/' and
    'http://host:8080/v1' alike and returns '.../v1' exactly once.
    """
    url = server.rstrip('/')
    # BUG FIX: the previous rstrip('/v1') stripped the *characters*
    # '/', 'v', '1' from the end (mangling e.g. a path ending in 'serv1');
    # strip the literal '/v1' suffix instead.
    if url.endswith('/v1'):
        url = url[:-3]
    return url + '/v1'


def _read_prompt(cli_parts):
    """Combine the command-line prompt with piped stdin, if any.

    When both are present, the CLI text is wrapped in <ask> tags and the
    stdin payload in <content> tags so the model can tell them apart.
    """
    cli_prompt = ' '.join(cli_parts) if cli_parts else ''
    # Non-blocking poll: only read stdin when something was actually piped in.
    stdin_prompt = sys.stdin.read() if select.select([sys.stdin], [], [], 0.0)[0] else ''
    if stdin_prompt and cli_prompt:
        # BUG FIX: the <content> tag was previously left unterminated.
        return f"<ask>{cli_prompt}</ask><content>{stdin_prompt}</content>"
    return cli_prompt + stdin_prompt


def _stream_completion(base_url, headers, req):
    """POST a streaming /chat/completions request, echoing text as it arrives.

    Returns (assistant_text, tool_calls): the concatenated streamed content
    and any OpenAI-style tool calls assembled from the streamed deltas.
    """
    r = requests.post(f'{base_url}/chat/completions', json=req, headers=headers, stream=True)
    assistant_response = ''
    tool_calls = []
    for raw in r.iter_lines():
        if not raw:
            continue
        line = raw.decode('utf-8')
        if not line.startswith('data: '):
            continue
        data = line[6:]
        if data == '[DONE]':
            break
        try:
            delta = json.loads(data)['choices'][0]['delta']
        except (ValueError, KeyError, IndexError):
            continue  # narrowed from a bare except: skip keep-alives / malformed SSE chunks
        content = delta.get('content', '')
        if content:
            print(content, end='', flush=True)
            assistant_response += content
        for tc in delta.get('tool_calls') or []:
            idx = tc.get('index', 0)
            # Tool calls stream in fragments; grow the list to cover the
            # reported index (the old single append broke on skipped indices).
            while idx >= len(tool_calls):
                tool_calls.append({'id': '', 'type': 'function',
                                   'function': {'name': '', 'arguments': ''}})
            entry = tool_calls[idx]
            if 'id' in tc:
                entry['id'] = tc['id']
            fn = tc.get('function') or {}
            if 'name' in fn:
                entry['function']['name'] += fn['name']
            if 'arguments' in fn:
                entry['function']['arguments'] += fn['arguments']
    return assistant_response, tool_calls


def main():
    """Entry point: parse arguments, send the prompt to an OpenAI-compatible
    server, stream the reply, optionally execute tool calls, and optionally
    persist the conversation as a JSON file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--conversation', help='Conversation history file')
    parser.add_argument('-m', '--model', nargs='?', const='', help='Model to use (or list models if no value)')
    parser.add_argument('-k', '--key', help='API key for authorization')
    parser.add_argument('-s', '--server', help='Server URL (e.g., http://[::1]:8080)')
    parser.add_argument('-tf', '--tool_file', help='JSON file with tool definitions')
    parser.add_argument('-tp', '--tool_program', help='Program to execute tool calls')
    parser.add_argument('prompt', nargs='*', help='Your prompt')
    args = parser.parse_args()

    # BUG FIX: the error message below promised these environment variables,
    # but the code never actually read them. Now it does.
    server = args.server or os.environ.get('OPENAI_API_BASE') or os.environ.get('LLM_BASE_URL')
    if not server:
        parser.print_help()
        print("Error: No server specified. Use -s <server> or set OPENAI_API_BASE/LLM_BASE_URL environment variable.", file=sys.stderr)
        sys.exit(1)
    base_url = _resolve_base_url(server)

    headers = {'Content-Type': 'application/json'}
    if args.key:
        headers['Authorization'] = f'Bearer {args.key}'

    prompt = _read_prompt(args.prompt)

    # Bare `-m` with no value and no prompt: list the server's models and exit.
    if args.model == '' and not prompt:
        r = requests.get(f'{base_url}/models', headers=headers)
        try:
            models = r.json()
        except ValueError:  # narrowed from a bare except: response was not JSON
            print(f"{r.text}\n\nError Parsing JSON")
        else:
            for model in models.get('data', []):
                print(model['id'])
        sys.exit(0)

    messages = []
    if args.conversation and os.path.exists(args.conversation):
        with open(args.conversation, 'r') as f:
            messages = json.load(f)
    messages.append({'role': 'user', 'content': prompt})

    tools = None
    if args.tool_file:
        with open(args.tool_file, 'r') as f:
            tools = json.load(f)

    def build_request():
        # Rebuilt before each round trip so appended tool results are included.
        req = {'messages': messages, 'stream': True}
        if args.model:
            req['model'] = args.model
        if tools:
            req['tools'] = tools
        return req

    assistant_response, tool_calls = _stream_completion(base_url, headers, build_request())

    if args.tool_program and tool_calls:
        # OpenAI protocol: exactly ONE assistant message carrying all tool
        # calls, then one 'tool' result message per call. (Previously the
        # appends were entangled with the execution loop.)
        messages.append({
            'role': 'assistant',
            'content': assistant_response if assistant_response else None,
            'tool_calls': tool_calls,
        })
        for tool_call in tool_calls:
            fn = tool_call['function']
            tool_input = json.dumps({
                'id': tool_call['id'],
                'name': fn['name'],
                'arguments': json.loads(fn['arguments']),
            })
            print(f"<Executing: {fn['name']}({fn['arguments']})>", file=sys.stderr)
            # NOTE: shell=True runs the user's own -tp command string; the
            # model-produced JSON is passed on stdin, never interpolated
            # into the shell command.
            result = subprocess.run(
                args.tool_program,
                input=tool_input,
                capture_output=True,
                text=True,
                shell=True
            )
            print(f"<Result: {result}>", file=sys.stderr)
            messages.append({
                'role': 'tool',
                'tool_call_id': tool_call['id'],
                'content': result.stdout,
            })

        # Second round trip so the model can respond using the tool results.
        assistant_response, _ = _stream_completion(base_url, headers, build_request())

    print()  # terminate the streamed output line

    if args.conversation:
        if assistant_response:
            messages.append({'role': 'assistant', 'content': assistant_response})
        with open(args.conversation, 'w') as f:
            json.dump(messages, f, indent=2)


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"

[project]
name = "llcat"
version = "0.6.0"
description = "/usr/bin/cat for the LLM era"
readme = "README.md"
requires-python = ">=3.8"
license = "MIT"
authors = [
    {name = "Chris McKenzie", email = "kristopolous@yahoo.com"},
]
# Keywords/classifiers previously described a markdown renderer (apparently
# copied from the author's Streamdown project); corrected to describe llcat,
# an LLM chat CLI with tool calling.
keywords = ["llm", "cli", "openai", "chat", "tool-calling"]
classifiers = [
    "Development Status :: 4 - Beta",
    "Environment :: Console",
    "Intended Audience :: Developers",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Topic :: Utilities",
]
dependencies = [
    "requests"
]

[project.urls]
"Homepage" = "https://github.com/day50-dev/llcat"
"Bug Tracker" = "https://github.com/day50-dev/llcat/issues"

[tool.setuptools]
py-modules = ["llcat"]

[project.scripts]
llcat = "llcat:main"
|
llcat-0.6.0/setup.cfg
ADDED