llcat 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llcat ADDED
@@ -0,0 +1,164 @@
1
+ #!/usr/bin/env python3
2
+ import sys, requests, os, json, argparse, subprocess, select
3
+
4
def _build_request(messages, model, tools):
    # Assemble the chat-completions payload; model and tools are optional.
    req = {'messages': messages, 'stream': True}
    if model:
        req['model'] = model
    if tools:
        req['tools'] = tools
    return req


def _stream_completion(base_url, headers, req):
    """POST a streaming chat request and echo content as it arrives.

    Returns (assistant_text, tool_calls), where tool_calls is the list of
    OpenAI-style tool-call dicts accumulated from the streamed deltas.
    """
    r = requests.post(f'{base_url}/chat/completions', json=req, headers=headers, stream=True)
    assistant_text = ''
    tool_calls = []
    for raw in r.iter_lines():
        if not raw:
            continue
        line = raw.decode('utf-8')
        if not line.startswith('data: '):
            continue
        data = line[6:]
        if data == '[DONE]':
            break
        try:
            delta = json.loads(data)['choices'][0]['delta']
        except (ValueError, KeyError, IndexError):
            continue  # tolerate malformed / keep-alive chunks (was a bare except)
        content = delta.get('content') or ''
        if content:
            print(content, end='', flush=True)
            assistant_text += content
        for tc in delta.get('tool_calls') or []:
            idx = tc.get('index', 0)
            # Grow the list up to idx; the original appended only one slot,
            # which breaks if indices arrive out of order.
            while idx >= len(tool_calls):
                tool_calls.append({'id': '', 'type': 'function',
                                   'function': {'name': '', 'arguments': ''}})
            if 'id' in tc:
                tool_calls[idx]['id'] = tc['id']
            fn = tc.get('function') or {}
            if 'name' in fn:
                tool_calls[idx]['function']['name'] += fn['name']
            if 'arguments' in fn:
                tool_calls[idx]['function']['arguments'] += fn['arguments']
    return assistant_text, tool_calls


def main():
    """llcat entry point.

    Sends a prompt (CLI args and/or piped stdin) to an OpenAI-compatible
    chat-completions server, streams the reply to stdout, optionally runs
    tool calls through an external program, and optionally persists the
    conversation history to a JSON file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--conversation', help='Conversation history file')
    parser.add_argument('-m', '--model', nargs='?', const='', help='Model to use (or list models if no value)')
    parser.add_argument('-k', '--key', help='API key for authorization')
    parser.add_argument('-s', '--server', help='Server URL (e.g., http://[::1]:8080)')
    parser.add_argument('-tf', '--tool_file', help='JSON file with tool definitions')
    parser.add_argument('-tp', '--tool_program', help='Program to execute tool calls')
    parser.add_argument('prompt', nargs='*', help='Your prompt')
    args = parser.parse_args()

    # -s wins; otherwise honor the env vars the error message promises
    # (the original printed them but never read them).
    server = args.server or os.environ.get('OPENAI_API_BASE') or os.environ.get('LLM_BASE_URL')
    if not server:
        parser.print_help()
        print("Error: No server specified. Use -s <server> or set OPENAI_API_BASE/LLM_BASE_URL environment variable.", file=sys.stderr)
        sys.exit(1)

    # Normalize to exactly one trailing /v1.  The original used
    # rstrip('/v1'), which strips *characters* ('/', 'v', '1') and mangles
    # hosts that merely end in 'v' or '1' (e.g. http://myserver1).
    base_url = server.rstrip('/')
    if base_url.endswith('/v1'):
        base_url = base_url[:-3]
    base_url = base_url.rstrip('/') + '/v1'

    headers = {'Content-Type': 'application/json'}
    if args.key:
        headers['Authorization'] = f'Bearer {args.key}'

    # Bare -m (no value): list available models and exit.
    if args.model == '':
        r = requests.get(f'{base_url}/models', headers=headers)
        try:
            for model in r.json().get('data', []):
                print(model['id'])
        except (ValueError, KeyError, TypeError):
            print(f"{r.text}\n\nError Parsing JSON")
        sys.exit(0)

    cli_prompt = ' '.join(args.prompt) if args.prompt else ''
    # select() detects piped stdin without blocking; this works for pipes
    # on POSIX only -- NOTE(review): Windows select() requires sockets.
    stdin_prompt = sys.stdin.read() if select.select([sys.stdin], [], [], 0.0)[0] else ''

    if cli_prompt and stdin_prompt:
        # Both sources: mark the question vs. the piped content.
        prompt = f"<ask>{cli_prompt}</ask><content>{stdin_prompt}"
    else:
        prompt = cli_prompt + stdin_prompt

    messages = []
    if args.conversation and os.path.exists(args.conversation):
        with open(args.conversation, 'r') as f:
            messages = json.load(f)

    messages.append({'role': 'user', 'content': prompt})

    tools = None
    if args.tool_file:
        with open(args.tool_file, 'r') as f:
            tools = json.load(f)

    assistant_response, tool_calls = _stream_completion(
        base_url, headers, _build_request(messages, args.model, tools))

    if args.tool_program and tool_calls:
        # Record the assistant turn (with all its tool calls) exactly once;
        # the original appended it inside the per-call loop, duplicating it
        # whenever the model issued several tool calls in one turn.
        messages.append({
            'role': 'assistant',
            'content': assistant_response if assistant_response else None,
            'tool_calls': tool_calls,
        })
        for tool_call in tool_calls:
            # NOTE(review): json.loads raises if the model emitted invalid
            # JSON arguments, matching the original's behavior.
            tool_input = json.dumps({
                'id': tool_call['id'],
                'name': tool_call['function']['name'],
                'arguments': json.loads(tool_call['function']['arguments']),
            })

            print(f"<Executing: {tool_call['function']['name']}({tool_call['function']['arguments']})>", file=sys.stderr)

            # shell=True is deliberate: -tp is a user-supplied command line.
            result = subprocess.run(
                args.tool_program,
                input=tool_input,
                capture_output=True,
                text=True,
                shell=True,
            )
            print(f"<Result: {result}>", file=sys.stderr)

            messages.append({
                'role': 'tool',
                'tool_call_id': tool_call['id'],
                'content': result.stdout,
            })

        # One follow-up request carrying every tool result.
        assistant_response, _ = _stream_completion(
            base_url, headers, _build_request(messages, args.model, tools))

    print()  # terminate the streamed output with a newline

    if args.conversation:
        if assistant_response:
            messages.append({'role': 'assistant', 'content': assistant_response})
        with open(args.conversation, 'w') as f:
            json.dump(messages, f, indent=2)
162
+
163
# Script entry point; the wheel's console_scripts also map to main().
if __name__ == "__main__":
    main()
@@ -0,0 +1,135 @@
1
+ Metadata-Version: 2.4
2
+ Name: llcat
3
+ Version: 0.3.0
4
+ Summary: /usr/bin/cat for the LLM era
5
+ Project-URL: Homepage, https://github.com/day50-dev/llcat
6
+ Project-URL: Bug Tracker, https://github.com/day50-dev/llcat/issues
7
+ Author-email: Chris McKenzie <kristopolous@yahoo.com>
8
+ License: MIT
9
+ Keywords: cli,llm,chat,openai,terminal
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Environment :: Console
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.8
16
+ Classifier: Programming Language :: Python :: 3.9
17
+ Classifier: Programming Language :: Python :: 3.10
18
+ Classifier: Programming Language :: Python :: 3.11
19
+ Classifier: Topic :: Text Processing :: Markup
20
+ Classifier: Topic :: Utilities
21
+ Requires-Python: >=3.8
22
+ Requires-Dist: requests
23
+ Description-Content-Type: text/markdown
24
+
25
+ # /usr/bin/cat for LLMs
26
+ **llcat** is an LLM program with very little ambition.
27
+
28
+ That's why it's awesome.
29
+
30
+ <img width="670" height="592" alt="llcat" src="https://github.com/user-attachments/assets/0fac2db4-3b2e-4639-b6b1-1b0a121a5744" />
31
+
32
+ You can handle this!
33
+
34
+ **llcat** solves all your problems.
35
+
36
+ Yes. Every one.
37
+
38
+ It can also:
39
+
40
+ * Pipe things from stdin and/or be prompted on the command line.
41
+ * Store **conversation history** optionally, in a normal file.
42
+ * Do **tool calling** using the OpenAI spec. There's an example in this repository (and below).
43
+ * Use local or remote servers, authenticated or not.
44
+ * List **models** using `-m` without arguments. Specify a model with the argument.
45
+
46
+ Free Samples? Sure! It's Free Software.
47
+
48
+ * pipx install llcat
49
+ * uvx llcat
50
+
51
+ It's **llcat**, not **llmcat**. Let's keep it pronounceable.
52
+
53
+ Dependencies? Just the requests library.
54
+
55
+ Pretty unambitious. Pretty nice.
56
+
57
+ ## Examples
58
+
59
+ List the models on [OpenRouter](https://openrouter.ai):
60
+
61
+ `uvx llcat -s https://openrouter.ai/api -m`
62
+
63
+ Go ahead, do that right now.
64
+
65
+ ```
66
+ $ llcat -s https://openrouter.ai/api \
67
+ -m meta-llama/llama-3.2-3b-instruct:free \
68
+ -c /tmp/convo.txt \
69
+ -k $(cat openrouter.key) \
70
+ "What is the capital of France?"
71
+
72
+ $ llcat -s https://openrouter.ai/api \
73
+ -m meta-llama/llama-3.2-3b-instruct:free \
74
+ -c /tmp/convo.txt \
75
+ -k $(cat openrouter.key) \
76
+ "And what about Canada?"
77
+ ```
78
+
79
+ **Let's continue it locally**
80
+
81
+ ```
82
+ $ llcat -s http://192.168.1.21:8080 \
83
+ -c /tmp/convo.txt \
84
+ "And what about Japan?"
85
+ ```
86
+
87
+ Pure sorcery.
88
+
89
+ Want to store state? Let's go!
90
+ ```shell
91
+ $ source fancy.sh
92
+ ...
93
+ $ llc "write a diss track where the knapsack problem hates on the towers of hanoi"
94
+ ```
95
+ What goes in the `...`?
96
+
97
+ You'll have to read the four lines of `fancy.sh`!
98
+
99
+ *(Spoiler Alert: it sets environment variables and has a wrapper function)*
100
+
101
+
102
+ ## The Tool Call To Rule Them All
103
+ This example, a very strange way to play mp3s, uses the sophisticated 21 line `example_tool_program.py` included in this repository.
104
+
105
+ It also uses DAY50's pretty little [streaming markdown renderer, streamdown](https://github.com/day50-dev/Streamdown).
106
+
107
+ <img width="1919" height="606" alt="tc" src="https://github.com/user-attachments/assets/a704ae5c-cfcb-4abc-b1a7-ad1290e60510" />
108
+
109
+ [Kablam!](https://frustratedfunk.bandcamp.com/track/photographic-photogenic) Alright **a16z** where's my $50 million?
110
+
111
+ The enterprise applications are limitless...
112
+
113
+ ### Boring Documentation
114
+
115
+ ```shell
116
+ usage: llcat [-h] [-c CONVERSATION] [-m [MODEL]] [-k KEY] [-s SERVER]
117
+ [-tf TOOL_FILE] [-tp TOOL_PROGRAM]
118
+ [prompt ...]
119
+
120
+ positional arguments:
121
+ prompt Your prompt
122
+
123
+ options:
124
+ -h, --help show this help message and exit
125
+ -c, --conversation CONVERSATION
126
+ Conversation history file
127
+ -m, --model [MODEL] Model to use (or list models if no value)
128
+ -k, --key KEY API key for authorization
129
+ -s, --server SERVER  Server URL (e.g., http://[::1]:8080)
130
+ -tf, --tool_file TOOL_FILE
131
+ JSON file with tool definitions
132
+ -tp, --tool_program TOOL_PROGRAM
133
+ Program to execute tool calls
134
+
135
+ ```
@@ -0,0 +1,5 @@
1
+ llcat,sha256=LRJzodYvPHYdX8JzXLA6PtBK0MXzEdjequbguzuGSuY,6390
2
+ llcat-0.3.0.dist-info/METADATA,sha256=723dO4Odpq93evhJAqI8irXHijbaibfb7nGiYSwP1is,4170
3
+ llcat-0.3.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
4
+ llcat-0.3.0.dist-info/entry_points.txt,sha256=Hjeok2HN7o5yovptEjIgyC2iv4nljbDVD-jMF0mVwig,58
5
+ llcat-0.3.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.28.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,3 @@
1
+ [console_scripts]
2
+ llcat = llcat:main