llcat 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of llcat might be problematic. Click here for more details.

llcat ADDED
@@ -0,0 +1,167 @@
1
#!/usr/bin/env python3
"""llcat - a minimal streaming chat client for OpenAI-compatible APIs.

Reads a prompt from argv and/or stdin, streams the completion to stdout,
optionally persists conversation history to a JSON file, and can execute
OpenAI-spec tool calls via an external program.
"""
import argparse
import json
import os
import subprocess
import sys

import requests


def _stream_chat(base_url, headers, req):
    """POST a streaming chat request and print content tokens as they arrive.

    Parses the SSE stream (`data: {...}` lines, terminated by `[DONE]`) and
    accumulates both the assistant text and any incremental tool-call deltas.

    Returns:
        (assistant_text, tool_calls): the full response text and a list of
        tool-call dicts in OpenAI format ({'id', 'type', 'function': {...}}).
    """
    r = requests.post(f'{base_url}/chat/completions',
                      json=req, headers=headers, stream=True)
    text = ''
    tool_calls = []
    for raw in r.iter_lines():
        if not raw:
            continue
        line = raw.decode('utf-8')
        if not line.startswith('data: '):
            continue
        data = line[6:]
        if data == '[DONE]':
            break
        try:
            delta = json.loads(data)['choices'][0]['delta']
        except (ValueError, KeyError, IndexError) as exc:
            # Malformed chunk: report on stderr so stdout stays clean answer text.
            print(exc, file=sys.stderr)
            continue
        # `content` may be present but null in tool-call chunks.
        content = delta.get('content') or ''
        if content:
            print(content, end='', flush=True)
            text += content
        for tc in delta.get('tool_calls') or []:
            idx = tc.get('index', 0)
            # Fill any gaps so a sparse/out-of-order index cannot IndexError.
            while idx >= len(tool_calls):
                tool_calls.append({'id': '', 'type': 'function',
                                   'function': {'name': '', 'arguments': ''}})
            slot = tool_calls[idx]
            if 'id' in tc:
                slot['id'] = tc['id']
            fn = tc.get('function', {})
            # Name and arguments arrive as fragments; concatenate them.
            slot['function']['name'] += fn.get('name', '')
            slot['function']['arguments'] += fn.get('arguments', '')
    print()
    return text, tool_calls


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--conversation', help='Conversation history file')
    parser.add_argument('-m', '--model', nargs='?', const='',
                        help='Model to use (or list models if no value)')
    parser.add_argument('-k', '--key', help='API key for authorization')
    parser.add_argument('-s', '--server',
                        help='Server URL (e.g., http://[::1]:8080)')
    parser.add_argument('-tf', '--tool_file', help='JSON file with tool definitions')
    parser.add_argument('-tp', '--tool_program', help='Program to execute tool calls')
    parser.add_argument('prompt', nargs='*', help='Your prompt')
    args = parser.parse_args()

    # Resolve the API base: -s wins, then environment, else refuse to run.
    if args.server:
        base_url = args.server.rstrip('/') + '/v1'
    else:
        base_url = os.getenv('OPENAI_API_BASE') or os.getenv('LLM_BASE_URL')
    if not base_url:
        parser.print_help()
        print("Error: No server specified. Use -s <server> or set "
              "OPENAI_API_BASE/LLM_BASE_URL environment variable.",
              file=sys.stderr)
        sys.exit(1)

    headers = {'Content-Type': 'application/json'}
    if args.key:
        headers['Authorization'] = f'Bearer {args.key}'

    # Bare `-m` (const='') means: list models and exit.
    if args.model == '':
        r = requests.get(f'{base_url}/models', headers=headers, timeout=30)
        try:
            for model in r.json().get('data', []):
                print(model['id'])
        except (ValueError, KeyError, TypeError):
            print(f"{r.text}\n\nError Parsing JSON")
        sys.exit(0)

    cli_prompt = ' '.join(args.prompt) if args.prompt else ''
    # isatty() is race-free and portable; select([stdin],..,0) could miss a
    # slow upstream pipe that hadn't written yet.
    stdin_prompt = '' if sys.stdin.isatty() else sys.stdin.read()

    if stdin_prompt and cli_prompt:
        # Both sources: the argv text is the question, stdin is the material.
        prompt = f"<ask>{cli_prompt}</ask><content>{stdin_prompt}"
    else:
        prompt = cli_prompt + stdin_prompt

    messages = []
    if args.conversation and os.path.exists(args.conversation):
        with open(args.conversation, 'r') as f:
            messages = json.load(f)
    messages.append({'role': 'user', 'content': prompt})

    tools = None
    if args.tool_file:
        with open(args.tool_file, 'r') as f:
            tools = json.load(f)

    req = {'messages': messages, 'stream': True}
    if args.model:
        req['model'] = args.model
    if tools:
        req['tools'] = tools

    assistant_response, tool_calls = _stream_chat(base_url, headers, req)

    if args.tool_program and tool_calls:
        # Per the OpenAI tool-calling spec: record the assistant turn ONCE
        # (carrying all tool calls), then one 'tool' message per call, then
        # issue ONE follow-up request with the results.
        messages.append({'role': 'assistant',
                         'content': assistant_response or None,
                         'tool_calls': tool_calls})
        for tool_call in tool_calls:
            raw_args = tool_call['function']['arguments']
            try:
                parsed_args = json.loads(raw_args)
            except ValueError:
                parsed_args = raw_args  # model emitted malformed JSON; pass raw
            tool_input = json.dumps({
                'id': tool_call['id'],
                'name': tool_call['function']['name'],
                'arguments': parsed_args,
            })
            print(f"\n[Executing tool: {tool_call['function']['name']}]",
                  file=sys.stderr)
            # shell=True is deliberate: -tp is a user-supplied command line,
            # run with the caller's own privileges.
            result = subprocess.run(
                args.tool_program,
                input=tool_input,
                capture_output=True,
                text=True,
                shell=True,
            )
            messages.append({'role': 'tool',
                             'tool_call_id': tool_call['id'],
                             'content': result.stdout})

        req = {'messages': messages, 'stream': True}
        if args.model:
            req['model'] = args.model
        if tools:
            req['tools'] = tools
        assistant_response, _ = _stream_chat(base_url, headers, req)

    if args.conversation:
        if assistant_response:
            messages.append({'role': 'assistant', 'content': assistant_response})
        with open(args.conversation, 'w') as f:
            json.dump(messages, f, indent=2)


if __name__ == "__main__":
    main()
@@ -0,0 +1,100 @@
1
+ Metadata-Version: 2.4
2
+ Name: llcat
3
+ Version: 0.1.0
4
+ Summary: /usr/bin/cat for the LLM era
5
+ Project-URL: Homepage, https://github.com/day50-dev/llcat
6
+ Project-URL: Bug Tracker, https://github.com/day50-dev/llcat/issues
7
+ Author-email: Chris McKenzie <kristopolous@yahoo.com>
8
+ License: MIT
9
+ Keywords: cli,markdown,renderer,syntax-highlighting,terminal
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Environment :: Console
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.8
16
+ Classifier: Programming Language :: Python :: 3.9
17
+ Classifier: Programming Language :: Python :: 3.10
18
+ Classifier: Programming Language :: Python :: 3.11
19
+ Classifier: Topic :: Text Processing :: Markup
20
+ Classifier: Topic :: Utilities
21
+ Requires-Python: >=3.8
22
+ Description-Content-Type: text/markdown
23
+
24
+ # /usr/bin/cat for LLMs
25
+ <img width="670" height="592" alt="llcat" src="https://github.com/user-attachments/assets/0fac2db4-3b2e-4639-b6b1-1b0a121a5744" />
26
+
27
+ **llcat** is an LLM program with very little ambition in life.
28
+
29
+ That's what makes it useful.
30
+
31
+ * Pipe things into it or prompt it on the command line.
32
+ * **Conversation history** is an optional file.
33
+ * **Tool Calling** follows the OpenAI tool-calling spec. There's a file and a program example.
34
+ * Servers:
35
+ * `OPENAI_API_BASE` and `LLM_BASE_URL` are supported, along with `-s` for one-off use.
36
+ * **Authentication tokens** are passed with -k. You can do `$(< somefile)` or whatever obfuscation you want, that's on you.
37
+ * **Models** are listed with the `-m` option and specified with it as well.
38
+
39
+ Here's some things you can do
40
+
41
+ * pipx install llcat
42
+ * uv tool run llcat
43
+
44
+ There's no *requirements.txt*? That's right.
45
+
46
+ ## Examples
47
+
48
+ **We start with Openrouter**
49
+
50
+ Listing the models on openrouter
51
+
52
+ `llcat -s https://openrouter.ai/api -m`
53
+
54
+ You can run that, unauthenticated, no problem.
55
+
56
+ ```
57
+ $ llcat -s https://openrouter.ai/api \
58
+ -m meta-llama/llama-3.2-3b-instruct:free \
59
+ -c /tmp/convo.txt \
60
+ -k $(cat openrouter.key) \
61
+ "What is the capital of France?"
62
+
63
+ $ llcat -s https://openrouter.ai/api \
64
+ -m meta-llama/llama-3.2-3b-instruct:free \
65
+ -c /tmp/convo.txt \
66
+ -k $(cat openrouter.key) \
67
+ "And what about Canada?"
68
+ ```
69
+
70
+ **Let's continue it locally**
71
+
72
+ ```
73
+ $ llcat -s http://192.168.1.21:8080 \
74
+ -c /tmp/convo.txt \
75
+ "And what about Japan?"
76
+ ```
77
+
78
+ ## Full documentation
79
+
80
+ ```shell
81
+ usage: llcat [-h] [-c CONVERSATION] [-m [MODEL]] [-k KEY] [-s SERVER]
82
+ [-tf TOOL_FILE] [-tp TOOL_PROGRAM]
83
+ [prompt ...]
84
+
85
+ positional arguments:
86
+ prompt Your prompt
87
+
88
+ options:
89
+ -h, --help show this help message and exit
90
+ -c, --conversation CONVERSATION
91
+ Conversation history file
92
+ -m, --model [MODEL] Model to use (or list models if no value)
93
+ -k, --key KEY API key for authorization
94
+ -s, --server SERVER Server URL (e.g., http://[::1]:8080)
95
+ -tf, --tool_file TOOL_FILE
96
+ JSON file with tool definitions
97
+ -tp, --tool_program TOOL_PROGRAM
98
+ Program to execute tool calls
99
+
100
+ ```
@@ -0,0 +1,5 @@
1
+ llcat,sha256=lYCHk-ZuWhEwJQ8AslfepNpfAHk8VV4NuZsyA0E94iU,6445
2
+ llcat-0.1.0.dist-info/METADATA,sha256=1elBRdG7Uo156vvzy_LKu23tQMuu5c0xXAeLaCMKC_k,3233
3
+ llcat-0.1.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
4
+ llcat-0.1.0.dist-info/entry_points.txt,sha256=Hjeok2HN7o5yovptEjIgyC2iv4nljbDVD-jMF0mVwig,58
5
+ llcat-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.28.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,3 @@
1
+ [console_scripts]
2
+ llcat = llcat:main
3
+ streamdown = llcat:main