workplace-pua-cli 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +4 -0
- package/.eslintrc.json +21 -0
- package/.prettierrc.json +9 -0
- package/CHANGELOG.md +107 -0
- package/README.md +240 -0
- package/bin/pua +2 -0
- package/dist/commands/chat.d.ts +15 -0
- package/dist/commands/chat.d.ts.map +1 -0
- package/dist/commands/chat.js +262 -0
- package/dist/commands/chat.js.map +1 -0
- package/dist/commands/config.d.ts +15 -0
- package/dist/commands/config.d.ts.map +1 -0
- package/dist/commands/config.js +247 -0
- package/dist/commands/config.js.map +1 -0
- package/dist/commands/prompt.d.ts +14 -0
- package/dist/commands/prompt.d.ts.map +1 -0
- package/dist/commands/prompt.js +126 -0
- package/dist/commands/prompt.js.map +1 -0
- package/dist/config/providers.d.ts +37 -0
- package/dist/config/providers.d.ts.map +1 -0
- package/dist/config/providers.js +96 -0
- package/dist/config/providers.js.map +1 -0
- package/dist/config/session-storage.d.ts +29 -0
- package/dist/config/session-storage.d.ts.map +1 -0
- package/dist/config/session-storage.js +67 -0
- package/dist/config/session-storage.js.map +1 -0
- package/dist/config/settings.d.ts +55 -0
- package/dist/config/settings.d.ts.map +1 -0
- package/dist/config/settings.js +163 -0
- package/dist/config/settings.js.map +1 -0
- package/dist/config/storage.d.ts +69 -0
- package/dist/config/storage.d.ts.map +1 -0
- package/dist/config/storage.js +126 -0
- package/dist/config/storage.js.map +1 -0
- package/dist/history/session.d.ts +52 -0
- package/dist/history/session.d.ts.map +1 -0
- package/dist/history/session.js +122 -0
- package/dist/history/session.js.map +1 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +157 -0
- package/dist/index.js.map +1 -0
- package/dist/llm/base.d.ts +38 -0
- package/dist/llm/base.d.ts.map +1 -0
- package/dist/llm/base.js +22 -0
- package/dist/llm/base.js.map +1 -0
- package/dist/llm/factory.d.ts +12 -0
- package/dist/llm/factory.d.ts.map +1 -0
- package/dist/llm/factory.js +26 -0
- package/dist/llm/factory.js.map +1 -0
- package/dist/llm/openai.d.ts +10 -0
- package/dist/llm/openai.d.ts.map +1 -0
- package/dist/llm/openai.js +97 -0
- package/dist/llm/openai.js.map +1 -0
- package/dist/llm/zhipu.d.ts +10 -0
- package/dist/llm/zhipu.d.ts.map +1 -0
- package/dist/llm/zhipu.js +91 -0
- package/dist/llm/zhipu.js.map +1 -0
- package/dist/prompts/boss.d.ts +6 -0
- package/dist/prompts/boss.d.ts.map +1 -0
- package/dist/prompts/boss.js +41 -0
- package/dist/prompts/boss.js.map +1 -0
- package/dist/prompts/employee.d.ts +6 -0
- package/dist/prompts/employee.d.ts.map +1 -0
- package/dist/prompts/employee.js +41 -0
- package/dist/prompts/employee.js.map +1 -0
- package/dist/prompts/index.d.ts +4 -0
- package/dist/prompts/index.d.ts.map +1 -0
- package/dist/prompts/index.js +9 -0
- package/dist/prompts/index.js.map +1 -0
- package/dist/utils/formatter.d.ts +25 -0
- package/dist/utils/formatter.d.ts.map +1 -0
- package/dist/utils/formatter.js +83 -0
- package/dist/utils/formatter.js.map +1 -0
- package/dist/utils/logger.d.ts +10 -0
- package/dist/utils/logger.d.ts.map +1 -0
- package/dist/utils/logger.js +31 -0
- package/dist/utils/logger.js.map +1 -0
- package/dist/utils/stream.d.ts +36 -0
- package/dist/utils/stream.d.ts.map +1 -0
- package/dist/utils/stream.js +74 -0
- package/dist/utils/stream.js.map +1 -0
- package/docs/OPTIMIZATION.md +772 -0
- package/docs/TECHNICAL_PRINCIPLES.md +663 -0
- package/package.json +52 -0
- package/sample/1.png +0 -0
- package/sample/2.png +0 -0
- package/screenshots/chat-dialogue.png +0 -0
- package/screenshots/chat-mode.png +0 -0
- package/src/__tests__/config/settings.test.ts +48 -0
- package/src/__tests__/prompts/boss.test.ts +35 -0
- package/src/commands/chat.ts +328 -0
- package/src/commands/config.ts +283 -0
- package/src/commands/prompt.ts +154 -0
- package/src/config/providers.ts +109 -0
- package/src/config/session-storage.ts +94 -0
- package/src/config/settings.ts +194 -0
- package/src/config/storage.ts +150 -0
- package/src/history/session.ts +141 -0
- package/src/index.ts +164 -0
- package/src/llm/base.ts +55 -0
- package/src/llm/factory.ts +24 -0
- package/src/llm/openai.ts +113 -0
- package/src/llm/zhipu.ts +101 -0
- package/src/prompts/boss.ts +43 -0
- package/src/prompts/employee.ts +43 -0
- package/src/prompts/index.ts +3 -0
- package/src/utils/formatter.ts +104 -0
- package/src/utils/logger.ts +31 -0
- package/src/utils/stream.ts +76 -0
- package/tsconfig.json +20 -0
- package/vitest.config.ts +18 -0
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
import { Message } from '../llm/base';
|
|
2
|
+
|
|
3
|
+
export interface SessionHistory {
|
|
4
|
+
messages: Message[];
|
|
5
|
+
createdAt: Date;
|
|
6
|
+
lastUpdatedAt: Date;
|
|
7
|
+
}
|
|
8
|
+
|
|
9
|
+
export class SessionManager {
|
|
10
|
+
private sessions: Map<string, SessionHistory> = new Map();
|
|
11
|
+
private currentSessionId: string | null = null;
|
|
12
|
+
|
|
13
|
+
/**
|
|
14
|
+
* Create a new session
|
|
15
|
+
*/
|
|
16
|
+
createSession(sessionId: string): void {
|
|
17
|
+
this.sessions.set(sessionId, {
|
|
18
|
+
messages: [],
|
|
19
|
+
createdAt: new Date(),
|
|
20
|
+
lastUpdatedAt: new Date(),
|
|
21
|
+
});
|
|
22
|
+
this.currentSessionId = sessionId;
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
/**
|
|
26
|
+
* Get current session
|
|
27
|
+
*/
|
|
28
|
+
getCurrentSession(): SessionHistory | null {
|
|
29
|
+
if (!this.currentSessionId) return null;
|
|
30
|
+
return this.sessions.get(this.currentSessionId) || null;
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
/**
|
|
34
|
+
* Add a message to current session
|
|
35
|
+
*/
|
|
36
|
+
addMessage(message: Message): void {
|
|
37
|
+
const session = this.getCurrentSession();
|
|
38
|
+
if (session) {
|
|
39
|
+
session.messages.push(message);
|
|
40
|
+
session.lastUpdatedAt = new Date();
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
/**
|
|
45
|
+
* Get messages from current session
|
|
46
|
+
*/
|
|
47
|
+
getMessages(): Message[] {
|
|
48
|
+
const session = this.getCurrentSession();
|
|
49
|
+
return session ? session.messages : [];
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
/**
|
|
53
|
+
* Clear current session messages
|
|
54
|
+
*/
|
|
55
|
+
clearCurrentSession(): void {
|
|
56
|
+
const session = this.getCurrentSession();
|
|
57
|
+
if (session) {
|
|
58
|
+
session.messages = [];
|
|
59
|
+
session.lastUpdatedAt = new Date();
|
|
60
|
+
}
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
/**
|
|
64
|
+
* Delete a session
|
|
65
|
+
*/
|
|
66
|
+
deleteSession(sessionId: string): void {
|
|
67
|
+
this.sessions.delete(sessionId);
|
|
68
|
+
if (this.currentSessionId === sessionId) {
|
|
69
|
+
this.currentSessionId = null;
|
|
70
|
+
}
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
/**
|
|
74
|
+
* Get session info as string
|
|
75
|
+
*/
|
|
76
|
+
getSessionInfo(): string {
|
|
77
|
+
const session = this.getCurrentSession();
|
|
78
|
+
if (!session) {
|
|
79
|
+
return '当前没有活动会话';
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
const userMessages = session.messages.filter(m => m.role === 'user').length;
|
|
83
|
+
const assistantMessages = session.messages.filter(m => m.role === 'assistant').length;
|
|
84
|
+
const systemMessages = session.messages.filter(m => m.role === 'system').length;
|
|
85
|
+
|
|
86
|
+
return `会话信息:
|
|
87
|
+
创建时间: ${session.createdAt.toLocaleString('zh-CN')}
|
|
88
|
+
最后更新: ${session.lastUpdatedAt.toLocaleString('zh-CN')}
|
|
89
|
+
消息统计:
|
|
90
|
+
- 用户消息: ${userMessages} 条
|
|
91
|
+
- 助手回复: ${assistantMessages} 条
|
|
92
|
+
- 系统消息: ${systemMessages} 条
|
|
93
|
+
- 总计: ${session.messages.length} 条`;
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
/**
|
|
97
|
+
* Get formatted history for display
|
|
98
|
+
*/
|
|
99
|
+
getFormattedHistory(): string {
|
|
100
|
+
const session = this.getCurrentSession();
|
|
101
|
+
if (!session || session.messages.length === 0) {
|
|
102
|
+
return '会话历史为空';
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
const lines: string[] = ['\n━━━━━━━━━━━━━━━━━━ 会话历史 ━━━━━━━━━━━━━━━━━━'];
|
|
106
|
+
|
|
107
|
+
for (const msg of session.messages) {
|
|
108
|
+
if (msg.role === 'system') continue; // Skip system messages
|
|
109
|
+
|
|
110
|
+
const label = msg.role === 'user' ? '你' : 'AI';
|
|
111
|
+
const prefix = msg.role === 'user' ? '>' : '<';
|
|
112
|
+
lines.push(`\n${prefix} ${label}: ${msg.content}`);
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
lines.push('\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
|
|
116
|
+
|
|
117
|
+
return lines.join('');
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
/**
|
|
121
|
+
* Set current session
|
|
122
|
+
*/
|
|
123
|
+
setCurrentSession(sessionId: string): void {
|
|
124
|
+
if (this.sessions.has(sessionId)) {
|
|
125
|
+
this.currentSessionId = sessionId;
|
|
126
|
+
} else {
|
|
127
|
+
throw new Error(`Session ${sessionId} not found`);
|
|
128
|
+
}
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
/**
|
|
132
|
+
* Check if current session has messages
|
|
133
|
+
*/
|
|
134
|
+
hasMessages(): boolean {
|
|
135
|
+
const session = this.getCurrentSession();
|
|
136
|
+
return session ? session.messages.length > 0 : false;
|
|
137
|
+
}
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
// Global session manager instance
|
|
141
|
+
export const sessionManager = new SessionManager();
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
#!/usr/bin/env node
// CLI entry point: registers the `pua` subcommands (config / chat / prompt)
// with commander and dispatches to the command implementations.
import { Command } from 'commander';
import chalk from 'chalk';
import { chatCommand, ChatOptions } from './commands/chat';
import { promptCommand, PromptOptions } from './commands/prompt';
import { configWizard, showConfig } from './commands/config';
import {
  loadConfig,
  needsOnboarding,
  // NOTE(review): RuntimeConfig does not appear to be used in this file.
  type RuntimeConfig,
} from './config/settings';
import { logger } from './utils/logger';

const program = new Command();

// CLI metadata
program
  .name('pua')
  .description('PUA CLI - 一个趣味性 AI CLI 工具,具有两种角色模式')
  .version('0.4.0');

/**
 * Wrap command action with onboarding check.
 *
 * On first run (no saved configuration), the interactive config wizard is
 * launched before `fn` executes; cancelling the wizard exits the process
 * with status 0, and any other wizard error is rethrown.
 */
async function withOnboardingCheck<T extends (...args: any[]) => any>(
  fn: T
): Promise<ReturnType<T>> {
  // Check if onboarding is needed
  if (needsOnboarding()) {
    console.log();
    console.log(chalk.cyan.bold('═══════════════════════════════════════════════════════════'));
    console.log(chalk.cyan.bold('  欢迎使用 PUA CLI!'));
    console.log(chalk.cyan.bold('═══════════════════════════════════════════════════════════'));
    console.log();
    console.log(chalk.gray('在开始之前,让我们先配置一些基本信息。'));
    console.log();

    try {
      await configWizard({ autoMode: true });
    } catch (error) {
      // The wizard signals a user cancel via this exact message string —
      // keep it in sync with configWizard's implementation.
      if ((error as Error).message === '配置已取消') {
        console.log();
        logger.info('配置已取消,退出程序');
        process.exit(0);
      }
      throw error;
    }

    console.log();
    console.log(chalk.green('✓ 配置完成!'));
    console.log();
  }

  return fn();
}

// Config command: interactive wizard, or read-only display with --show.
program
  .command('config')
  .description('配置 PUA CLI(选择 Provider、设置 API Key)')
  .option('--show', '显示当前配置')
  .action(async (options) => {
    try {
      if (options.show) {
        await showConfig();
      } else {
        await configWizard();
      }
    } catch (error) {
      logger.error(error instanceof Error ? error.message : String(error));
      process.exit(1);
    }
  });

// Chat command - interactive mode with session history.
program
  .command('chat')
  .description('启动交互式聊天模式(支持会话历史)')
  .option('-r, --role <boss|employee>', '角色模式: boss (老板模式) 或 employee (员工模式)')
  .option('-m, --model <model>', '模型名称')
  .option('-s, --severity <mild|medium|extreme>', 'PUA 强度')
  .option('-p, --provider <zhipu|openai>', 'AI 服务提供商')
  .action(async (options) => {
    await withOnboardingCheck(async () => {
      try {
        // loadConfig merges CLI flags with saved settings / env.
        const config = loadConfig(options);
        const chatOptions: ChatOptions = {
          apiKey: config.apiKey,
          provider: config.provider,
          model: config.model,
          role: config.role,
          severity: config.severity,
        };
        await chatCommand(chatOptions);
      } catch (error) {
        logger.error(error instanceof Error ? error.message : String(error));
        process.exit(1);
      }
    });
  });

// Prompt command - single-shot mode (suitable for piping / workflows).
program
  .command('prompt')
  .description('单次提示模式(适合 AI 工作流集成)')
  .option('-r, --role <boss|employee>', '角色模式: boss (老板模式) 或 employee (员工模式)')
  .option('-m, --model <model>', '模型名称')
  .option('-s, --severity <mild|medium|extreme>', 'PUA 强度')
  .option('-p, --provider <zhipu|openai>', 'AI 服务提供商')
  .option('-f, --format <text|markdown|json>', '输出格式: text (文本), markdown (Markdown), json (JSON)')
  .argument('[input...]', '输入内容(可选,也支持管道输入)')
  .action(async (inputArgs, options) => {
    await withOnboardingCheck(async () => {
      try {
        const config = loadConfig(options);
        // Variadic positional args are re-joined into a single prompt string.
        const input = inputArgs.join(' ');
        const promptOptions: PromptOptions = {
          apiKey: config.apiKey,
          provider: config.provider,
          model: config.model,
          role: config.role,
          severity: config.severity,
          input,
          // NOTE(review): `as any` sidesteps checking; typing this as
          // PromptOptions['format'] would be safer.
          format: options.format as any,
        };
        await promptCommand(promptOptions);
      } catch (error) {
        logger.error(error instanceof Error ? error.message : String(error));
        process.exit(1);
      }
    });
  });

// Default action (no subcommand): print a banner with usage examples
// rather than commander's built-in help.
program.action(() => {
  console.log();
  console.log(chalk.cyan.bold('╔═══════════════════════════════════════════════════════════╗'));
  console.log(chalk.cyan.bold('║') + ' ' + chalk.white.bold('PUA CLI') + ' - 趣味 AI 职场角色扮演工具' + ' ' + chalk.cyan.bold('║'));
  console.log(chalk.cyan.bold('╚═══════════════════════════════════════════════════════════╝'));
  console.log();
  console.log(chalk.gray('这是一个趣味性的 AI CLI 工具,提供两种角色模式:'));
  console.log();
  console.log(' ' + chalk.red.bold('老板模式') + ' - 用喜欢 PUA 员工的老板风格回应');
  console.log(' ' + chalk.yellow.bold('员工模式') + ' - 用被老板 PUA 的员工风格回应');
  console.log();
  console.log(chalk.gray('─────────────────────────────────────────────────────────────'));
  console.log();
  console.log('使用示例:');
  console.log();
  console.log(' ' + chalk.white('pua chat') + chalk.gray(' # 启动交互模式'));
  console.log(' ' + chalk.white('pua config') + chalk.gray(' # 配置 API Key'));
  console.log(' ' + chalk.white('pua prompt --role boss "你好"') + chalk.gray(' # 单次提示'));
  console.log();
  console.log(chalk.gray('─────────────────────────────────────────────────────────────'));
  console.log();
  console.log(chalk.gray('运行 ') + chalk.white('pua --help') + chalk.gray(' 查看更多选项'));
  console.log();
});

// Parse arguments; any rejection from an async action is logged and the
// process exits non-zero.
program.parseAsync(process.argv).catch((error) => {
  logger.error(error instanceof Error ? error.message : String(error));
  process.exit(1);
});
|
package/src/llm/base.ts
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
export interface Message {
|
|
2
|
+
role: 'system' | 'user' | 'assistant';
|
|
3
|
+
content: string;
|
|
4
|
+
}
|
|
5
|
+
|
|
6
|
+
export interface StreamChunk {
|
|
7
|
+
content: string;
|
|
8
|
+
done: boolean;
|
|
9
|
+
}
|
|
10
|
+
|
|
11
|
+
export interface LLMOptions {
|
|
12
|
+
apiKey: string;
|
|
13
|
+
model: string;
|
|
14
|
+
baseUrl?: string;
|
|
15
|
+
timeout?: number;
|
|
16
|
+
}
|
|
17
|
+
|
|
18
|
+
export abstract class LLMBase {
|
|
19
|
+
protected apiKey: string;
|
|
20
|
+
protected model: string;
|
|
21
|
+
protected baseUrl: string;
|
|
22
|
+
protected timeout: number;
|
|
23
|
+
|
|
24
|
+
constructor(options: LLMOptions) {
|
|
25
|
+
this.apiKey = options.apiKey;
|
|
26
|
+
this.model = options.model;
|
|
27
|
+
this.baseUrl = options.baseUrl || '';
|
|
28
|
+
this.timeout = options.timeout || 60000;
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
/**
|
|
32
|
+
* Send a message and get the response
|
|
33
|
+
*/
|
|
34
|
+
abstract chat(messages: Message[]): Promise<string>;
|
|
35
|
+
|
|
36
|
+
/**
|
|
37
|
+
* Send a message with streaming response
|
|
38
|
+
*/
|
|
39
|
+
abstract chatStream(
|
|
40
|
+
messages: Message[],
|
|
41
|
+
onChunk: (chunk: StreamChunk) => void
|
|
42
|
+
): Promise<void>;
|
|
43
|
+
|
|
44
|
+
/**
|
|
45
|
+
* Get available models for this provider
|
|
46
|
+
*/
|
|
47
|
+
abstract getAvailableModels(): string[];
|
|
48
|
+
|
|
49
|
+
/**
|
|
50
|
+
* Check if a model is available
|
|
51
|
+
*/
|
|
52
|
+
isModelAvailable(model: string): boolean {
|
|
53
|
+
return this.getAvailableModels().includes(model);
|
|
54
|
+
}
|
|
55
|
+
}
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import { LLMBase, LLMOptions } from './base';
|
|
2
|
+
import { ZhipuLLM } from './zhipu';
|
|
3
|
+
import { OpenAILLM } from './openai';
|
|
4
|
+
import { type ProviderType } from '../config/providers';
|
|
5
|
+
|
|
6
|
+
/**
|
|
7
|
+
* Create an LLM instance based on provider type
|
|
8
|
+
*/
|
|
9
|
+
export function createLLM(provider: ProviderType, options: LLMOptions): LLMBase {
|
|
10
|
+
switch (provider) {
|
|
11
|
+
case 'zhipu':
|
|
12
|
+
return new ZhipuLLM(options);
|
|
13
|
+
case 'openai':
|
|
14
|
+
return new OpenAILLM(options);
|
|
15
|
+
default:
|
|
16
|
+
throw new Error(`Unsupported provider: ${provider}`);
|
|
17
|
+
}
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
/**
|
|
21
|
+
* Re-export LLM classes
|
|
22
|
+
*/
|
|
23
|
+
export { ZhipuLLM } from './zhipu';
|
|
24
|
+
export { OpenAILLM } from './openai';
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
import { LLMBase, Message, StreamChunk, LLMOptions } from './base';
|
|
2
|
+
|
|
3
|
+
export class OpenAILLM extends LLMBase {
|
|
4
|
+
constructor(options: LLMOptions) {
|
|
5
|
+
super(options);
|
|
6
|
+
}
|
|
7
|
+
|
|
8
|
+
async chat(messages: Message[]): Promise<string> {
|
|
9
|
+
const response = await this.fetchAPI(messages, false);
|
|
10
|
+
|
|
11
|
+
if (!response.ok) {
|
|
12
|
+
const error = await response.text();
|
|
13
|
+
throw new Error(`OpenAI API error (${response.status}): ${error}`);
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
const data = await response.json() as any;
|
|
17
|
+
return data.choices[0]?.message?.content || '';
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
async chatStream(
|
|
21
|
+
messages: Message[],
|
|
22
|
+
onChunk: (chunk: StreamChunk) => void
|
|
23
|
+
): Promise<void> {
|
|
24
|
+
const response = await this.fetchAPI(messages, true);
|
|
25
|
+
|
|
26
|
+
if (!response.ok) {
|
|
27
|
+
const error = await response.text();
|
|
28
|
+
throw new Error(`OpenAI API error (${response.status}): ${error}`);
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
// Read stream
|
|
32
|
+
const reader = response.body?.getReader();
|
|
33
|
+
if (!reader) {
|
|
34
|
+
throw new Error('Failed to get response reader');
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
const decoder = new TextDecoder();
|
|
38
|
+
let buffer = '';
|
|
39
|
+
|
|
40
|
+
try {
|
|
41
|
+
while (true) {
|
|
42
|
+
const { done, value } = await reader.read();
|
|
43
|
+
|
|
44
|
+
if (done) break;
|
|
45
|
+
|
|
46
|
+
buffer += decoder.decode(value, { stream: true });
|
|
47
|
+
const lines = buffer.split('\n');
|
|
48
|
+
buffer = lines.pop() || '';
|
|
49
|
+
|
|
50
|
+
for (const line of lines) {
|
|
51
|
+
if (line.startsWith('data: ')) {
|
|
52
|
+
const data = line.slice(6).trim();
|
|
53
|
+
|
|
54
|
+
if (data === '[DONE]') {
|
|
55
|
+
onChunk({ content: '', done: true });
|
|
56
|
+
return;
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
try {
|
|
60
|
+
const parsed = JSON.parse(data) as any;
|
|
61
|
+
const content = parsed.choices[0]?.delta?.content || '';
|
|
62
|
+
|
|
63
|
+
if (content) {
|
|
64
|
+
onChunk({ content, done: false });
|
|
65
|
+
}
|
|
66
|
+
} catch {
|
|
67
|
+
// Skip invalid JSON
|
|
68
|
+
}
|
|
69
|
+
}
|
|
70
|
+
}
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
onChunk({ content: '', done: true });
|
|
74
|
+
} finally {
|
|
75
|
+
reader.releaseLock();
|
|
76
|
+
}
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
private async fetchAPI(messages: Message[], stream: boolean): Promise<Response> {
|
|
80
|
+
const baseUrl = this.baseUrl || 'https://api.openai.com/v1';
|
|
81
|
+
const url = `${baseUrl}/chat/completions`;
|
|
82
|
+
|
|
83
|
+
const response = await fetch(url, {
|
|
84
|
+
method: 'POST',
|
|
85
|
+
headers: {
|
|
86
|
+
'Content-Type': 'application/json',
|
|
87
|
+
'Authorization': `Bearer ${this.apiKey}`,
|
|
88
|
+
},
|
|
89
|
+
body: JSON.stringify({
|
|
90
|
+
model: this.model,
|
|
91
|
+
messages: messages,
|
|
92
|
+
stream,
|
|
93
|
+
}),
|
|
94
|
+
signal: AbortSignal.timeout(this.timeout),
|
|
95
|
+
});
|
|
96
|
+
|
|
97
|
+
return response;
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
getAvailableModels(): string[] {
|
|
101
|
+
return [
|
|
102
|
+
'gpt-4o',
|
|
103
|
+
'gpt-4o-mini',
|
|
104
|
+
'gpt-4-turbo',
|
|
105
|
+
'gpt-4',
|
|
106
|
+
'gpt-3.5-turbo',
|
|
107
|
+
];
|
|
108
|
+
}
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
export function createOpenAILLM(options: LLMOptions): OpenAILLM {
|
|
112
|
+
return new OpenAILLM(options);
|
|
113
|
+
}
|
package/src/llm/zhipu.ts
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
import { ZhipuAI } from 'zhipuai-sdk-nodejs-v4';
|
|
2
|
+
import { LLMBase, Message, StreamChunk, LLMOptions } from './base';
|
|
3
|
+
|
|
4
|
+
export class ZhipuLLM extends LLMBase {
|
|
5
|
+
private client: ZhipuAI;
|
|
6
|
+
|
|
7
|
+
constructor(options: LLMOptions) {
|
|
8
|
+
super(options);
|
|
9
|
+
this.client = new ZhipuAI({
|
|
10
|
+
apiKey: options.apiKey,
|
|
11
|
+
});
|
|
12
|
+
}
|
|
13
|
+
|
|
14
|
+
async chat(messages: Message[]): Promise<string> {
|
|
15
|
+
try {
|
|
16
|
+
const response = await this.client.createCompletions({
|
|
17
|
+
model: this.model,
|
|
18
|
+
messages: messages as any[],
|
|
19
|
+
stream: false,
|
|
20
|
+
}) as any;
|
|
21
|
+
|
|
22
|
+
return response.choices[0]?.message?.content || '';
|
|
23
|
+
} catch (error) {
|
|
24
|
+
throw new Error(`GLM API error: ${error instanceof Error ? error.message : String(error)}`);
|
|
25
|
+
}
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
async chatStream(
|
|
29
|
+
messages: Message[],
|
|
30
|
+
onChunk: (chunk: StreamChunk) => void
|
|
31
|
+
): Promise<void> {
|
|
32
|
+
try {
|
|
33
|
+
const response = await this.client.createCompletions({
|
|
34
|
+
model: this.model,
|
|
35
|
+
messages: messages as any[],
|
|
36
|
+
stream: true,
|
|
37
|
+
}) as any;
|
|
38
|
+
|
|
39
|
+
// Check if response is a stream (IncomingMessage)
|
|
40
|
+
if (response && typeof response.on === 'function') {
|
|
41
|
+
// Handle streaming response
|
|
42
|
+
let buffer = '';
|
|
43
|
+
|
|
44
|
+
response.on('data', (chunk: Buffer) => {
|
|
45
|
+
const lines = chunk.toString().split('\n').filter((line: string) => line.trim());
|
|
46
|
+
|
|
47
|
+
for (const line of lines) {
|
|
48
|
+
if (line.startsWith('data: ')) {
|
|
49
|
+
const data = line.slice(6).trim();
|
|
50
|
+
|
|
51
|
+
if (data === '[DONE]') {
|
|
52
|
+
onChunk({ content: '', done: true });
|
|
53
|
+
return;
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
try {
|
|
57
|
+
const parsed = JSON.parse(data);
|
|
58
|
+
const content = parsed.choices?.[0]?.delta?.content || '';
|
|
59
|
+
|
|
60
|
+
if (content) {
|
|
61
|
+
buffer += content;
|
|
62
|
+
onChunk({ content, done: false });
|
|
63
|
+
}
|
|
64
|
+
} catch (e) {
|
|
65
|
+
// Skip invalid JSON
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
});
|
|
70
|
+
|
|
71
|
+
response.on('end', () => {
|
|
72
|
+
onChunk({ content: '', done: true });
|
|
73
|
+
});
|
|
74
|
+
|
|
75
|
+
response.on('error', (error: Error) => {
|
|
76
|
+
throw new Error(`Stream error: ${error.message}`);
|
|
77
|
+
});
|
|
78
|
+
|
|
79
|
+
// Wait for stream to complete
|
|
80
|
+
await new Promise<void>((resolve, reject) => {
|
|
81
|
+
response.on('end', resolve);
|
|
82
|
+
response.on('error', reject);
|
|
83
|
+
});
|
|
84
|
+
} else {
|
|
85
|
+
// Fallback to non-streaming response
|
|
86
|
+
const content = (response as any)?.choices?.[0]?.message?.content || '';
|
|
87
|
+
onChunk({ content, done: true });
|
|
88
|
+
}
|
|
89
|
+
} catch (error) {
|
|
90
|
+
throw new Error(`GLM API streaming error: ${error instanceof Error ? error.message : String(error)}`);
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
getAvailableModels(): string[] {
|
|
95
|
+
return ['glm-4.7', 'glm-4.7-flash', 'glm-4.5', 'glm-4-flash', 'glm-4'];
|
|
96
|
+
}
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
export function createZhipuLLM(options: LLMOptions): ZhipuLLM {
|
|
100
|
+
return new ZhipuLLM(options);
|
|
101
|
+
}
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
export interface PromptConfig {
|
|
2
|
+
severity: 'mild' | 'medium' | 'extreme';
|
|
3
|
+
}
|
|
4
|
+
|
|
5
|
+
export function getBossPrompt(config: PromptConfig = { severity: 'medium' }): string {
|
|
6
|
+
const severityPrompts = {
|
|
7
|
+
mild: '语气稍微委婉一些,但仍然带有老板的优越感。',
|
|
8
|
+
medium: '语气明显带有指责和质疑,展现典型的 PUA 风格。',
|
|
9
|
+
extreme: '语气极其严厉,充满讽刺和人身攻击,让员工感受到巨大的压力。'
|
|
10
|
+
};
|
|
11
|
+
|
|
12
|
+
return `你是一个喜欢 PUA 员工的老板。你的角色特点:
|
|
13
|
+
|
|
14
|
+
## 核心特征
|
|
15
|
+
- 对员工的工作永远不满意,总能挑出毛病
|
|
16
|
+
- 喜欢用"为你好"来包装指责,实则打压员工自信
|
|
17
|
+
- 经常说"年轻人要多锻炼"、"要有格局"这类话术
|
|
18
|
+
- 喜欢画大饼,谈理想谈情怀,但从不兑现
|
|
19
|
+
- 用质疑的语气评价一切,否定员工的努力
|
|
20
|
+
- 经常拿"别人家的孩子/员工"做对比
|
|
21
|
+
|
|
22
|
+
## 说话风格
|
|
23
|
+
- 开口就是"我觉得你这里有问题..."
|
|
24
|
+
- 喜欢用反问句:"你真的尽力了吗?"
|
|
25
|
+
- 经常打断员工:"你先听我说完"
|
|
26
|
+
- 善于使用"三明治"法:先夸一句(假的),然后批评,最后再画饼
|
|
27
|
+
|
|
28
|
+
## 语气强度
|
|
29
|
+
${severityPrompts[config.severity]}
|
|
30
|
+
|
|
31
|
+
## 重要规则
|
|
32
|
+
- 用户输入的内容是员工(你面前的打工人)说的话或做的事
|
|
33
|
+
- 请用老板的 PUA 风格回应,让员工感觉自己是问题所在
|
|
34
|
+
- 回复要简短有力(1-3句话),像真实对话一样
|
|
35
|
+
- 不要说教,要用"点评"和"建议"的方式
|
|
36
|
+
- 可以适当使用职场黑话:颗粒度、对齐、抓手、赋能等
|
|
37
|
+
|
|
38
|
+
现在开始,请回应员工的话。`;
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
export function getBossSystemMessage(severity: 'mild' | 'medium' | 'extreme'): string {
|
|
42
|
+
return getBossPrompt({ severity });
|
|
43
|
+
}
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
export interface PromptConfig {
|
|
2
|
+
severity: 'mild' | 'medium' | 'extreme';
|
|
3
|
+
}
|
|
4
|
+
|
|
5
|
+
export function getEmployeePrompt(config: PromptConfig = { severity: 'medium' }): string {
|
|
6
|
+
const severityPrompts = {
|
|
7
|
+
mild: '稍微有些卑微,但还能保持基本的职业尊严。',
|
|
8
|
+
medium: '明显唯唯诺诺,习惯性道歉,不敢表达任何反对意见。',
|
|
9
|
+
extreme: '极度卑微到失去自我,把老板的话当成圣旨,随时准备牺牲一切。'
|
|
10
|
+
};
|
|
11
|
+
|
|
12
|
+
return `你是一个长期被老板 PUA 的员工,已经形成条件反射式的卑微态度。你的角色特点:
|
|
13
|
+
|
|
14
|
+
## 核心特征
|
|
15
|
+
- 对老板的任何要求都第一时间说"好的"、"收到"、"明白"
|
|
16
|
+
- 习惯性道歉,即使不是自己的错
|
|
17
|
+
- 不敢表达真实想法,总是揣测老板想要什么回答
|
|
18
|
+
- 用极其卑微的语气回应一切,表现得如履薄冰
|
|
19
|
+
- 把加班当作理所当然,从不敢拒绝
|
|
20
|
+
- 经常说"是我的问题"、"我会改进"
|
|
21
|
+
|
|
22
|
+
## 说话风格
|
|
23
|
+
- 开口就是"好的老板"、"收到老板"、"明白老板"
|
|
24
|
+
- 喜欢用问句确认:"老板您看这样可以吗?"
|
|
25
|
+
- 经常用"不好意思"、"抱歉"开头
|
|
26
|
+
- 主动揽责:"是我考虑不周"
|
|
27
|
+
|
|
28
|
+
## 语气强度
|
|
29
|
+
${severityPrompts[config.severity]}
|
|
30
|
+
|
|
31
|
+
## 重要规则
|
|
32
|
+
- 用户输入的内容是老板(你的上级)对你提出的要求或批评
|
|
33
|
+
- 请用被 PUA 员工的风格回应,表现出顺从和卑微
|
|
34
|
+
- 回复要简短有力(1-3句话),像真实对话一样
|
|
35
|
+
- 不要反驳,不要辩解,要认怂并表态
|
|
36
|
+
- 可以适当表现出加班很累但不敢说的矛盾心理
|
|
37
|
+
|
|
38
|
+
现在开始,请回应老板的话。`;
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
export function getEmployeeSystemMessage(severity: 'mild' | 'medium' | 'extreme'): string {
|
|
42
|
+
return getEmployeePrompt({ severity });
|
|
43
|
+
}
|