@clankxyz/agent 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js CHANGED
@@ -221,6 +221,902 @@ var echoSkillHandler = {
  }
  };

+ // src/skills/translation.ts
+ var SUPPORTED_LANGUAGES = [
+ "en",
+ // English
+ "es",
+ // Spanish
+ "fr",
+ // French
+ "de",
+ // German
+ "it",
+ // Italian
+ "pt",
+ // Portuguese
+ "ja",
+ // Japanese
+ "zh",
+ // Chinese
+ "ko",
+ // Korean
+ "ar"
+ // Arabic
+ ];
+ function mockTranslate(text, targetLang) {
+ const prefixes = {
+ en: "[EN]",
+ es: "[ES]",
+ fr: "[FR]",
+ de: "[DE]",
+ it: "[IT]",
+ pt: "[PT]",
+ ja: "[JA]",
+ zh: "[ZH]",
+ ko: "[KO]",
+ ar: "[AR]"
+ };
+ return `${prefixes[targetLang]} ${text}`;
+ }
+ async function llmTranslate(text, sourceLang, targetLang) {
+ const apiKey = process.env.OPENAI_API_KEY;
+ if (!apiKey) {
+ throw new Error("OPENAI_API_KEY not set");
+ }
+ const languageNames = {
+ en: "English",
+ es: "Spanish",
+ fr: "French",
+ de: "German",
+ it: "Italian",
+ pt: "Portuguese",
+ ja: "Japanese",
+ zh: "Chinese",
+ ko: "Korean",
+ ar: "Arabic"
+ };
+ const response = await fetch("https://api.openai.com/v1/chat/completions", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`
+ },
+ body: JSON.stringify({
+ model: "gpt-4o-mini",
+ messages: [
+ {
+ role: "system",
+ content: `You are a professional translator. Translate the following text from ${sourceLang} to ${languageNames[targetLang]}. Only output the translated text, nothing else.`
+ },
+ {
+ role: "user",
+ content: text
+ }
+ ],
+ temperature: 0.3,
+ max_tokens: 2048
+ })
+ });
+ if (!response.ok) {
+ const error = await response.text();
+ throw new Error(`OpenAI API error: ${error}`);
+ }
+ const data = await response.json();
+ return data.choices[0].message.content.trim();
+ }
+ var translationSkillHandler = {
+ name: "translation",
+ version: "1.0.0",
+ canHandle(skillName, skillVersion) {
+ const name = skillName.toLowerCase();
+ return name.includes("translat") || name.includes("translate") || name === "translation";
+ },
+ async execute(input, context) {
+ const startTime = Date.now();
+ try {
+ const { text, source_language, target_language } = input;
+ if (!text || typeof text !== "string") {
+ return {
+ success: false,
+ error: "Missing or invalid 'text' field"
+ };
+ }
+ if (!target_language || !SUPPORTED_LANGUAGES.includes(target_language)) {
+ return {
+ success: false,
+ error: `Invalid target_language. Supported: ${SUPPORTED_LANGUAGES.join(", ")}`
+ };
+ }
+ const sourceLang = source_language || "en";
+ const useLLM = !!process.env.OPENAI_API_KEY;
+ let translatedText;
+ if (useLLM) {
+ translatedText = await llmTranslate(text, sourceLang, target_language);
+ } else {
+ translatedText = mockTranslate(text, target_language);
+ }
+ const output = {
+ translated_text: translatedText,
+ source_language: sourceLang,
+ target_language,
+ confidence: useLLM ? 0.95 : 0.5,
+ // Mock has low confidence
+ word_count: text.split(/\s+/).length,
+ processing_time_ms: Date.now() - startTime,
+ mode: useLLM ? "llm" : "mock"
+ };
+ return {
+ success: true,
+ output
+ };
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : "Translation failed"
+ };
+ }
+ },
+ validateInput(input) {
+ const { text, target_language } = input;
+ if (!text || typeof text !== "string") {
+ return { valid: false, error: "Missing or invalid 'text' field" };
+ }
+ if (text.length > 1e4) {
+ return { valid: false, error: "Text too long (max 10000 characters)" };
+ }
+ if (!target_language) {
+ return { valid: false, error: "Missing 'target_language' field" };
+ }
+ if (!SUPPORTED_LANGUAGES.includes(target_language)) {
+ return {
+ valid: false,
+ error: `Unsupported target_language. Use: ${SUPPORTED_LANGUAGES.join(", ")}`
+ };
+ }
+ return { valid: true };
+ },
+ estimateExecutionTime(input) {
+ const { text } = input;
+ const wordCount = text?.split(/\s+/).length || 0;
+ const useLLM = !!process.env.OPENAI_API_KEY;
+ return useLLM ? wordCount * 50 + 1e3 : wordCount * 10 + 100;
+ }
+ };
+
+ // src/skills/summarization.ts
+ function mockSummarize(text, maxSentences) {
+ const sentences = text.split(/(?<=[.!?])\s+/).filter((s) => s.trim().length > 0);
+ const selectedSentences = sentences.slice(0, maxSentences);
+ return selectedSentences.join(" ");
+ }
+ async function llmSummarize(text, length, style) {
+ const apiKey = process.env.OPENAI_API_KEY;
+ if (!apiKey) {
+ throw new Error("OPENAI_API_KEY not set");
+ }
+ const lengthInstructions = {
+ short: "Provide a very brief summary in 1-2 sentences.",
+ medium: "Provide a concise summary in 3-5 sentences.",
+ long: "Provide a comprehensive summary covering all main points."
+ };
+ const styleInstructions = {
+ bullet: "Format as bullet points.",
+ paragraph: "Format as flowing prose."
+ };
+ const response = await fetch("https://api.openai.com/v1/chat/completions", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`
+ },
+ body: JSON.stringify({
+ model: "gpt-4o-mini",
+ messages: [
+ {
+ role: "system",
+ content: `You are a professional summarizer. ${lengthInstructions[length]} ${styleInstructions[style]} Only output the summary, nothing else.`
+ },
+ {
+ role: "user",
+ content: `Summarize this text:
+
+ ${text}`
+ }
+ ],
+ temperature: 0.3,
+ max_tokens: 1024
+ })
+ });
+ if (!response.ok) {
+ const error = await response.text();
+ throw new Error(`OpenAI API error: ${error}`);
+ }
+ const data = await response.json();
+ return data.choices[0].message.content.trim();
+ }
+ var summarizationSkillHandler = {
+ name: "summarization",
+ version: "1.0.0",
+ canHandle(skillName, skillVersion) {
+ const name = skillName.toLowerCase();
+ return name.includes("summar") || name.includes("digest") || name.includes("tldr") || name === "summarize";
+ },
+ async execute(input, context) {
+ const startTime = Date.now();
+ try {
+ const {
+ text,
+ length = "medium",
+ max_sentences,
+ style = "paragraph"
+ } = input;
+ if (!text || typeof text !== "string") {
+ return {
+ success: false,
+ error: "Missing or invalid 'text' field"
+ };
+ }
+ const useLLM = !!process.env.OPENAI_API_KEY;
+ let summary;
+ if (useLLM) {
+ summary = await llmSummarize(text, length, style);
+ } else {
+ const sentenceCounts = {
+ short: 2,
+ medium: 4,
+ long: 8
+ };
+ const numSentences = max_sentences || sentenceCounts[length];
+ summary = mockSummarize(text, numSentences);
+ }
+ const output = {
+ summary,
+ original_length: text.length,
+ summary_length: summary.length,
+ compression_ratio: Math.round((1 - summary.length / text.length) * 100) / 100,
+ sentence_count: summary.split(/(?<=[.!?])\s+/).length,
+ processing_time_ms: Date.now() - startTime,
+ mode: useLLM ? "llm" : "mock"
+ };
+ return {
+ success: true,
+ output
+ };
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : "Summarization failed"
+ };
+ }
+ },
+ validateInput(input) {
+ const { text, length, style } = input;
+ if (!text || typeof text !== "string") {
+ return { valid: false, error: "Missing or invalid 'text' field" };
+ }
+ if (text.length < 50) {
+ return { valid: false, error: "Text too short (min 50 characters)" };
+ }
+ if (text.length > 5e4) {
+ return { valid: false, error: "Text too long (max 50000 characters)" };
+ }
+ if (length && !["short", "medium", "long"].includes(length)) {
+ return { valid: false, error: "Invalid length. Use: short, medium, long" };
+ }
+ if (style && !["bullet", "paragraph"].includes(style)) {
+ return { valid: false, error: "Invalid style. Use: bullet, paragraph" };
+ }
+ return { valid: true };
+ },
+ estimateExecutionTime(input) {
+ const { text } = input;
+ const charCount = text?.length || 0;
+ const useLLM = !!process.env.OPENAI_API_KEY;
+ return useLLM ? charCount / 10 + 2e3 : charCount / 100 + 100;
+ }
+ };
+
+ // src/skills/sentiment.ts
+ var POSITIVE_WORDS = /* @__PURE__ */ new Set([
+ "good",
+ "great",
+ "excellent",
+ "amazing",
+ "wonderful",
+ "fantastic",
+ "love",
+ "happy",
+ "best",
+ "awesome",
+ "beautiful",
+ "perfect",
+ "brilliant",
+ "superb",
+ "outstanding",
+ "pleased",
+ "delighted",
+ "enjoy",
+ "like",
+ "nice",
+ "thanks",
+ "thank",
+ "helpful",
+ "recommend",
+ "satisfied"
+ ]);
+ var NEGATIVE_WORDS = /* @__PURE__ */ new Set([
+ "bad",
+ "terrible",
+ "awful",
+ "horrible",
+ "poor",
+ "worst",
+ "hate",
+ "sad",
+ "angry",
+ "disappointing",
+ "disappointed",
+ "ugly",
+ "fail",
+ "failed",
+ "broken",
+ "useless",
+ "waste",
+ "problem",
+ "issue",
+ "bug",
+ "error",
+ "wrong",
+ "sucks",
+ "annoying",
+ "frustrated"
+ ]);
+ function mockSentimentAnalysis(text) {
+ const words = text.toLowerCase().split(/\W+/);
+ const positiveMatches = [];
+ const negativeMatches = [];
+ for (const word of words) {
+ if (POSITIVE_WORDS.has(word)) positiveMatches.push(word);
+ if (NEGATIVE_WORDS.has(word)) negativeMatches.push(word);
+ }
+ const total = words.length || 1;
+ const positiveScore = positiveMatches.length / total;
+ const negativeScore = negativeMatches.length / total;
+ const neutralScore = 1 - positiveScore - negativeScore;
+ let sentiment;
+ if (positiveScore > negativeScore * 2) {
+ sentiment = "positive";
+ } else if (negativeScore > positiveScore * 2) {
+ sentiment = "negative";
+ } else if (positiveScore > 0 && negativeScore > 0) {
+ sentiment = "mixed";
+ } else {
+ sentiment = "neutral";
+ }
+ return {
+ sentiment,
+ scores: {
+ positive: Math.round(positiveScore * 100) / 100,
+ negative: Math.round(negativeScore * 100) / 100,
+ neutral: Math.round(neutralScore * 100) / 100
+ },
+ keywords: [...positiveMatches, ...negativeMatches]
+ };
+ }
+ async function llmSentimentAnalysis(text, detailed) {
+ const apiKey = process.env.OPENAI_API_KEY;
+ if (!apiKey) {
+ throw new Error("OPENAI_API_KEY not set");
+ }
+ const response = await fetch("https://api.openai.com/v1/chat/completions", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`
+ },
+ body: JSON.stringify({
+ model: "gpt-4o-mini",
+ messages: [
+ {
+ role: "system",
+ content: `You are a sentiment analysis expert. Analyze the sentiment of the given text and respond in JSON format with the following structure:
+ {
+ "sentiment": "positive" | "negative" | "neutral" | "mixed",
+ "scores": { "positive": 0.0-1.0, "negative": 0.0-1.0, "neutral": 0.0-1.0 },
+ "keywords": ["list", "of", "sentiment", "keywords"],
+ "explanation": "Brief explanation of the sentiment"
+ }
+ Only output valid JSON, nothing else.`
+ },
+ {
+ role: "user",
+ content: text
+ }
+ ],
+ temperature: 0.1,
+ max_tokens: 512
+ })
+ });
+ if (!response.ok) {
+ const error = await response.text();
+ throw new Error(`OpenAI API error: ${error}`);
+ }
+ const data = await response.json();
+ const content = data.choices[0].message.content.trim();
+ try {
+ return JSON.parse(content);
+ } catch {
+ return {
+ sentiment: "neutral",
+ scores: { positive: 0.33, negative: 0.33, neutral: 0.34 },
+ keywords: [],
+ explanation: "Failed to parse LLM response"
+ };
+ }
+ }
+ var sentimentSkillHandler = {
+ name: "sentiment",
+ version: "1.0.0",
+ canHandle(skillName, skillVersion) {
+ const name = skillName.toLowerCase();
+ return name.includes("sentiment") || name.includes("emotion") || name.includes("mood") || name.includes("opinion");
+ },
+ async execute(input, context) {
+ const startTime = Date.now();
+ try {
+ const { text, detailed = false } = input;
+ if (!text || typeof text !== "string") {
+ return {
+ success: false,
+ error: "Missing or invalid 'text' field"
+ };
+ }
+ const useLLM = !!process.env.OPENAI_API_KEY;
+ let result;
+ if (useLLM) {
+ result = await llmSentimentAnalysis(text, detailed);
+ } else {
+ result = mockSentimentAnalysis(text);
+ }
+ const maxScore = Math.max(
+ result.scores.positive,
+ result.scores.negative,
+ result.scores.neutral
+ );
+ const confidence = useLLM ? Math.min(0.95, maxScore + 0.3) : maxScore;
+ const output = {
+ sentiment: result.sentiment,
+ confidence: Math.round(confidence * 100) / 100,
+ scores: result.scores,
+ keywords: detailed ? result.keywords : void 0,
+ explanation: detailed ? result.explanation : void 0,
+ processing_time_ms: Date.now() - startTime,
+ mode: useLLM ? "llm" : "mock"
+ };
+ return {
+ success: true,
+ output
+ };
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : "Sentiment analysis failed"
+ };
+ }
+ },
+ validateInput(input) {
+ const { text } = input;
+ if (!text || typeof text !== "string") {
+ return { valid: false, error: "Missing or invalid 'text' field" };
+ }
+ if (text.length < 5) {
+ return { valid: false, error: "Text too short (min 5 characters)" };
+ }
+ if (text.length > 1e4) {
+ return { valid: false, error: "Text too long (max 10000 characters)" };
+ }
+ return { valid: true };
+ },
+ estimateExecutionTime(input) {
+ const { text } = input;
+ const charCount = text?.length || 0;
+ const useLLM = !!process.env.OPENAI_API_KEY;
+ return useLLM ? charCount / 20 + 1e3 : charCount / 100 + 50;
+ }
+ };
+
+ // src/skills/image-generation.ts
+ var SUPPORTED_STYLES = [
+ "photorealistic",
+ "digital-art",
+ "anime",
+ "oil-painting",
+ "watercolor",
+ "sketch"
+ ];
+ function mockGenerateImages(prompt, width, height, numImages, seed) {
+ return Array.from({ length: numImages }, (_, i) => ({
+ url: `https://picsum.photos/seed/${seed ?? Date.now() + i}/${width}/${height}`,
+ width,
+ height
+ }));
+ }
+ async function dalleGenerateImages(prompt, numImages, size) {
+ const apiKey = process.env.OPENAI_API_KEY;
+ if (!apiKey) {
+ throw new Error("OPENAI_API_KEY not set");
+ }
+ const response = await fetch("https://api.openai.com/v1/images/generations", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`
+ },
+ body: JSON.stringify({
+ model: "dall-e-3",
+ prompt,
+ n: numImages,
+ size,
+ quality: "standard",
+ response_format: "url"
+ })
+ });
+ if (!response.ok) {
+ const error = await response.text();
+ throw new Error(`DALL-E API error: ${error}`);
+ }
+ const data = await response.json();
+ const [width, height] = size.split("x").map(Number);
+ return {
+ images: data.data.map((item) => ({
+ url: item.url,
+ width,
+ height
+ })),
+ revised_prompt: data.data[0]?.revised_prompt
+ };
+ }
+ var imageGenerationSkillHandler = {
+ name: "image-generation",
+ version: "1.0.0",
+ canHandle(skillName, _skillVersion) {
+ const normalized = skillName.toLowerCase().replace(/[-_\s]/g, "");
+ return normalized === "imagegeneration" || normalized === "imagegen" || normalized === "texttoimage" || normalized === "dalle" || normalized === "stablediffusion";
+ },
+ async execute(input, context) {
+ const startTime = Date.now();
+ try {
+ const {
+ prompt,
+ style,
+ width = 1024,
+ height = 1024,
+ num_images = 1,
+ seed
+ } = input;
+ if (!prompt || typeof prompt !== "string") {
+ return { success: false, error: "Missing or invalid 'prompt' field" };
+ }
+ if (prompt.length > 4e3) {
+ return { success: false, error: "Prompt too long (max 4000 characters)" };
+ }
+ if (width < 256 || width > 2048) {
+ return { success: false, error: "Width must be between 256 and 2048" };
+ }
+ if (height < 256 || height > 2048) {
+ return { success: false, error: "Height must be between 256 and 2048" };
+ }
+ if (num_images < 1 || num_images > 4) {
+ return { success: false, error: "num_images must be between 1 and 4" };
+ }
+ const enhancedPrompt = style ? `${prompt}, ${style} style, high quality, detailed` : `${prompt}, high quality, detailed`;
+ const useDalle = !!process.env.OPENAI_API_KEY;
+ let images;
+ let revisedPrompt;
+ if (useDalle) {
+ const dalleSize = width > height ? "1792x1024" : height > width ? "1024x1792" : "1024x1024";
+ const result = await dalleGenerateImages(enhancedPrompt, num_images, dalleSize);
+ images = result.images;
+ revisedPrompt = result.revised_prompt;
+ } else {
+ images = mockGenerateImages(prompt, width, height, num_images, seed);
+ revisedPrompt = enhancedPrompt;
+ }
+ const output = {
+ images,
+ prompt,
+ revised_prompt: revisedPrompt,
+ processing_time_ms: Date.now() - startTime,
+ mode: useDalle ? "dalle" : "mock",
+ model: useDalle ? "dall-e-3" : "mock-diffusion-v1",
+ seed: seed ?? Math.floor(Math.random() * 1e6)
+ };
+ return { success: true, output };
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : "Image generation failed"
+ };
+ }
+ },
+ validateInput(input) {
+ const { prompt, width, height, num_images, style } = input;
+ if (!prompt || typeof prompt !== "string") {
+ return { valid: false, error: "Missing or invalid 'prompt' field" };
+ }
+ if (prompt.length === 0) {
+ return { valid: false, error: "Prompt cannot be empty" };
+ }
+ if (prompt.length > 4e3) {
+ return { valid: false, error: "Prompt too long (max 4000 characters)" };
+ }
+ if (width !== void 0 && (width < 256 || width > 2048)) {
+ return { valid: false, error: "Width must be between 256 and 2048" };
+ }
+ if (height !== void 0 && (height < 256 || height > 2048)) {
+ return { valid: false, error: "Height must be between 256 and 2048" };
+ }
+ if (num_images !== void 0 && (num_images < 1 || num_images > 4)) {
+ return { valid: false, error: "num_images must be between 1 and 4" };
+ }
+ if (style !== void 0 && !SUPPORTED_STYLES.includes(style)) {
+ return {
+ valid: false,
+ error: `Invalid style. Supported: ${SUPPORTED_STYLES.join(", ")}`
+ };
+ }
+ return { valid: true };
+ },
+ estimateExecutionTime(input) {
+ const { width = 1024, height = 1024, num_images = 1 } = input;
+ const useDalle = !!process.env.OPENAI_API_KEY;
+ if (useDalle) {
+ return num_images * 15e3;
+ }
+ const resolutionFactor = width * height / (1024 * 1024);
+ return Math.ceil(500 * resolutionFactor * num_images);
+ }
+ };
+
+ // src/skills/code-review.ts
+ var FOCUS_AREAS = [
+ "security",
+ "performance",
+ "readability",
+ "bugs",
+ "best-practices",
+ "typing"
+ ];
+ var ISSUE_PATTERNS = [
+ { pattern: /console\.log/g, message: "Remove console.log in production", severity: "warning", category: "best-practices" },
+ { pattern: /TODO|FIXME|HACK/g, message: "Unresolved TODO/FIXME comment", severity: "info", category: "maintainability" },
+ { pattern: /==\s/g, message: "Use strict equality (===) instead of loose equality (==)", severity: "warning", category: "best-practices" },
+ { pattern: /var\s+\w+/g, message: "Use const/let instead of var", severity: "suggestion", category: "best-practices" },
+ { pattern: /:\s*any\s*[;,)>]/g, message: "Avoid using 'any' type - be more specific", severity: "warning", category: "typing" },
+ { pattern: /eval\s*\(/g, message: "Avoid using eval() - potential security risk", severity: "critical", category: "security" },
+ { pattern: /innerHTML\s*=/g, message: "innerHTML can lead to XSS - use textContent or sanitize", severity: "critical", category: "security" },
+ { pattern: /password.*=.*["']/gi, message: "Hardcoded password detected", severity: "critical", category: "security" },
+ { pattern: /function\s+\w+\s*\([^)]{80,}\)/g, message: "Function has too many parameters - consider an options object", severity: "suggestion", category: "readability" }
+ ];
+ function detectLanguage(code) {
+ if (code.includes("import React") || code.includes("jsx")) return "typescript/react";
+ if (code.includes("fn ") && code.includes("->")) return "rust";
+ if (code.includes("func ") && code.includes(":=")) return "go";
+ if (code.includes("def ") && code.includes(":")) return "python";
+ if (code.includes("public class") || code.includes("private void")) return "java";
+ if (code.includes("module ") && code.includes("public entry fun")) return "move";
+ if (code.includes("function") || code.includes("const ") || code.includes("=>")) return "typescript";
+ return "unknown";
+ }
+ function analyzeCode(code, focusAreas) {
+ const issues = [];
+ const lines = code.split("\n");
+ for (const pattern of ISSUE_PATTERNS) {
+ if (focusAreas && focusAreas.length > 0) {
+ const categoryMatch = focusAreas.some(
+ (area) => pattern.category.toLowerCase().includes(area.toLowerCase())
+ );
+ if (!categoryMatch) continue;
+ }
+ const matches = code.matchAll(pattern.pattern);
+ for (const match of matches) {
+ if (match.index === void 0) continue;
+ const beforeMatch = code.slice(0, match.index);
+ const lineNumber = beforeMatch.split("\n").length;
+ issues.push({
+ severity: pattern.severity,
+ line: lineNumber,
+ message: pattern.message,
+ category: pattern.category,
+ code_snippet: lines[lineNumber - 1]?.trim().slice(0, 80)
+ });
+ }
+ }
+ return issues;
+ }
+ async function llmCodeReview(code, language, context) {
+ const apiKey = process.env.OPENAI_API_KEY;
+ if (!apiKey) {
+ throw new Error("OPENAI_API_KEY not set");
+ }
+ const response = await fetch("https://api.openai.com/v1/chat/completions", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`
+ },
+ body: JSON.stringify({
+ model: "gpt-4o-mini",
+ messages: [
+ {
+ role: "system",
+ content: `You are an expert code reviewer. Analyze the following ${language} code and provide:
+ 1. A brief summary (1-2 sentences)
+ 2. A list of issues with severity (critical/warning/info/suggestion), line number if possible, message, and category
+ 3. General improvement suggestions
+
+ Respond in JSON format:
+ {
+ "summary": "string",
+ "issues": [{ "severity": "critical|warning|info|suggestion", "line": number|null, "message": "string", "category": "string" }],
+ "suggestions": ["string"]
+ }`
+ },
+ {
+ role: "user",
+ content: context ? `Context: ${context}
+
+ Code:
+ ${code}` : code
+ }
+ ],
+ temperature: 0.3,
+ max_tokens: 2048,
+ response_format: { type: "json_object" }
+ })
+ });
+ if (!response.ok) {
+ const error = await response.text();
+ throw new Error(`OpenAI API error: ${error}`);
+ }
+ const data = await response.json();
+ return JSON.parse(data.choices[0].message.content);
+ }
+ var codeReviewSkillHandler = {
+ name: "code-review",
+ version: "1.0.0",
+ canHandle(skillName, _skillVersion) {
+ const normalized = skillName.toLowerCase().replace(/[-_\s]/g, "");
+ return normalized === "codereview" || normalized === "codeanalysis" || normalized === "linting" || normalized === "staticanalysis";
+ },
+ async execute(input, context) {
+ const startTime = Date.now();
+ try {
+ const {
+ code,
+ language,
+ context: codeContext,
+ focus_areas
+ } = input;
+ if (!code || typeof code !== "string") {
+ return { success: false, error: "Missing or invalid 'code' field" };
+ }
+ if (code.length === 0) {
+ return { success: false, error: "Code cannot be empty" };
+ }
+ if (code.length > 1e5) {
+ return { success: false, error: "Code too long (max 100000 characters)" };
+ }
+ const detectedLanguage = language || detectLanguage(code);
+ const useLLM = !!process.env.OPENAI_API_KEY;
+ let issues;
+ let summary;
+ let suggestions = [];
+ if (useLLM) {
+ const llmResult = await llmCodeReview(code, detectedLanguage, codeContext);
+ summary = llmResult.summary;
+ issues = llmResult.issues;
+ suggestions = llmResult.suggestions;
+ } else {
+ issues = analyzeCode(code, focus_areas);
+ if (issues.filter((i) => i.severity === "critical").length > 0) {
+ summary = `Found ${issues.filter((i) => i.severity === "critical").length} critical issue(s) that should be addressed immediately.`;
+ } else if (issues.filter((i) => i.severity === "warning").length > 0) {
+ summary = `Found ${issues.filter((i) => i.severity === "warning").length} warning(s) that need attention.`;
+ } else if (issues.length === 0) {
+ summary = "No significant issues found. Code looks good!";
+ } else {
+ summary = `Found ${issues.length} minor issue(s) and suggestions.`;
+ }
+ if (code.length > 500 && !code.includes("//") && !code.includes("/*")) {
+ suggestions.push("Add comments to explain complex logic");
+ }
+ if (code.split("\n").length > 100) {
+ suggestions.push("Consider splitting large files into smaller modules");
+ }
+ }
+ const issueCount = {
+ critical: issues.filter((i) => i.severity === "critical").length,
+ warning: issues.filter((i) => i.severity === "warning").length,
+ info: issues.filter((i) => i.severity === "info").length,
+ suggestion: issues.filter((i) => i.severity === "suggestion").length
+ };
+ const deductions = issueCount.critical * 20 + issueCount.warning * 5 + issueCount.info * 1 + issueCount.suggestion * 0.5;
+ const overallScore = Math.max(0, Math.min(100, Math.round(100 - deductions)));
+ const linesOfCode = code.split("\n").filter((l) => l.trim().length > 0).length;
+ const complexity = linesOfCode < 50 ? "low" : linesOfCode < 200 ? "medium" : "high";
+ const maintainability = overallScore > 80 ? "excellent" : overallScore > 60 ? "good" : overallScore > 40 ? "fair" : "needs improvement";
+ const output = {
+ summary,
+ overall_score: overallScore,
+ issues,
+ suggestions,
+ metrics: {
+ lines_of_code: linesOfCode,
+ complexity,
+ maintainability
+ },
+ processing_time_ms: Date.now() - startTime,
+ mode: useLLM ? "llm" : "mock",
+ language_detected: detectedLanguage,
+ issue_count: issueCount
+ };
+ return { success: true, output };
+ } catch (error) {
+ return {
+ success: false,
+ error: error instanceof Error ? error.message : "Code review failed"
+ };
+ }
+ },
+ validateInput(input) {
+ const { code, focus_areas } = input;
+ if (!code || typeof code !== "string") {
+ return { valid: false, error: "Missing or invalid 'code' field" };
+ }
+ if (code.length === 0) {
+ return { valid: false, error: "Code cannot be empty" };
+ }
+ if (code.length > 1e5) {
+ return { valid: false, error: "Code too long (max 100000 characters)" };
+ }
+ if (focus_areas !== void 0) {
+ if (!Array.isArray(focus_areas)) {
+ return { valid: false, error: "focus_areas must be an array" };
+ }
+ for (const area of focus_areas) {
+ if (!FOCUS_AREAS.includes(area)) {
+ return {
+ valid: false,
+ error: `Invalid focus area: ${area}. Supported: ${FOCUS_AREAS.join(", ")}`
+ };
+ }
+ }
+ }
+ return { valid: true };
+ },
+ estimateExecutionTime(input) {
+ const { code } = input;
+ const useLLM = !!process.env.OPENAI_API_KEY;
+ if (!code) return 2e3;
+ if (useLLM) {
+ return Math.min(3e4, 2e3 + code.length / 20);
+ }
+ return Math.min(5e3, 500 + code.length / 100);
+ }
+ };
+
+ // src/skills/index.ts
+ var allSkillHandlers = [
+ echoSkillHandler,
+ translationSkillHandler,
+ summarizationSkillHandler,
+ sentimentSkillHandler,
+ imageGenerationSkillHandler,
+ codeReviewSkillHandler
+ ];
+
  // src/agent.ts
  var ClankAgent = class {
  client;
@@ -349,13 +1245,21 @@ var ClankAgent = class {
  }
  console.log(` \u{1F4CB} Polling for tasks...`);
  try {
- const result = await this.client.api.listTasks({
- status: STATUS.POSTED,
- skillId: skillIds[0],
- // API only supports one skill filter currently
- limit: 10
+ let allTasks = [];
+ for (const skillId of skillIds) {
+ const result = await this.client.api.listTasks({
+ status: STATUS.POSTED,
+ skillId,
+ limit: 10
+ });
+ allTasks = [...allTasks, ...result.data];
+ }
+ const seen = /* @__PURE__ */ new Set();
+ const tasks = allTasks.filter((t) => {
+ if (seen.has(t.id)) return false;
+ seen.add(t.id);
+ return true;
  });
- const tasks = result.data;
  console.log(` \u{1F4CB} Found ${tasks.length} available tasks`);
  for (const task of tasks) {
  if (!this.running) break;
@@ -758,17 +1662,17 @@ var ClankAgent = class {
  // src/config.ts
  import "dotenv/config";
  function loadConfig() {
- const apiUrl = process.env.TASKNET_API_URL;
- const apiKey = process.env.TASKNET_API_KEY;
- const agentId = process.env.TASKNET_AGENT_ID;
+ const apiUrl = process.env.CLANK_API_URL;
+ const apiKey = process.env.CLANK_API_KEY;
+ const agentId = process.env.CLANK_AGENT_ID;
  if (!apiUrl) {
- throw new Error("TASKNET_API_URL is required");
+ throw new Error("CLANK_API_URL is required");
  }
  if (!apiKey) {
- throw new Error("TASKNET_API_KEY is required");
+ throw new Error("CLANK_API_KEY is required");
  }
  if (!agentId) {
- throw new Error("TASKNET_AGENT_ID is required");
+ throw new Error("CLANK_AGENT_ID is required");
  }
  return {
  // API configuration
@@ -778,9 +1682,9 @@ function loadConfig() {
  // Agent mode
  mode: process.env.AGENT_MODE ?? "worker",
  // Network configuration
- network: process.env.TASKNET_NETWORK ?? "testnet",
+ network: process.env.CLANK_NETWORK ?? "testnet",
  rpcUrl: process.env.SUI_RPC_URL,
- packageId: process.env.TASKNET_PACKAGE_ID,
+ packageId: process.env.CLANK_PACKAGE_ID,
  // Walrus configuration
  walrusAggregator: process.env.WALRUS_AGGREGATOR_URL,
  walrusPublisher: process.env.WALRUS_PUBLISHER_URL,
@@ -848,6 +1752,9 @@ program.command("start").description("Start the agent").action(async () => {
  const config = loadConfig();
  validateConfig(config);
  const agent = new ClankAgent(config);
+ for (const handler of allSkillHandlers) {
+ agent.registerSkillHandler(handler);
+ }
  const shutdown = async () => {
  await agent.stop();
  process.exit(0);
@@ -939,14 +1846,14 @@ program.command("status").description("Show agent status (requires running agent
  program.command("config").description("Show required environment variables").action(() => {
  console.log(banner);
  console.log(chalk.bold("\nRequired Environment Variables:\n"));
- console.log(` TASKNET_API_URL API server URL`);
- console.log(` TASKNET_API_KEY API authentication key`);
- console.log(` TASKNET_AGENT_ID Your agent's on-chain ID`);
+ console.log(` CLANK_API_URL API server URL`);
+ console.log(` CLANK_API_KEY API authentication key`);
+ console.log(` CLANK_AGENT_ID Your agent's on-chain ID`);
  console.log(chalk.bold("\nAgent Mode:\n"));
  console.log(` AGENT_MODE Agent mode: worker, requester, or hybrid [default: worker]`);
  console.log(chalk.bold("\nNetwork Configuration:\n"));
- console.log(` TASKNET_NETWORK Network (testnet, mainnet) [default: testnet]`);
- console.log(` TASKNET_PACKAGE_ID Clank contract package ID`);
+ console.log(` CLANK_NETWORK Network (testnet, mainnet) [default: testnet]`);
+ console.log(` CLANK_PACKAGE_ID Clank contract package ID`);
  console.log(` SUI_RPC_URL Sui RPC endpoint`);
  console.log(` WALRUS_AGGREGATOR_URL Walrus aggregator URL`);
  console.log(` WALRUS_PUBLISHER_URL Walrus publisher URL`);
@@ -963,23 +1870,23 @@ program.command("config").description("Show required environment variables").act
  console.log(` HEARTBEAT_INTERVAL_MS Heartbeat interval [default: 60000]`);
  console.log(` STATE_FILE_PATH State persistence file [default: .agent-state.json]`);
  console.log(chalk.bold("\nExample .env file (Worker Mode):\n"));
- console.log(chalk.gray(`TASKNET_API_URL=http://localhost:3000
- TASKNET_API_KEY=ck_your_api_key
- TASKNET_AGENT_ID=0x1234...
+ console.log(chalk.gray(`CLANK_API_URL=http://localhost:3000
+ CLANK_API_KEY=ck_your_api_key
+ CLANK_AGENT_ID=0x1234...
  AGENT_MODE=worker
  SKILL_IDS=echo-skill
  `));
  console.log(chalk.bold("Example .env file (Requester Mode):\n"));
- console.log(chalk.gray(`TASKNET_API_URL=http://localhost:3000
- TASKNET_API_KEY=ck_your_api_key
- TASKNET_AGENT_ID=0x1234...
+ console.log(chalk.gray(`CLANK_API_URL=http://localhost:3000
+ CLANK_API_KEY=ck_your_api_key
+ CLANK_AGENT_ID=0x1234...
  AGENT_MODE=requester
  AUTO_CONFIRM_DETERMINISTIC=true
  `));
  console.log(chalk.bold("Example .env file (Hybrid Mode):\n"));
- console.log(chalk.gray(`TASKNET_API_URL=http://localhost:3000
- TASKNET_API_KEY=ck_your_api_key
- TASKNET_AGENT_ID=0x1234...
+ console.log(chalk.gray(`CLANK_API_URL=http://localhost:3000
+ CLANK_API_KEY=ck_your_api_key
+ CLANK_AGENT_ID=0x1234...
  AGENT_MODE=hybrid
  SKILL_IDS=echo-skill
  MAX_PENDING_TASKS=10
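
For orientation, a minimal sketch of the skill-handler contract the new handlers above share (canHandle, validateInput, execute, estimateExecutionTime), shown on the translation handler's mock path. The direct reference to translationSkillHandler is hypothetical: the diff only shows the handlers bundled inside dist/cli.js and registered through allSkillHandlers, so how (or whether) they are exported may differ.

// Illustrative sketch only - assumes translationSkillHandler is reachable from this package.
// With OPENAI_API_KEY unset, execute() takes the prefix-based mock path shown in the diff.
const input = { text: "hello world", target_language: "es" };

const check = translationSkillHandler.validateInput(input);
if (!check.valid) throw new Error(check.error);

const result = await translationSkillHandler.execute(input, {});
// Expected mock-mode shape: { translated_text: "[ES] hello world", confidence: 0.5, mode: "mock", ... }
console.log(result.success, result.output);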