firecrawl-mcp 1.6.0 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +86 -131
- package/package.json +1 -1
package/dist/index.js
CHANGED
|
@@ -513,7 +513,7 @@ const DEEP_RESEARCH_TOOL = {
|
|
|
513
513
|
maxUrls: {
|
|
514
514
|
type: 'number',
|
|
515
515
|
description: 'Maximum number of URLs to analyze (1-1000)',
|
|
516
|
-
}
|
|
516
|
+
},
|
|
517
517
|
},
|
|
518
518
|
required: ['query'],
|
|
519
519
|
},
|
|
@@ -594,7 +594,7 @@ function isGenerateLLMsTextOptions(args) {
|
|
|
594
594
|
// Server implementation
|
|
595
595
|
const server = new Server({
|
|
596
596
|
name: 'firecrawl-mcp',
|
|
597
|
-
version: '1.6.0',
|
|
597
|
+
version: '1.7.0',
|
|
598
598
|
}, {
|
|
599
599
|
capabilities: {
|
|
600
600
|
tools: {},
|
|
@@ -635,6 +635,17 @@ const creditUsage = {
|
|
|
635
635
|
function delay(ms) {
|
|
636
636
|
return new Promise((resolve) => setTimeout(resolve, ms));
|
|
637
637
|
}
|
|
638
|
+
let isStdioTransport = false;
|
|
639
|
+
function safeLog(level, data) {
|
|
640
|
+
if (isStdioTransport) {
|
|
641
|
+
// For stdio transport, log to stderr to avoid protocol interference
|
|
642
|
+
console.error(`[${level}] ${typeof data === 'object' ? JSON.stringify(data) : data}`);
|
|
643
|
+
}
|
|
644
|
+
else {
|
|
645
|
+
// For other transport types, use the normal logging mechanism
|
|
646
|
+
server.sendLoggingMessage({ level, data });
|
|
647
|
+
}
|
|
648
|
+
}
|
|
638
649
|
// Add retry logic with exponential backoff
|
|
639
650
|
async function withRetry(operation, context, attempt = 1) {
|
|
640
651
|
try {
|
|
@@ -646,10 +657,7 @@ async function withRetry(operation, context, attempt = 1) {
|
|
|
646
657
|
if (isRateLimit && attempt < CONFIG.retry.maxAttempts) {
|
|
647
658
|
const delayMs = Math.min(CONFIG.retry.initialDelay *
|
|
648
659
|
Math.pow(CONFIG.retry.backoffFactor, attempt - 1), CONFIG.retry.maxDelay);
|
|
649
|
-
|
|
650
|
-
level: 'warning',
|
|
651
|
-
data: `Rate limit hit for ${context}. Attempt ${attempt}/${CONFIG.retry.maxAttempts}. Retrying in ${delayMs}ms`,
|
|
652
|
-
});
|
|
660
|
+
safeLog('warning', `Rate limit hit for ${context}. Attempt ${attempt}/${CONFIG.retry.maxAttempts}. Retrying in ${delayMs}ms`);
|
|
653
661
|
await delay(delayMs);
|
|
654
662
|
return withRetry(operation, context, attempt + 1);
|
|
655
663
|
}
|
|
@@ -660,22 +668,13 @@ async function withRetry(operation, context, attempt = 1) {
|
|
|
660
668
|
async function updateCreditUsage(creditsUsed) {
|
|
661
669
|
creditUsage.total += creditsUsed;
|
|
662
670
|
// Log credit usage
|
|
663
|
-
|
|
664
|
-
level: 'info',
|
|
665
|
-
data: `Credit usage: ${creditUsage.total} credits used total`,
|
|
666
|
-
});
|
|
671
|
+
safeLog('info', `Credit usage: ${creditUsage.total} credits used total`);
|
|
667
672
|
// Check thresholds
|
|
668
673
|
if (creditUsage.total >= CONFIG.credit.criticalThreshold) {
|
|
669
|
-
|
|
670
|
-
level: 'error',
|
|
671
|
-
data: `CRITICAL: Credit usage has reached ${creditUsage.total}`,
|
|
672
|
-
});
|
|
674
|
+
safeLog('error', `CRITICAL: Credit usage has reached ${creditUsage.total}`);
|
|
673
675
|
}
|
|
674
676
|
else if (creditUsage.total >= CONFIG.credit.warningThreshold) {
|
|
675
|
-
|
|
676
|
-
level: 'warning',
|
|
677
|
-
data: `WARNING: Credit usage has reached ${creditUsage.total}`,
|
|
678
|
-
});
|
|
677
|
+
safeLog('warning', `WARNING: Credit usage has reached ${creditUsage.total}`);
|
|
679
678
|
}
|
|
680
679
|
}
|
|
681
680
|
// Initialize queue system
|
|
@@ -700,19 +699,13 @@ async function processBatchOperation(operation) {
|
|
|
700
699
|
operation.result = response;
|
|
701
700
|
// Log final credit usage for the batch
|
|
702
701
|
if (!FIRECRAWL_API_URL) {
|
|
703
|
-
|
|
704
|
-
level: 'info',
|
|
705
|
-
data: `Batch ${operation.id} completed. Total credits used: ${totalCreditsUsed}`,
|
|
706
|
-
});
|
|
702
|
+
safeLog('info', `Batch ${operation.id} completed. Total credits used: ${totalCreditsUsed}`);
|
|
707
703
|
}
|
|
708
704
|
}
|
|
709
705
|
catch (error) {
|
|
710
706
|
operation.status = 'failed';
|
|
711
707
|
operation.error = error instanceof Error ? error.message : String(error);
|
|
712
|
-
|
|
713
|
-
level: 'error',
|
|
714
|
-
data: `Batch ${operation.id} failed: ${operation.error}`,
|
|
715
|
-
});
|
|
708
|
+
safeLog('error', `Batch ${operation.id} failed: ${operation.error}`);
|
|
716
709
|
}
|
|
717
710
|
}
|
|
718
711
|
// Tool handlers
|
|
@@ -735,10 +728,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
|
735
728
|
try {
|
|
736
729
|
const { name, arguments: args } = request.params;
|
|
737
730
|
// Log incoming request with timestamp
|
|
738
|
-
|
|
739
|
-
level: 'info',
|
|
740
|
-
data: `[${new Date().toISOString()}] Received request for tool: ${name}`,
|
|
741
|
-
});
|
|
731
|
+
safeLog('info', `[${new Date().toISOString()}] Received request for tool: ${name}`);
|
|
742
732
|
if (!args) {
|
|
743
733
|
throw new Error('No arguments provided');
|
|
744
734
|
}
|
|
@@ -750,16 +740,10 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
|
750
740
|
const { url, ...options } = args;
|
|
751
741
|
try {
|
|
752
742
|
const scrapeStartTime = Date.now();
|
|
753
|
-
|
|
754
|
-
level: 'info',
|
|
755
|
-
data: `Starting scrape for URL: ${url} with options: ${JSON.stringify(options)}`,
|
|
756
|
-
});
|
|
743
|
+
safeLog('info', `Starting scrape for URL: ${url} with options: ${JSON.stringify(options)}`);
|
|
757
744
|
const response = await client.scrapeUrl(url, options);
|
|
758
745
|
// Log performance metrics
|
|
759
|
-
|
|
760
|
-
level: 'info',
|
|
761
|
-
data: `Scrape completed in ${Date.now() - scrapeStartTime}ms`,
|
|
762
|
-
});
|
|
746
|
+
safeLog('info', `Scrape completed in ${Date.now() - scrapeStartTime}ms`);
|
|
763
747
|
if ('success' in response && !response.success) {
|
|
764
748
|
throw new Error(response.error || 'Scraping failed');
|
|
765
749
|
}
|
|
@@ -785,14 +769,14 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
|
785
769
|
}
|
|
786
770
|
// Add warning to response if present
|
|
787
771
|
if (response.warning) {
|
|
788
|
-
|
|
789
|
-
level: 'warning',
|
|
790
|
-
data: response.warning,
|
|
791
|
-
});
|
|
772
|
+
safeLog('warning', response.warning);
|
|
792
773
|
}
|
|
793
774
|
return {
|
|
794
775
|
content: [
|
|
795
|
-
{
|
|
776
|
+
{
|
|
777
|
+
type: 'text',
|
|
778
|
+
text: trimResponseText(contentParts.join('\n\n') || 'No content available'),
|
|
779
|
+
},
|
|
796
780
|
],
|
|
797
781
|
isError: false,
|
|
798
782
|
};
|
|
@@ -800,7 +784,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
|
800
784
|
catch (error) {
|
|
801
785
|
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
802
786
|
return {
|
|
803
|
-
content: [{ type: 'text', text: errorMessage }],
|
|
787
|
+
content: [{ type: 'text', text: trimResponseText(errorMessage) }],
|
|
804
788
|
isError: true,
|
|
805
789
|
};
|
|
806
790
|
}
|
|
@@ -818,7 +802,9 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
|
818
802
|
throw new Error('No links received from FireCrawl API');
|
|
819
803
|
}
|
|
820
804
|
return {
|
|
821
|
-
content: [
|
|
805
|
+
content: [
|
|
806
|
+
{ type: 'text', text: trimResponseText(response.links.join('\n')) },
|
|
807
|
+
],
|
|
822
808
|
isError: false,
|
|
823
809
|
};
|
|
824
810
|
}
|
|
@@ -841,15 +827,12 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
|
841
827
|
batchOperations.set(operationId, operation);
|
|
842
828
|
// Queue the operation
|
|
843
829
|
batchQueue.add(() => processBatchOperation(operation));
|
|
844
|
-
|
|
845
|
-
level: 'info',
|
|
846
|
-
data: `Queued batch operation ${operationId} with ${args.urls.length} URLs`,
|
|
847
|
-
});
|
|
830
|
+
safeLog('info', `Queued batch operation ${operationId} with ${args.urls.length} URLs`);
|
|
848
831
|
return {
|
|
849
832
|
content: [
|
|
850
833
|
{
|
|
851
834
|
type: 'text',
|
|
852
|
-
text: `Batch operation queued with ID: ${operationId}. Use firecrawl_check_batch_status to check progress
|
|
835
|
+
text: trimResponseText(`Batch operation queued with ID: ${operationId}. Use firecrawl_check_batch_status to check progress.`),
|
|
853
836
|
},
|
|
854
837
|
],
|
|
855
838
|
isError: false,
|
|
@@ -860,7 +843,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
|
860
843
|
? error.message
|
|
861
844
|
: `Batch operation failed: ${JSON.stringify(error)}`;
|
|
862
845
|
return {
|
|
863
|
-
content: [{ type: 'text', text: errorMessage }],
|
|
846
|
+
content: [{ type: 'text', text: trimResponseText(errorMessage) }],
|
|
864
847
|
isError: true,
|
|
865
848
|
};
|
|
866
849
|
}
|
|
@@ -875,7 +858,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
|
875
858
|
content: [
|
|
876
859
|
{
|
|
877
860
|
type: 'text',
|
|
878
|
-
text: `No batch operation found with ID: ${args.id}
|
|
861
|
+
text: trimResponseText(`No batch operation found with ID: ${args.id}`),
|
|
879
862
|
},
|
|
880
863
|
],
|
|
881
864
|
isError: true,
|
|
@@ -889,7 +872,7 @@ ${operation.result
|
|
|
889
872
|
? `Results: ${JSON.stringify(operation.result, null, 2)}`
|
|
890
873
|
: ''}`;
|
|
891
874
|
return {
|
|
892
|
-
content: [{ type: 'text', text: status }],
|
|
875
|
+
content: [{ type: 'text', text: trimResponseText(status) }],
|
|
893
876
|
isError: false,
|
|
894
877
|
};
|
|
895
878
|
}
|
|
@@ -910,7 +893,7 @@ ${operation.result
|
|
|
910
893
|
content: [
|
|
911
894
|
{
|
|
912
895
|
type: 'text',
|
|
913
|
-
text: `Started crawl for ${url} with job ID: ${response.id}
|
|
896
|
+
text: trimResponseText(`Started crawl for ${url} with job ID: ${response.id}`),
|
|
914
897
|
},
|
|
915
898
|
],
|
|
916
899
|
isError: false,
|
|
@@ -931,7 +914,7 @@ Credits Used: ${response.creditsUsed}
|
|
|
931
914
|
Expires At: ${response.expiresAt}
|
|
932
915
|
${response.data.length > 0 ? '\nResults:\n' + formatResults(response.data) : ''}`;
|
|
933
916
|
return {
|
|
934
|
-
content: [{ type: 'text', text: status }],
|
|
917
|
+
content: [{ type: 'text', text: trimResponseText(status) }],
|
|
935
918
|
isError: false,
|
|
936
919
|
};
|
|
937
920
|
}
|
|
@@ -956,7 +939,7 @@ Description: ${result.description || 'No description'}
|
|
|
956
939
|
${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
|
|
957
940
|
.join('\n\n');
|
|
958
941
|
return {
|
|
959
|
-
content: [{ type: 'text', text: results }],
|
|
942
|
+
content: [{ type: 'text', text: trimResponseText(results) }],
|
|
960
943
|
isError: false,
|
|
961
944
|
};
|
|
962
945
|
}
|
|
@@ -965,7 +948,7 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
|
|
|
965
948
|
? error.message
|
|
966
949
|
: `Search failed: ${JSON.stringify(error)}`;
|
|
967
950
|
return {
|
|
968
|
-
content: [{ type: 'text', text: errorMessage }],
|
|
951
|
+
content: [{ type: 'text', text: trimResponseText(errorMessage) }],
|
|
969
952
|
isError: true,
|
|
970
953
|
};
|
|
971
954
|
}
|
|
@@ -976,16 +959,10 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
|
|
|
976
959
|
}
|
|
977
960
|
try {
|
|
978
961
|
const extractStartTime = Date.now();
|
|
979
|
-
|
|
980
|
-
level: 'info',
|
|
981
|
-
data: `Starting extraction for URLs: ${args.urls.join(', ')}`,
|
|
982
|
-
});
|
|
962
|
+
safeLog('info', `Starting extraction for URLs: ${args.urls.join(', ')}`);
|
|
983
963
|
// Log if using self-hosted instance
|
|
984
964
|
if (FIRECRAWL_API_URL) {
|
|
985
|
-
|
|
986
|
-
level: 'info',
|
|
987
|
-
data: 'Using self-hosted instance for extraction',
|
|
988
|
-
});
|
|
965
|
+
safeLog('info', 'Using self-hosted instance for extraction');
|
|
989
966
|
}
|
|
990
967
|
const extractResponse = await withRetry(async () => client.extract(args.urls, {
|
|
991
968
|
prompt: args.prompt,
|
|
@@ -1006,25 +983,19 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
|
|
|
1006
983
|
await updateCreditUsage(response.creditsUsed || 0);
|
|
1007
984
|
}
|
|
1008
985
|
// Log performance metrics
|
|
1009
|
-
|
|
1010
|
-
level: 'info',
|
|
1011
|
-
data: `Extraction completed in ${Date.now() - extractStartTime}ms`,
|
|
1012
|
-
});
|
|
986
|
+
safeLog('info', `Extraction completed in ${Date.now() - extractStartTime}ms`);
|
|
1013
987
|
// Add warning to response if present
|
|
1014
988
|
const result = {
|
|
1015
989
|
content: [
|
|
1016
990
|
{
|
|
1017
991
|
type: 'text',
|
|
1018
|
-
text: JSON.stringify(response.data, null, 2),
|
|
992
|
+
text: trimResponseText(JSON.stringify(response.data, null, 2)),
|
|
1019
993
|
},
|
|
1020
994
|
],
|
|
1021
995
|
isError: false,
|
|
1022
996
|
};
|
|
1023
997
|
if (response.warning) {
|
|
1024
|
-
|
|
1025
|
-
level: 'warning',
|
|
1026
|
-
data: response.warning,
|
|
1027
|
-
});
|
|
998
|
+
safeLog('warning', response.warning);
|
|
1028
999
|
}
|
|
1029
1000
|
return result;
|
|
1030
1001
|
}
|
|
@@ -1033,22 +1004,19 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
|
|
|
1033
1004
|
// Special handling for self-hosted instance errors
|
|
1034
1005
|
if (FIRECRAWL_API_URL &&
|
|
1035
1006
|
errorMessage.toLowerCase().includes('not supported')) {
|
|
1036
|
-
|
|
1037
|
-
level: 'error',
|
|
1038
|
-
data: 'Extraction is not supported by this self-hosted instance',
|
|
1039
|
-
});
|
|
1007
|
+
safeLog('error', 'Extraction is not supported by this self-hosted instance');
|
|
1040
1008
|
return {
|
|
1041
1009
|
content: [
|
|
1042
1010
|
{
|
|
1043
1011
|
type: 'text',
|
|
1044
|
-
text: 'Extraction is not supported by this self-hosted instance. Please ensure LLM support is configured.',
|
|
1012
|
+
text: trimResponseText('Extraction is not supported by this self-hosted instance. Please ensure LLM support is configured.'),
|
|
1045
1013
|
},
|
|
1046
1014
|
],
|
|
1047
1015
|
isError: true,
|
|
1048
1016
|
};
|
|
1049
1017
|
}
|
|
1050
1018
|
return {
|
|
1051
|
-
content: [{ type: 'text', text: errorMessage }],
|
|
1019
|
+
content: [{ type: 'text', text: trimResponseText(errorMessage) }],
|
|
1052
1020
|
isError: true,
|
|
1053
1021
|
};
|
|
1054
1022
|
}
|
|
@@ -1059,10 +1027,7 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
|
|
|
1059
1027
|
}
|
|
1060
1028
|
try {
|
|
1061
1029
|
const researchStartTime = Date.now();
|
|
1062
|
-
|
|
1063
|
-
level: 'info',
|
|
1064
|
-
data: `Starting deep research for query: ${args.query}`,
|
|
1065
|
-
});
|
|
1030
|
+
safeLog('info', `Starting deep research for query: ${args.query}`);
|
|
1066
1031
|
const response = await client.deepResearch(args.query, {
|
|
1067
1032
|
maxDepth: args.maxDepth,
|
|
1068
1033
|
timeLimit: args.timeLimit,
|
|
@@ -1070,23 +1035,14 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
|
|
|
1070
1035
|
},
|
|
1071
1036
|
// Activity callback
|
|
1072
1037
|
(activity) => {
|
|
1073
|
-
|
|
1074
|
-
level: 'info',
|
|
1075
|
-
data: `Research activity: ${activity.message} (Depth: ${activity.depth})`,
|
|
1076
|
-
});
|
|
1038
|
+
safeLog('info', `Research activity: ${activity.message} (Depth: ${activity.depth})`);
|
|
1077
1039
|
},
|
|
1078
1040
|
// Source callback
|
|
1079
1041
|
(source) => {
|
|
1080
|
-
|
|
1081
|
-
level: 'info',
|
|
1082
|
-
data: `Research source found: ${source.url}${source.title ? ` - ${source.title}` : ''}`,
|
|
1083
|
-
});
|
|
1042
|
+
safeLog('info', `Research source found: ${source.url}${source.title ? ` - ${source.title}` : ''}`);
|
|
1084
1043
|
});
|
|
1085
1044
|
// Log performance metrics
|
|
1086
|
-
|
|
1087
|
-
level: 'info',
|
|
1088
|
-
data: `Deep research completed in ${Date.now() - researchStartTime}ms`,
|
|
1089
|
-
});
|
|
1045
|
+
safeLog('info', `Deep research completed in ${Date.now() - researchStartTime}ms`);
|
|
1090
1046
|
if (!response.success) {
|
|
1091
1047
|
throw new Error(response.error || 'Deep research failed');
|
|
1092
1048
|
}
|
|
@@ -1097,14 +1053,19 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
|
|
|
1097
1053
|
sources: response.data.sources,
|
|
1098
1054
|
};
|
|
1099
1055
|
return {
|
|
1100
|
-
content: [
|
|
1056
|
+
content: [
|
|
1057
|
+
{
|
|
1058
|
+
type: 'text',
|
|
1059
|
+
text: trimResponseText(formattedResponse.finalAnalysis),
|
|
1060
|
+
},
|
|
1061
|
+
],
|
|
1101
1062
|
isError: false,
|
|
1102
1063
|
};
|
|
1103
1064
|
}
|
|
1104
1065
|
catch (error) {
|
|
1105
1066
|
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
1106
1067
|
return {
|
|
1107
|
-
content: [{ type: 'text', text: errorMessage }],
|
|
1068
|
+
content: [{ type: 'text', text: trimResponseText(errorMessage) }],
|
|
1108
1069
|
isError: true,
|
|
1109
1070
|
};
|
|
1110
1071
|
}
|
|
@@ -1116,20 +1077,14 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
|
|
|
1116
1077
|
try {
|
|
1117
1078
|
const { url, ...params } = args;
|
|
1118
1079
|
const generateStartTime = Date.now();
|
|
1119
|
-
|
|
1120
|
-
level: 'info',
|
|
1121
|
-
data: `Starting LLMs.txt generation for URL: ${url}`,
|
|
1122
|
-
});
|
|
1080
|
+
safeLog('info', `Starting LLMs.txt generation for URL: ${url}`);
|
|
1123
1081
|
// Start the generation process
|
|
1124
1082
|
const response = await withRetry(async () => client.generateLLMsText(url, params), 'LLMs.txt generation');
|
|
1125
1083
|
if (!response.success) {
|
|
1126
1084
|
throw new Error(response.error || 'LLMs.txt generation failed');
|
|
1127
1085
|
}
|
|
1128
1086
|
// Log performance metrics
|
|
1129
|
-
|
|
1130
|
-
level: 'info',
|
|
1131
|
-
data: `LLMs.txt generation completed in ${Date.now() - generateStartTime}ms`,
|
|
1132
|
-
});
|
|
1087
|
+
safeLog('info', `LLMs.txt generation completed in ${Date.now() - generateStartTime}ms`);
|
|
1133
1088
|
// Format the response
|
|
1134
1089
|
let resultText = '';
|
|
1135
1090
|
if ('data' in response) {
|
|
@@ -1139,42 +1094,41 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
|
|
|
1139
1094
|
}
|
|
1140
1095
|
}
|
|
1141
1096
|
return {
|
|
1142
|
-
content: [{ type: 'text', text: resultText }],
|
|
1097
|
+
content: [{ type: 'text', text: trimResponseText(resultText) }],
|
|
1143
1098
|
isError: false,
|
|
1144
1099
|
};
|
|
1145
1100
|
}
|
|
1146
1101
|
catch (error) {
|
|
1147
1102
|
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
1148
1103
|
return {
|
|
1149
|
-
content: [{ type: 'text', text: errorMessage }],
|
|
1104
|
+
content: [{ type: 'text', text: trimResponseText(errorMessage) }],
|
|
1150
1105
|
isError: true,
|
|
1151
1106
|
};
|
|
1152
1107
|
}
|
|
1153
1108
|
}
|
|
1154
1109
|
default:
|
|
1155
1110
|
return {
|
|
1156
|
-
content: [
|
|
1111
|
+
content: [
|
|
1112
|
+
{ type: 'text', text: trimResponseText(`Unknown tool: ${name}`) },
|
|
1113
|
+
],
|
|
1157
1114
|
isError: true,
|
|
1158
1115
|
};
|
|
1159
1116
|
}
|
|
1160
1117
|
}
|
|
1161
1118
|
catch (error) {
|
|
1162
1119
|
// Log detailed error information
|
|
1163
|
-
|
|
1164
|
-
|
|
1165
|
-
|
|
1166
|
-
|
|
1167
|
-
|
|
1168
|
-
|
|
1169
|
-
timestamp: new Date().toISOString(),
|
|
1170
|
-
duration: Date.now() - startTime,
|
|
1171
|
-
},
|
|
1120
|
+
safeLog('error', {
|
|
1121
|
+
message: `Request failed: ${error instanceof Error ? error.message : String(error)}`,
|
|
1122
|
+
tool: request.params.name,
|
|
1123
|
+
arguments: request.params.arguments,
|
|
1124
|
+
timestamp: new Date().toISOString(),
|
|
1125
|
+
duration: Date.now() - startTime,
|
|
1172
1126
|
});
|
|
1173
1127
|
return {
|
|
1174
1128
|
content: [
|
|
1175
1129
|
{
|
|
1176
1130
|
type: 'text',
|
|
1177
|
-
text: `Error: ${error instanceof Error ? error.message : String(error)}
|
|
1131
|
+
text: trimResponseText(`Error: ${error instanceof Error ? error.message : String(error)}`),
|
|
1178
1132
|
},
|
|
1179
1133
|
],
|
|
1180
1134
|
isError: true,
|
|
@@ -1182,10 +1136,7 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
|
|
|
1182
1136
|
}
|
|
1183
1137
|
finally {
|
|
1184
1138
|
// Log request completion with performance metrics
|
|
1185
|
-
|
|
1186
|
-
level: 'info',
|
|
1187
|
-
data: `Request completed in ${Date.now() - startTime}ms`,
|
|
1188
|
-
});
|
|
1139
|
+
safeLog('info', `Request completed in ${Date.now() - startTime}ms`);
|
|
1189
1140
|
}
|
|
1190
1141
|
});
|
|
1191
1142
|
// Helper function to format results
|
|
@@ -1203,21 +1154,25 @@ ${doc.metadata?.title ? `Title: ${doc.metadata.title}` : ''}`;
|
|
|
1203
1154
|
function hasCredits(response) {
|
|
1204
1155
|
return 'creditsUsed' in response && typeof response.creditsUsed === 'number';
|
|
1205
1156
|
}
|
|
1157
|
+
// Utility function to trim trailing whitespace from text responses
|
|
1158
|
+
// This prevents Claude API errors with "final assistant content cannot end with trailing whitespace"
|
|
1159
|
+
function trimResponseText(text) {
|
|
1160
|
+
return text.trim();
|
|
1161
|
+
}
|
|
1206
1162
|
// Server startup
|
|
1207
1163
|
async function runServer() {
|
|
1208
1164
|
try {
|
|
1209
1165
|
console.error('Initializing FireCrawl MCP Server...');
|
|
1210
1166
|
const transport = new StdioServerTransport();
|
|
1167
|
+
// Detect if we're using stdio transport
|
|
1168
|
+
isStdioTransport = transport instanceof StdioServerTransport;
|
|
1169
|
+
if (isStdioTransport) {
|
|
1170
|
+
console.error('Running in stdio mode, logging will be directed to stderr');
|
|
1171
|
+
}
|
|
1211
1172
|
await server.connect(transport);
|
|
1212
1173
|
// Now that we're connected, we can send logging messages
|
|
1213
|
-
|
|
1214
|
-
|
|
1215
|
-
data: 'FireCrawl MCP Server initialized successfully',
|
|
1216
|
-
});
|
|
1217
|
-
server.sendLoggingMessage({
|
|
1218
|
-
level: 'info',
|
|
1219
|
-
data: `Configuration: API URL: ${FIRECRAWL_API_URL || 'default'}`,
|
|
1220
|
-
});
|
|
1174
|
+
safeLog('info', 'FireCrawl MCP Server initialized successfully');
|
|
1175
|
+
safeLog('info', `Configuration: API URL: ${FIRECRAWL_API_URL || 'default'}`);
|
|
1221
1176
|
console.error('FireCrawl MCP Server running on stdio');
|
|
1222
1177
|
}
|
|
1223
1178
|
catch (error) {
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "firecrawl-mcp",
|
|
3
|
-
"version": "1.6.0",
|
|
3
|
+
"version": "1.7.0",
|
|
4
4
|
"description": "MCP server for FireCrawl web scraping integration. Supports both cloud and self-hosted instances. Features include web scraping, batch processing, structured data extraction, and LLM-powered content analysis.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"bin": {
|