firecrawl-mcp 1.6.0 → 1.7.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +90 -131
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -513,7 +513,7 @@ const DEEP_RESEARCH_TOOL = {
             maxUrls: {
                 type: 'number',
                 description: 'Maximum number of URLs to analyze (1-1000)',
-            }
+            },
         },
         required: ['query'],
     },
@@ -594,7 +594,7 @@ function isGenerateLLMsTextOptions(args) {
 // Server implementation
 const server = new Server({
     name: 'firecrawl-mcp',
-    version: '1.
+    version: '1.7.0',
 }, {
     capabilities: {
         tools: {},
@@ -635,6 +635,17 @@ const creditUsage = {
 function delay(ms) {
     return new Promise((resolve) => setTimeout(resolve, ms));
 }
+let isStdioTransport = false;
+function safeLog(level, data) {
+    if (isStdioTransport) {
+        // For stdio transport, log to stderr to avoid protocol interference
+        console.error(`[${level}] ${typeof data === 'object' ? JSON.stringify(data) : data}`);
+    }
+    else {
+        // For other transport types, use the normal logging mechanism
+        server.sendLoggingMessage({ level, data });
+    }
+}
 // Add retry logic with exponential backoff
 async function withRetry(operation, context, attempt = 1) {
     try {
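The safeLog helper added above is the core of this release: the stdio transport carries JSON-RPC messages on stdout, so any stray stdout write can corrupt the protocol stream, and diagnostics therefore go to stderr instead. A minimal standalone sketch of the same pattern; the sample messages are illustrative only:

// Standalone sketch of the safeLog pattern introduced in the hunk above; messages are illustrative.
let isStdioTransport = true; // flipped once the server is connected over stdio

function safeLog(level, data) {
    const text = typeof data === 'object' ? JSON.stringify(data) : data;
    if (isStdioTransport) {
        // stdout is reserved for JSON-RPC frames, so diagnostics go to stderr
        console.error(`[${level}] ${text}`);
    }
    // otherwise the real server forwards to server.sendLoggingMessage({ level, data })
}

safeLog('info', 'FireCrawl MCP Server initialized successfully');
safeLog('error', { tool: 'firecrawl_scrape', message: 'Request failed' });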
@@ -646,10 +657,7 @@ async function withRetry(operation, context, attempt = 1) {
         if (isRateLimit && attempt < CONFIG.retry.maxAttempts) {
             const delayMs = Math.min(CONFIG.retry.initialDelay *
                 Math.pow(CONFIG.retry.backoffFactor, attempt - 1), CONFIG.retry.maxDelay);
-            server.sendLoggingMessage({
-                level: 'warning',
-                data: `Rate limit hit for ${context}. Attempt ${attempt}/${CONFIG.retry.maxAttempts}. Retrying in ${delayMs}ms`,
-            });
+            safeLog('warning', `Rate limit hit for ${context}. Attempt ${attempt}/${CONFIG.retry.maxAttempts}. Retrying in ${delayMs}ms`);
             await delay(delayMs);
             return withRetry(operation, context, attempt + 1);
         }
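For reference, the retry delay computed above is initialDelay * backoffFactor^(attempt - 1), capped at maxDelay. A small sketch of that formula with assumed configuration values (the package's actual CONFIG.retry defaults are not visible in this diff):

// Assumed retry settings, for illustration only.
const retry = { maxAttempts: 3, initialDelay: 1000, maxDelay: 10000, backoffFactor: 2 };

function retryDelay(attempt) {
    // Same formula as the hunk above: exponential backoff with an upper bound.
    return Math.min(retry.initialDelay * Math.pow(retry.backoffFactor, attempt - 1), retry.maxDelay);
}

console.log(retryDelay(1), retryDelay(2), retryDelay(3)); // 1000 2000 4000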
@@ -660,22 +668,13 @@ async function withRetry(operation, context, attempt = 1) {
 async function updateCreditUsage(creditsUsed) {
     creditUsage.total += creditsUsed;
     // Log credit usage
-    server.sendLoggingMessage({
-        level: 'info',
-        data: `Credit usage: ${creditUsage.total} credits used total`,
-    });
+    safeLog('info', `Credit usage: ${creditUsage.total} credits used total`);
     // Check thresholds
     if (creditUsage.total >= CONFIG.credit.criticalThreshold) {
-        server.sendLoggingMessage({
-            level: 'error',
-            data: `CRITICAL: Credit usage has reached ${creditUsage.total}`,
-        });
+        safeLog('error', `CRITICAL: Credit usage has reached ${creditUsage.total}`);
     }
     else if (creditUsage.total >= CONFIG.credit.warningThreshold) {
-        server.sendLoggingMessage({
-            level: 'warning',
-            data: `WARNING: Credit usage has reached ${creditUsage.total}`,
-        });
+        safeLog('warning', `WARNING: Credit usage has reached ${creditUsage.total}`);
     }
 }
 // Initialize queue system
@@ -700,19 +699,13 @@ async function processBatchOperation(operation) {
         operation.result = response;
         // Log final credit usage for the batch
         if (!FIRECRAWL_API_URL) {
-            server.sendLoggingMessage({
-                level: 'info',
-                data: `Batch ${operation.id} completed. Total credits used: ${totalCreditsUsed}`,
-            });
+            safeLog('info', `Batch ${operation.id} completed. Total credits used: ${totalCreditsUsed}`);
         }
     }
     catch (error) {
         operation.status = 'failed';
         operation.error = error instanceof Error ? error.message : String(error);
-        server.sendLoggingMessage({
-            level: 'error',
-            data: `Batch ${operation.id} failed: ${operation.error}`,
-        });
+        safeLog('error', `Batch ${operation.id} failed: ${operation.error}`);
     }
 }
 // Tool handlers
@@ -735,10 +728,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
     try {
         const { name, arguments: args } = request.params;
         // Log incoming request with timestamp
-        server.sendLoggingMessage({
-            level: 'info',
-            data: `[${new Date().toISOString()}] Received request for tool: ${name}`,
-        });
+        safeLog('info', `[${new Date().toISOString()}] Received request for tool: ${name}`);
         if (!args) {
             throw new Error('No arguments provided');
         }
@@ -750,16 +740,10 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
                 const { url, ...options } = args;
                 try {
                     const scrapeStartTime = Date.now();
-                    server.sendLoggingMessage({
-                        level: 'info',
-                        data: `Starting scrape for URL: ${url} with options: ${JSON.stringify(options)}`,
-                    });
+                    safeLog('info', `Starting scrape for URL: ${url} with options: ${JSON.stringify(options)}`);
                     const response = await client.scrapeUrl(url, options);
                     // Log performance metrics
-                    server.sendLoggingMessage({
-                        level: 'info',
-                        data: `Scrape completed in ${Date.now() - scrapeStartTime}ms`,
-                    });
+                    safeLog('info', `Scrape completed in ${Date.now() - scrapeStartTime}ms`);
                     if ('success' in response && !response.success) {
                         throw new Error(response.error || 'Scraping failed');
                     }
@@ -783,16 +767,20 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
                     if (options.formats?.includes('extract') && response.extract) {
                         contentParts.push(JSON.stringify(response.extract, null, 2));
                     }
+                    // If options.formats is empty, default to markdown
+                    if (!options.formats || options.formats.length === 0) {
+                        options.formats = ['markdown'];
+                    }
                     // Add warning to response if present
                     if (response.warning) {
-                        server.sendLoggingMessage({
-                            level: 'warning',
-                            data: response.warning,
-                        });
+                        safeLog('warning', response.warning);
                     }
                     return {
                         content: [
-                            {
+                            {
+                                type: 'text',
+                                text: trimResponseText(contentParts.join('\n\n') || 'No content available'),
+                            },
                         ],
                         isError: false,
                     };
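Two behavioral changes land in the scrape handler above: an empty or missing formats array now falls back to ['markdown'], and the returned text is passed through trimResponseText (defined near the end of the file). A small sketch of the fallback; normalizeFormats is a hypothetical helper, not part of the package:

// Hypothetical helper mirroring the new formats fallback in the scrape handler.
function normalizeFormats(options) {
    if (!options.formats || options.formats.length === 0) {
        options.formats = ['markdown'];
    }
    return options;
}

console.log(normalizeFormats({}).formats);                    // [ 'markdown' ]
console.log(normalizeFormats({ formats: ['html'] }).formats); // [ 'html' ]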
@@ -800,7 +788,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
                 catch (error) {
                     const errorMessage = error instanceof Error ? error.message : String(error);
                     return {
-                        content: [{ type: 'text', text: errorMessage }],
+                        content: [{ type: 'text', text: trimResponseText(errorMessage) }],
                         isError: true,
                     };
                 }
@@ -818,7 +806,9 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
                     throw new Error('No links received from FireCrawl API');
                 }
                 return {
-                    content: [
+                    content: [
+                        { type: 'text', text: trimResponseText(response.links.join('\n')) },
+                    ],
                     isError: false,
                 };
             }
@@ -841,15 +831,12 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
                 batchOperations.set(operationId, operation);
                 // Queue the operation
                 batchQueue.add(() => processBatchOperation(operation));
-                server.sendLoggingMessage({
-                    level: 'info',
-                    data: `Queued batch operation ${operationId} with ${args.urls.length} URLs`,
-                });
+                safeLog('info', `Queued batch operation ${operationId} with ${args.urls.length} URLs`);
                 return {
                     content: [
                         {
                             type: 'text',
-                            text: `Batch operation queued with ID: ${operationId}. Use firecrawl_check_batch_status to check progress
+                            text: trimResponseText(`Batch operation queued with ID: ${operationId}. Use firecrawl_check_batch_status to check progress.`),
                         },
                     ],
                     isError: false,
@@ -860,7 +847,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
                     ? error.message
                     : `Batch operation failed: ${JSON.stringify(error)}`;
                 return {
-                    content: [{ type: 'text', text: errorMessage }],
+                    content: [{ type: 'text', text: trimResponseText(errorMessage) }],
                     isError: true,
                 };
             }
@@ -875,7 +862,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
                     content: [
                         {
                             type: 'text',
-                            text: `No batch operation found with ID: ${args.id}
+                            text: trimResponseText(`No batch operation found with ID: ${args.id}`),
                         },
                     ],
                     isError: true,
@@ -889,7 +876,7 @@ ${operation.result
                     ? `Results: ${JSON.stringify(operation.result, null, 2)}`
                     : ''}`;
                 return {
-                    content: [{ type: 'text', text: status }],
+                    content: [{ type: 'text', text: trimResponseText(status) }],
                     isError: false,
                 };
             }
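The batch flow above replies immediately with an operation ID and expects a follow-up call to firecrawl_check_batch_status. A hedged sketch of that round trip from the client side using the MCP TypeScript SDK; the batch tool name firecrawl_batch_scrape and its argument shape are assumptions (only `urls` and firecrawl_check_batch_status appear in these hunks), the launch command is illustrative, and FIRECRAWL_API_KEY is assumed to be set in the environment:

// Illustrative ESM sketch of the queue-then-poll batch flow.
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

const client = new Client({ name: 'example-client', version: '0.0.0' }, { capabilities: {} });
await client.connect(new StdioClientTransport({ command: 'npx', args: ['-y', 'firecrawl-mcp'] }));

// Queue the batch; the reply text contains the generated operation ID.
const queued = await client.callTool({
    name: 'firecrawl_batch_scrape', // assumed tool name, not shown in this diff
    arguments: { urls: ['https://example.com'] },
});
console.log(queued.content[0].text);
// e.g. "Batch operation queued with ID: <operationId>. Use firecrawl_check_batch_status to check progress."

// Poll for completion using the ID echoed back above.
const status = await client.callTool({
    name: 'firecrawl_check_batch_status',
    arguments: { id: '<operationId>' }, // substitute the real ID from the previous reply
});
console.log(status.content[0].text);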
@@ -910,7 +897,7 @@ ${operation.result
                     content: [
                         {
                             type: 'text',
-                            text: `Started crawl for ${url} with job ID: ${response.id}
+                            text: trimResponseText(`Started crawl for ${url} with job ID: ${response.id}`),
                         },
                     ],
                     isError: false,
@@ -931,7 +918,7 @@ Credits Used: ${response.creditsUsed}
 Expires At: ${response.expiresAt}
 ${response.data.length > 0 ? '\nResults:\n' + formatResults(response.data) : ''}`;
                 return {
-                    content: [{ type: 'text', text: status }],
+                    content: [{ type: 'text', text: trimResponseText(status) }],
                     isError: false,
                 };
             }
@@ -956,7 +943,7 @@ Description: ${result.description || 'No description'}
 ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
                     .join('\n\n');
                 return {
-                    content: [{ type: 'text', text: results }],
+                    content: [{ type: 'text', text: trimResponseText(results) }],
                     isError: false,
                 };
             }
@@ -965,7 +952,7 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
                     ? error.message
                     : `Search failed: ${JSON.stringify(error)}`;
                 return {
-                    content: [{ type: 'text', text: errorMessage }],
+                    content: [{ type: 'text', text: trimResponseText(errorMessage) }],
                     isError: true,
                 };
             }
@@ -976,16 +963,10 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
                 }
                 try {
                     const extractStartTime = Date.now();
-                    server.sendLoggingMessage({
-                        level: 'info',
-                        data: `Starting extraction for URLs: ${args.urls.join(', ')}`,
-                    });
+                    safeLog('info', `Starting extraction for URLs: ${args.urls.join(', ')}`);
                     // Log if using self-hosted instance
                     if (FIRECRAWL_API_URL) {
-                        server.sendLoggingMessage({
-                            level: 'info',
-                            data: 'Using self-hosted instance for extraction',
-                        });
+                        safeLog('info', 'Using self-hosted instance for extraction');
                     }
                     const extractResponse = await withRetry(async () => client.extract(args.urls, {
                         prompt: args.prompt,
@@ -1006,25 +987,19 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
                         await updateCreditUsage(response.creditsUsed || 0);
                     }
                     // Log performance metrics
-                    server.sendLoggingMessage({
-                        level: 'info',
-                        data: `Extraction completed in ${Date.now() - extractStartTime}ms`,
-                    });
+                    safeLog('info', `Extraction completed in ${Date.now() - extractStartTime}ms`);
                     // Add warning to response if present
                     const result = {
                         content: [
                             {
                                 type: 'text',
-                                text: JSON.stringify(response.data, null, 2),
+                                text: trimResponseText(JSON.stringify(response.data, null, 2)),
                             },
                         ],
                         isError: false,
                     };
                     if (response.warning) {
-                        server.sendLoggingMessage({
-                            level: 'warning',
-                            data: response.warning,
-                        });
+                        safeLog('warning', response.warning);
                     }
                     return result;
                 }
@@ -1033,22 +1008,19 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
                     // Special handling for self-hosted instance errors
                     if (FIRECRAWL_API_URL &&
                         errorMessage.toLowerCase().includes('not supported')) {
-                        server.sendLoggingMessage({
-                            level: 'error',
-                            data: 'Extraction is not supported by this self-hosted instance',
-                        });
+                        safeLog('error', 'Extraction is not supported by this self-hosted instance');
                         return {
                             content: [
                                 {
                                     type: 'text',
-                                    text: 'Extraction is not supported by this self-hosted instance. Please ensure LLM support is configured.',
+                                    text: trimResponseText('Extraction is not supported by this self-hosted instance. Please ensure LLM support is configured.'),
                                 },
                             ],
                             isError: true,
                         };
                     }
                     return {
-                        content: [{ type: 'text', text: errorMessage }],
+                        content: [{ type: 'text', text: trimResponseText(errorMessage) }],
                         isError: true,
                     };
                 }
@@ -1059,10 +1031,7 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
                 }
                 try {
                     const researchStartTime = Date.now();
-                    server.sendLoggingMessage({
-                        level: 'info',
-                        data: `Starting deep research for query: ${args.query}`,
-                    });
+                    safeLog('info', `Starting deep research for query: ${args.query}`);
                     const response = await client.deepResearch(args.query, {
                         maxDepth: args.maxDepth,
                         timeLimit: args.timeLimit,
@@ -1070,23 +1039,14 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
                     },
                     // Activity callback
                     (activity) => {
-                        server.sendLoggingMessage({
-                            level: 'info',
-                            data: `Research activity: ${activity.message} (Depth: ${activity.depth})`,
-                        });
+                        safeLog('info', `Research activity: ${activity.message} (Depth: ${activity.depth})`);
                     },
                     // Source callback
                     (source) => {
-                        server.sendLoggingMessage({
-                            level: 'info',
-                            data: `Research source found: ${source.url}${source.title ? ` - ${source.title}` : ''}`,
-                        });
+                        safeLog('info', `Research source found: ${source.url}${source.title ? ` - ${source.title}` : ''}`);
                     });
                     // Log performance metrics
-                    server.sendLoggingMessage({
-                        level: 'info',
-                        data: `Deep research completed in ${Date.now() - researchStartTime}ms`,
-                    });
+                    safeLog('info', `Deep research completed in ${Date.now() - researchStartTime}ms`);
                     if (!response.success) {
                         throw new Error(response.error || 'Deep research failed');
                     }
@@ -1097,14 +1057,19 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
                         sources: response.data.sources,
                     };
                     return {
-                        content: [
+                        content: [
+                            {
+                                type: 'text',
+                                text: trimResponseText(formattedResponse.finalAnalysis),
+                            },
+                        ],
                         isError: false,
                     };
                 }
                 catch (error) {
                     const errorMessage = error instanceof Error ? error.message : String(error);
                     return {
-                        content: [{ type: 'text', text: errorMessage }],
+                        content: [{ type: 'text', text: trimResponseText(errorMessage) }],
                         isError: true,
                     };
                 }
@@ -1116,20 +1081,14 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
                 try {
                     const { url, ...params } = args;
                     const generateStartTime = Date.now();
-                    server.sendLoggingMessage({
-                        level: 'info',
-                        data: `Starting LLMs.txt generation for URL: ${url}`,
-                    });
+                    safeLog('info', `Starting LLMs.txt generation for URL: ${url}`);
                     // Start the generation process
                     const response = await withRetry(async () => client.generateLLMsText(url, params), 'LLMs.txt generation');
                     if (!response.success) {
                         throw new Error(response.error || 'LLMs.txt generation failed');
                     }
                     // Log performance metrics
-                    server.sendLoggingMessage({
-                        level: 'info',
-                        data: `LLMs.txt generation completed in ${Date.now() - generateStartTime}ms`,
-                    });
+                    safeLog('info', `LLMs.txt generation completed in ${Date.now() - generateStartTime}ms`);
                     // Format the response
                     let resultText = '';
                     if ('data' in response) {
@@ -1139,42 +1098,41 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
                         }
                     }
                     return {
-                        content: [{ type: 'text', text: resultText }],
+                        content: [{ type: 'text', text: trimResponseText(resultText) }],
                         isError: false,
                     };
                 }
                 catch (error) {
                     const errorMessage = error instanceof Error ? error.message : String(error);
                     return {
-                        content: [{ type: 'text', text: errorMessage }],
+                        content: [{ type: 'text', text: trimResponseText(errorMessage) }],
                         isError: true,
                     };
                 }
             }
             default:
                 return {
-                    content: [
+                    content: [
+                        { type: 'text', text: trimResponseText(`Unknown tool: ${name}`) },
+                    ],
                     isError: true,
                 };
         }
     }
     catch (error) {
         // Log detailed error information
-        server.sendLoggingMessage({
-            level: 'error',
-            data: {
-                message: `Request failed: ${error instanceof Error ? error.message : String(error)}`,
-                tool: request.params.name,
-                arguments: request.params.arguments,
-                timestamp: new Date().toISOString(),
-                duration: Date.now() - startTime,
-            },
+        safeLog('error', {
+            message: `Request failed: ${error instanceof Error ? error.message : String(error)}`,
+            tool: request.params.name,
+            arguments: request.params.arguments,
+            timestamp: new Date().toISOString(),
+            duration: Date.now() - startTime,
         });
         return {
             content: [
                 {
                     type: 'text',
-                    text: `Error: ${error instanceof Error ? error.message : String(error)}
+                    text: trimResponseText(`Error: ${error instanceof Error ? error.message : String(error)}`),
                 },
             ],
             isError: true,
@@ -1182,10 +1140,7 @@ ${result.markdown ? `\nContent:\n${result.markdown}` : ''}`)
     }
     finally {
         // Log request completion with performance metrics
-        server.sendLoggingMessage({
-            level: 'info',
-            data: `Request completed in ${Date.now() - startTime}ms`,
-        });
+        safeLog('info', `Request completed in ${Date.now() - startTime}ms`);
     }
 });
 // Helper function to format results
@@ -1203,21 +1158,25 @@ ${doc.metadata?.title ? `Title: ${doc.metadata.title}` : ''}`;
 function hasCredits(response) {
     return 'creditsUsed' in response && typeof response.creditsUsed === 'number';
 }
+// Utility function to trim trailing whitespace from text responses
+// This prevents Claude API errors with "final assistant content cannot end with trailing whitespace"
+function trimResponseText(text) {
+    return text.trim();
+}
 // Server startup
 async function runServer() {
     try {
         console.error('Initializing FireCrawl MCP Server...');
         const transport = new StdioServerTransport();
+        // Detect if we're using stdio transport
+        isStdioTransport = transport instanceof StdioServerTransport;
+        if (isStdioTransport) {
+            console.error('Running in stdio mode, logging will be directed to stderr');
+        }
         await server.connect(transport);
         // Now that we're connected, we can send logging messages
-        server.sendLoggingMessage({
-            level: 'info',
-            data: 'FireCrawl MCP Server initialized successfully',
-        });
-        server.sendLoggingMessage({
-            level: 'info',
-            data: `Configuration: API URL: ${FIRECRAWL_API_URL || 'default'}`,
-        });
+        safeLog('info', 'FireCrawl MCP Server initialized successfully');
+        safeLog('info', `Configuration: API URL: ${FIRECRAWL_API_URL || 'default'}`);
         console.error('FireCrawl MCP Server running on stdio');
     }
     catch (error) {
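Taken together, the two additions at the end of the file explain most of this release: trimResponseText strips surrounding whitespace from every tool reply because the Claude API rejects final assistant content that ends with trailing whitespace (per the comment in the hunk above), and runServer now flags stdio transports so safeLog can route diagnostics to stderr. A short sketch of the trimming behavior:

// Mirrors the trimResponseText utility added above.
function trimResponseText(text) {
    return text.trim();
}

console.log(JSON.stringify(trimResponseText('Scrape completed.\n\n'))); // "Scrape completed."
console.log(JSON.stringify(trimResponseText('  already clean  ')));     // "already clean"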
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "firecrawl-mcp",
-  "version": "1.6.0",
+  "version": "1.7.1",
   "description": "MCP server for FireCrawl web scraping integration. Supports both cloud and self-hosted instances. Features include web scraping, batch processing, structured data extraction, and LLM-powered content analysis.",
   "type": "module",
   "bin": {