使用Scrape.do API和Google Sheets进行SERP竞争对手研究
中级
这是一个Content Creation, Multimodal AI领域的自动化工作流,包含 12 个节点。主要使用 Code, HttpRequest, GoogleSheets, ManualTrigger 等节点。 使用Scrape.do API和Google Sheets进行SERP竞争对手研究
前置要求
- •Scrape.do API Token(需在 n8n 变量中配置为 SCRAPEDO_TOKEN,供 HTTP 请求节点使用)
- •Google Sheets API 凭证
工作流预览
可视化展示节点连接关系,支持缩放和平移
导出工作流
复制以下 JSON 配置到 n8n 导入,即可使用此工作流
{
"meta": {
"instanceId": "cb5caf45c9475b848c7e83772505bb02340e165acdd8de77e25011192306257c"
},
"nodes": [
{
"id": "cf7497dc-a934-41d5-9860-e27599d3bf90",
"name": "当点击\"执行工作流\"时",
"type": "n8n-nodes-base.manualTrigger",
"position": [
-832,
-64
],
"parameters": {},
"typeVersion": 1
},
{
"id": "f0e7adde-83ce-428d-9590-f1f35b6c0ba3",
"name": "从表格获取关键词",
"type": "n8n-nodes-base.googleSheets",
"position": [
-544,
-64
],
"parameters": {
"options": {},
"sheetName": {
"__rl": true,
"mode": "list",
"value": "gid=0",
"cachedResultUrl": "https://docs.google.com/spreadsheets/d/1VXVbyqRPPHgO0tyoxpEajCyPX1DZdY3hRu_jCOdDriE/edit#gid=0",
"cachedResultName": "Keywords"
},
"documentId": {
"__rl": true,
"mode": "list",
"value": "1VXVbyqRPPHgO0tyoxpEajCyPX1DZdY3hRu_jCOdDriE",
"cachedResultUrl": "https://docs.google.com/spreadsheets/d/1VXVbyqRPPHgO0tyoxpEajCyPX1DZdY3hRu_jCOdDriE/edit?usp=drivesdk",
"cachedResultName": "Competitor tracker"
}
},
"credentials": {
"googleSheetsOAuth2Api": {
"id": "df8r9D022KIAOHTC",
"name": "Google Sheets account"
}
},
"typeVersion": 4.6
},
{
"id": "2b8e096f-cd26-4886-9cc2-78d67b49c76d",
"name": "URL 编码关键词",
"type": "n8n-nodes-base.code",
"position": [
-272,
-64
],
"parameters": {
"jsCode": "const encode = encodeURIComponent;\nfor (const item of $input.all()) {\n item.json.Keyword = encode(item.json.Keyword);\n}\nreturn $input.all();"
},
"typeVersion": 2
},
{
"id": "2c743dde-c8f3-4ce8-9a11-89d95accffc3",
"name": "获取 Google 搜索结果",
"type": "n8n-nodes-base.httpRequest",
"position": [
80,
-96
],
"parameters": {
"url": "=https://api.scrape.do/?token={{$vars.SCRAPEDO_TOKEN}}&url={{ encodeURIComponent('https://www.google.com/search?q=' + $json.Keyword) }}&geoCode={{ $json['Target Country'] || 'us' }}&render=true",
"options": {
"timeout": 60000,
"redirect": {
"redirect": {}
}
},
"sendHeaders": true,
"headerParameters": {
"parameters": [
{
"name": "Accept",
"value": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
}
]
}
},
"typeVersion": 4.2,
"alwaysOutputData": true
},
{
"id": "581d6f6b-b4b7-460b-8d99-f61de812f2eb",
"name": "从 HTML 提取竞争对手数据",
"type": "n8n-nodes-base.code",
"position": [
368,
-96
],
"parameters": {
"jsCode": "// Parse the HTML response from Scrape.do\nconst html = $input.first().json.data || $input.first().json.body || '';\n\nconst results = [];\n\n// Simple regex patterns to extract search results\n// Pattern for organic results\nconst resultPattern = /<div[^>]*class=\\\"[^\\\"]*Ww4FFb[^\\\"]*\\\"[^>]*>([\\s\\S]*?)<\\/div>/gi;\nconst matches = html.matchAll(resultPattern);\n\nlet position = 1;\nfor (const match of matches) {\n const resultHtml = match[1];\n \n // Extract title (h3 tag)\n const titleMatch = resultHtml.match(/<h3[^>]*>([^<]*)<\\/h3>/i);\n const title = titleMatch ? titleMatch[1].replace(/<[^>]*>/g, '').trim() : '';\n \n // Extract URL\n const urlMatch = resultHtml.match(/href=\\\"([^\\\"]+)\\\"/i);\n const url = urlMatch ? urlMatch[1] : '';\n \n // Extract description\n const descMatch = resultHtml.match(/<div[^>]*class=\\\"[^\\\"]*VwiC3b[^\\\"]*\\\"[^>]*>([^<]*)</i);\n const description = descMatch ? descMatch[1].replace(/<[^>]*>/g, '').trim() : '';\n \n if (title && url) {\n results.push({\n position: position++,\n websiteTitle: title,\n websiteUrl: url,\n websiteDescription: description || 'No description available',\n keyword: $input.first().json.originalKeyword || $input.first().json.Keyword || ''\n });\n }\n \n // Stop after 10 organic results\n if (position > 10) break;\n}\n\n// If no results found with the above method, try a simpler approach\nif (results.length === 0) {\n // Look for any links that appear to be search results\n const linkPattern = /<a[^>]*href=\\\"(https?:\\/\\/[^\\\"]+)\\\"[^>]*>([^<]+)<\\/a>/gi;\n const linkMatches = html.matchAll(linkPattern);\n \n position = 1;\n for (const linkMatch of linkMatches) {\n const url = linkMatch[1];\n const title = linkMatch[2].replace(/<[^>]*>/g, '').trim();\n \n // Filter out Google's own links\n if (url && !url.includes('google.com') && !url.includes('googleapis.com')) {\n results.push({\n position: position++,\n websiteTitle: title || 'No title',\n websiteUrl: url,\n 
websiteDescription: 'Description not available',\n keyword: $input.first().json.originalKeyword || $input.first().json.Keyword || ''\n });\n \n if (position > 10) break;\n }\n }\n}\n\n// Return results or a default if none found\nreturn results.length > 0 ? results.map(r => ({json: r})) : [{json: {\n position: 1,\n websiteTitle: 'No results found',\n websiteUrl: 'N/A',\n websiteDescription: 'Please check the HTML response',\n keyword: $input.first().json.originalKeyword || $input.first().json.Keyword || '',\n htmlLength: html.length\n}}];"
},
"typeVersion": 2,
"alwaysOutputData": true
},
{
"id": "fe0422c4-be3e-4a41-aa36-0e631c4450b4",
"name": "将结果附加到表格",
"type": "n8n-nodes-base.googleSheets",
"position": [
640,
-96
],
"parameters": {
"columns": {
"value": {},
"schema": [],
"mappingMode": "autoMapInputData",
"matchingColumns": [],
"attemptToConvertTypes": false,
"convertFieldsToString": false
},
"options": {},
"operation": "append",
"sheetName": {
"__rl": true,
"mode": "list",
"value": 1113333162,
"cachedResultUrl": "https://docs.google.com/spreadsheets/d/1VXVbyqRPPHgO0tyoxpEajCyPX1DZdY3hRu_jCOdDriE/edit#gid=1113333162",
"cachedResultName": "Competitor Results"
},
"documentId": {
"__rl": true,
"mode": "list",
"value": "1VXVbyqRPPHgO0tyoxpEajCyPX1DZdY3hRu_jCOdDriE",
"cachedResultUrl": "https://docs.google.com/spreadsheets/d/1VXVbyqRPPHgO0tyoxpEajCyPX1DZdY3hRu_jCOdDriE/edit?usp=drivesdk",
"cachedResultName": "Competitor tracker"
}
},
"typeVersion": 4.6
},
{
"id": "1fbe216c-35dc-4eb1-bed9-ef130ffa15b5",
"name": "工作流开始",
"type": "n8n-nodes-base.stickyNote",
"position": [
-880,
96
],
"parameters": {
"height": 192,
"content": "此工作流通过点击“执行工作流”手动触发。"
},
"typeVersion": 1
},
{
"id": "6dc607b7-d5f4-4fcd-809e-9545e1488972",
"name": "读取关键词",
"type": "n8n-nodes-base.stickyNote",
"position": [
-576,
112
],
"parameters": {
"height": 192,
"content": "从指定的 Google Sheets 读取数据。"
},
"typeVersion": 1
},
{
"id": "9868274a-1c01-459b-bdf9-47cf1ef14d7a",
"name": "编码关键词",
"type": "n8n-nodes-base.stickyNote",
"position": [
-304,
80
],
"parameters": {
"height": 192,
"content": "使用 `encodeURIComponent` 对“关键词”值进行编码。"
},
"typeVersion": 1
},
{
"id": "a9158b83-ce5a-45d2-908c-9a504990dd71",
"name": "使用 Scrape.do 获取 SERP 数据",
"type": "n8n-nodes-base.stickyNote",
"position": [
16,
64
],
"parameters": {
"content": "**Scrape.do 配置:**"
},
"typeVersion": 1
},
{
"id": "4b9de746-a1ae-4e43-91d6-3b42524f7ddf",
"name": "提取竞争对手数据",
"type": "n8n-nodes-base.stickyNote",
"position": [
320,
80
],
"parameters": {
"content": "解析来自 Scrape.do 的 HTML 响应以提取:"
},
"typeVersion": 1
},
{
"id": "b51511f4-0d7c-4411-a5a4-36cc976fcf6a",
"name": "附加到结果表格",
"type": "n8n-nodes-base.stickyNote",
"position": [
640,
80
],
"parameters": {
"content": "将解析后的 SERP 行附加到“结果”表格中。预期列:"
},
"typeVersion": 1
}
],
"pinData": {},
"connections": {
"URL 编码关键词": {
"main": [
[
{
"node": "获取 Google 搜索结果",
"type": "main",
"index": 0
}
]
]
},
"从表格获取关键词": {
"main": [
[
{
"node": "URL 编码关键词",
"type": "main",
"index": 0
}
]
]
},
"获取 Google 搜索结果": {
"main": [
[
{
"node": "从 HTML 提取竞争对手数据",
"type": "main",
"index": 0
}
]
]
},
"当点击\"执行工作流\"时": {
"main": [
[
{
"node": "从表格获取关键词",
"type": "main",
"index": 0
}
]
]
},
"从 HTML 提取竞争对手数据": {
"main": [
[
{
"node": "将结果附加到表格",
"type": "main",
"index": 0
}
]
]
}
}
}
常见问题
如何使用这个工作流?
复制上方的 JSON 配置代码,在您的 n8n 实例中创建新工作流并选择「从 JSON 导入」,粘贴配置后根据需要修改凭证设置即可。
这个工作流适合什么场景?
中级 - 内容创作, 多模态 AI
需要付费吗?
本工作流完全免费,您可以直接导入使用。但请注意,工作流中使用的第三方服务(如 Scrape.do API)可能需要您自行付费。
相关工作流推荐
使用Scrape.do提取Zillow房产数据到Google Sheets
使用Scrape.do提取Zillow房产数据到Google Sheets
If
Code
Http Request
+2
6 节点Onur
内容创作
LinkedIn内容工厂(含OpenAI研究与品牌图像生成)
LinkedIn内容工厂,集成OpenAI研究与Replicate品牌图像生成
If
Set
Code
+10
23 节点Onur
社交媒体
应聘者追踪自动化
使用GPT-4.1简历解析、Google表格和Gmail提醒实现应聘者追踪自动化
If
Code
Gmail
+6
13 节点Onur
内容创作
Shopify自动发送WhatsApp感谢消息和忠诚度优惠券(使用Rapiwa API)
使用Rapiwa从Shopify自动发送WhatsApp感谢消息和忠诚度优惠券
If
Code
Wait
+5
17 节点SpaGreen Creative
内容创作
模板HP - 内容排期
使用GPT-4、Apify和Google Sheets自动生成Instagram内容排期
Set
Code
Wait
+9
24 节点keisha kalra
内容创作
Printify自动化 - 更新标题和描述 - AlexK1919
使用GPT-4o-mini为Printify自动生成SEO产品标题和描述
If
Set
Code
+10
26 节点Amit Mehta
内容创作
工作流信息
难度等级
中级
节点数量12
分类2
节点类型5
作者
Onur
@onurpolat05Hello, I'm Onur I've been working as a freelance software developer for about four years. In addition, I develop my own projects. For some time, I have been improving myself and providing various services related to AI and AI workflows. Both by writing low code and code. If you have any questions, don't hesitate to contact me.
外部链接
在 n8n.io 查看 →
分享此工作流