YouTube评论情感和关键词提取器
高级
这是一个Market Research, Multimodal AI领域的自动化工作流,包含 20 个节点。主要使用 Set, Code, Telegram, Aggregate, HttpRequest 等节点。 使用Gemini AI分析YouTube评论情感和关键词并通过Telegram报告
前置要求
- Telegram Bot Token
- 可能需要目标 API 的认证凭证
- Google Sheets API 凭证
使用的节点 (20)
工作流预览
可视化展示节点连接关系,支持缩放和平移
导出工作流
复制以下 JSON 配置到 n8n 导入,即可使用此工作流
{
"id": "1mZD3H0r79bhY71M",
"meta": {
"instanceId": "0d045f8fe3802ff2be0bb9a9ea445ee6c9ed61973377effe00767e483681e2f4",
"templateCredsSetupCompleted": true
},
"name": "YouTube Comment Sentiment & Keyword Extractor",
"tags": [],
"nodes": [
{
"id": "55d27fc7-ac2c-4ccc-a98f-f8c4ab7547c4",
"name": "Schedule Trigger",
"type": "n8n-nodes-base.scheduleTrigger",
"position": [
0,
0
],
"parameters": {
"rule": {
"interval": [
{}
]
}
},
"typeVersion": 1.2
},
{
"id": "bd198bd2-705f-4fc9-ba30-c1facfa06359",
"name": "Get row(s) in sheet",
"type": "n8n-nodes-base.googleSheets",
"position": [
220,
0
],
"parameters": {
"options": {},
"sheetName": {
"__rl": true,
"mode": "list",
"value": ""
},
"documentId": {
"__rl": true,
"mode": "list",
"value": ""
}
},
"typeVersion": 4.6
},
{
"id": "514959bb-14aa-4947-9d78-f903cb3201e3",
"name": "Loop Over Items",
"type": "n8n-nodes-base.splitInBatches",
"position": [
740,
0
],
"parameters": {
"options": {}
},
"typeVersion": 3
},
{
"id": "1a194433-415d-42cd-aeda-c983b5d81883",
"name": "HTTP Request",
"type": "n8n-nodes-base.httpRequest",
"position": [
980,
120
],
"parameters": {
"url": "=https://www.googleapis.com/youtube/v3/commentThreads?part=snippet&videoId={{ $('Get row(s) in sheet').item.json.video_id }}&maxResults=100&key={{ $json['Youtube API Key'] }}",
"options": {}
},
"typeVersion": 4.2
},
{
"id": "93821e4a-b8d9-4364-aa9c-2e9200c49691",
"name": "Youtube API Key",
"type": "n8n-nodes-base.set",
"position": [
480,
0
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "77fd67c8-e3a8-44a9-a347-5d10797ab01b",
"name": "Youtube API Key",
"type": "string",
"value": "YOUR_YOUTUBE_API_KEY"
}
]
}
},
"typeVersion": 3.4
},
{
"id": "5e91edf9-2ea6-4615-8a8c-710ac509a1f2",
"name": "Code",
"type": "n8n-nodes-base.code",
"position": [
1200,
120
],
"parameters": {
"jsCode": "// Input: HTTP Request result (object with .items)\n// Output: one item per comment, flat data ready to write\nconst out = [];\nconst root = items[0].json || {};\nfor (const it of (root.items || [])) {\n const s = it.snippet || {};\n const tl = (s.topLevelComment || {}).snippet || {};\n out.push({\n json: {\n comment_id: it.id,\n video_id: s.videoId,\n author: tl.authorDisplayName,\n comment_text: tl.textOriginal,\n like_count: tl.likeCount || 0,\n published_at: tl.publishedAt,\n author_channel_id: (tl.authorChannelId || {}).value,\n author_channel_url: tl.authorChannelUrl\n }\n });\n}\nreturn out;\n"
},
"typeVersion": 2
},
{
"id": "bfd6ff22-ae88-4d21-ac46-9a92c518cd87",
"name": "Basic LLM Chain",
"type": "@n8n/n8n-nodes-langchain.chainLlm",
"position": [
1420,
120
],
"parameters": {
"text": "=Data to analyze:\nyoutube_comment_text: \"{{ $json.comment_text }}\"\nlike_count: {{ $json.like_count }}\n\nConsider like_count only as a weak signal (slightly increase |score| if like_count is high and sentiment is clear).\nReturn the JSON only.\n",
"batching": {},
"messages": {
"messageValues": [
{
"message": "=You are a concise social-listening analyst. \nReturn ONLY valid JSON (no markdown, no backticks).\nRules:\n- sentiment ∈ [\"positive\",\"neutral\",\"negative\"] based on overall tone.\n- score is a float in [-1, 1]; map highly positive ≈ 0.7..1, neutral ≈ -0.2..0.2, highly negative ≈ -1..-0.7.\n- keywords is an array of 3–8 short keywords (lowercase, ascii, no emojis/hashtags/stopwords, no duplicates).\n- language is a 2-letter ISO 639-1 code (e.g., \"id\", \"en\").\n- Remove emojis, excessive punctuation, URLs from consideration (but keep meaning).\n- If text is spam/promotional or meaningless, set sentiment=\"neutral\" and score≈0 and still extract topic keywords.\n- Output must be STRICT JSON with exactly these keys: sentiment, score, keywords, language.\n"
}
]
},
"promptType": "define",
"hasOutputParser": true
},
"executeOnce": false,
"typeVersion": 1.7
},
{
"id": "29802a00-fd43-467a-8afd-534f9099c492",
"name": "Structured Output Parser",
"type": "@n8n/n8n-nodes-langchain.outputParserStructured",
"position": [
1580,
340
],
"parameters": {
"jsonSchemaExample": "{\n \"sentiment\": \"neutral\",\n \"score\": 0.05,\n \"keywords\": [\"jogja\", \"nyimak\", \"harapan\", \"promo\"],\n \"language\": \"id\"\n}"
},
"typeVersion": 1.3
},
{
"id": "67469c87-b1cf-4ed7-8b41-ffadf44fc32c",
"name": "OpenRouter Chat Model",
"type": "@n8n/n8n-nodes-langchain.lmChatOpenRouter",
"position": [
1420,
340
],
"parameters": {
"model": "google/gemini-2.0-flash-exp:free",
"options": {}
},
"typeVersion": 1
},
{
"id": "2aa4b295-b562-43ff-b9df-b411dc781181",
"name": "Code1",
"type": "n8n-nodes-base.code",
"position": [
1780,
120
],
"parameters": {
"jsCode": "// n8n Code node: Normalize LLM JSON -> {sentiment, score, keywords, language}\nconst out = [];\n\nfor (const item of items) {\n // 1) Get candidate payload from several possible formats\n let payload = item.json?.output ?? item.json?.data ?? item.json;\n\n // 2) If string, try to parse JSON\n if (typeof payload === 'string') {\n try { payload = JSON.parse(payload); } catch { payload = {}; }\n }\n\n // 3) If there's still payload.output as string JSON, parse it too\n if (payload && typeof payload.output === 'string') {\n try { payload = JSON.parse(payload.output); } catch {}\n }\n\n // 4) Normalize fields\n const allowed = new Set(['positive','neutral','negative']);\n let sentiment = String(payload?.sentiment ?? '').toLowerCase();\n if (!allowed.has(sentiment)) sentiment = 'neutral';\n\n let score = Number(payload?.score);\n if (!Number.isFinite(score)) score = 0;\n if (score > 1) score = 1;\n if (score < -1) score = -1;\n\n let keywords = Array.isArray(payload?.keywords) ? payload.keywords : [];\n keywords = keywords\n .map(k => String(k).toLowerCase().trim())\n .filter(k => k && k.length <= 32)\n .slice(0, 8);\n\n let language = String(payload?.language ?? '').slice(0, 2).toLowerCase();\n if (!language) language = 'en';\n\n out.push({ json: { sentiment, score, keywords, language } });\n}\n\nreturn out;\n"
},
"typeVersion": 2
},
{
"id": "1a6a9a96-abb2-468c-b7b2-45e527ac1e36",
"name": "Append or update row in sheet",
"type": "n8n-nodes-base.googleSheets",
"position": [
2000,
120
],
"parameters": {
"columns": {
"value": {
"author": "={{ $('Code').item.json.author }}",
"keywords": "={{ $json.keywords }}",
"video_id": "={{ $('Code').item.json.video_id }}",
"sentiment": "={{ $json.sentiment }}",
"comment_id": "={{ $('Code').item.json.comment_id }}",
"like_count": "={{ $('Code').item.json.like_count }}",
"comment_text": "={{ $('Code').item.json.comment_text }}",
"published_at": "={{ $('Code').item.json.published_at }}",
"sentiment_score": "={{ $json.score }}",
"author_channel_id": "={{ $('Code').item.json.author_channel_id }}",
"author_channel_url": "={{ $('Code').item.json.author_channel_url }}"
},
"schema": [
{
"id": "video_id",
"type": "string",
"display": true,
"removed": false,
"required": false,
"displayName": "video_id",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "comment_id",
"type": "string",
"display": true,
"removed": false,
"required": false,
"displayName": "comment_id",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "author",
"type": "string",
"display": true,
"required": false,
"displayName": "author",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "comment_text",
"type": "string",
"display": true,
"required": false,
"displayName": "comment_text",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "sentiment",
"type": "string",
"display": true,
"required": false,
"displayName": "sentiment",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "sentiment_score",
"type": "string",
"display": true,
"required": false,
"displayName": "sentiment_score",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "keywords",
"type": "string",
"display": true,
"required": false,
"displayName": "keywords",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "like_count",
"type": "string",
"display": true,
"required": false,
"displayName": "like_count",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "published_at",
"type": "string",
"display": true,
"required": false,
"displayName": "published_at",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "author_channel_id",
"type": "string",
"display": true,
"required": false,
"displayName": "author_channel_id",
"defaultMatch": false,
"canBeUsedToMatch": true
},
{
"id": "author_channel_url",
"type": "string",
"display": true,
"required": false,
"displayName": "author_channel_url",
"defaultMatch": false,
"canBeUsedToMatch": true
}
],
"mappingMode": "defineBelow",
"matchingColumns": [
"comment_id"
],
"attemptToConvertTypes": false,
"convertFieldsToString": false
},
"options": {},
"operation": "appendOrUpdate",
"sheetName": {
"__rl": true,
"mode": "list",
"value": ""
},
"documentId": {
"__rl": true,
"mode": "list",
"value": ""
}
},
"typeVersion": 4.6
},
{
"id": "ddfbf71a-2c92-415d-b3aa-fe087c9c9835",
"name": "Aggregate",
"type": "n8n-nodes-base.aggregate",
"position": [
2220,
120
],
"parameters": {
"options": {},
"fieldsToAggregate": {
"fieldToAggregate": [
{
"fieldToAggregate": "sentiment"
},
{
"fieldToAggregate": "sentiment_score"
},
{
"fieldToAggregate": "keywords"
}
]
}
},
"typeVersion": 1
},
{
"id": "655ebe44-a4df-4184-87b9-ac18c5734c62",
"name": "Code2",
"type": "n8n-nodes-base.code",
"position": [
2440,
120
],
"parameters": {
"jsCode": "// n8n Code node: Summarize sentiments & top keywords\nconst isAggregatedArrays =\n items.length === 1 &&\n items[0].json &&\n (Array.isArray(items[0].json.sentiment) || Array.isArray(items[0].json.keywords));\n\nfunction summarizeFromArrays(j) {\n const sentiments = Array.isArray(j.sentiment) ? j.sentiment : [];\n const scores = Array.isArray(j.sentiment_score) ? j.sentiment_score : [];\n let keywordsNested = Array.isArray(j.keywords) ? j.keywords : [];\n\n // ensure keywordsNested = array of arrays\n keywordsNested = keywordsNested.map(k =>\n Array.isArray(k) ? k : (typeof k === 'string' ? k.split(',') : [])\n );\n\n const total = Math.max(sentiments.length, keywordsNested.length, scores.length, 0);\n const cnt = { positive: 0, neutral: 0, negative: 0 };\n const kwFreq = {};\n\n for (let i = 0; i < total; i++) {\n const s = String((sentiments[i] ?? 'neutral')).toLowerCase();\n const normS = (s === 'positive' || s === 'negative' || s === 'neutral') ? s : 'neutral';\n cnt[normS]++;\n\n const kws = Array.isArray(keywordsNested[i]) ? keywordsNested[i] : [];\n for (let k of kws) {\n const key = String(k).toLowerCase().trim();\n if (!key) continue;\n kwFreq[key] = (kwFreq[key] || 0) + 1;\n }\n }\n\n const pct = v => total ? Math.round((v / total) * 100) : 0;\n const top_keywords = Object.entries(kwFreq)\n .sort((a, b) => (b[1] - a[1]) || a[0].localeCompare(b[0]))\n .slice(0, 10)\n .map(([k, v]) => `${k} (${v})`);\n\n return {\n total_comments: total,\n positive: cnt.positive,\n neutral: cnt.neutral,\n negative: cnt.negative,\n positive_pct: pct(cnt.positive),\n neutral_pct: pct(cnt.neutral),\n negative_pct: pct(cnt.negative),\n top_keywords\n };\n}\n\nfunction summarizeFromItems(items) {\n const cnt = { positive: 0, neutral: 0, negative: 0 };\n const kwFreq = {};\n let total = 0;\n\n for (const it of items) {\n const s = String(it.json?.sentiment ?? 'neutral').toLowerCase();\n const normS = (s === 'positive' || s === 'negative' || s === 'neutral') ? s : 'neutral';\n cnt[normS]++;\n total++;\n\n const kws = Array.isArray(it.json?.keywords) ? it.json.keywords : [];\n for (let k of kws) {\n const key = String(k).toLowerCase().trim();\n if (!key) continue;\n kwFreq[key] = (kwFreq[key] || 0) + 1;\n }\n }\n\n const pct = v => total ? Math.round((v / total) * 100) : 0;\n const top_keywords = Object.entries(kwFreq)\n .sort((a, b) => (b[1] - a[1]) || a[0].localeCompare(b[0]))\n .slice(0, 10)\n .map(([k, v]) => `${k} (${v})`);\n\n return {\n total_comments: total,\n positive: cnt.positive,\n neutral: cnt.neutral,\n negative: cnt.negative,\n positive_pct: pct(cnt.positive),\n neutral_pct: pct(cnt.neutral),\n negative_pct: pct(cnt.negative),\n top_keywords\n };\n}\n\nconst summary = isAggregatedArrays\n ? summarizeFromArrays(items[0].json)\n : summarizeFromItems(items);\n\nreturn [{ json: summary }];\n"
},
"typeVersion": 2
},
{
"id": "c1f271f1-bc1b-4447-9902-aacbfd0e9750",
"name": "Send a text message",
"type": "n8n-nodes-base.telegram",
"position": [
2660,
120
],
"parameters": {
"text": "=📊 YouTube Comments Report\nVideo : {{ $('Get row(s) in sheet').item.json.video_url }}\nTotal: {{ $json.total_comments }}\n👍 Positive: {{$json.positive}} ({{ $json.positive_pct }}%)\n😐 Neutral: {{$json.neutral}} ({{ $json.neutral_pct }}%)\n👎 Negative: {{$json.negative}} ({{ $json.negative_pct }}%)\n\n🔎 Top Keywords:\n{{ $json.top_keywords.join(\", \") }}\n\n_Ran at {{new Date().toLocaleString()}}_\n",
"chatId": "YOUR_TELEGRAM_CHAT_ID",
"additionalFields": {
"appendAttribution": false
}
},
"typeVersion": 1.2
},
{
"id": "f010f65d-6c4d-4468-afcb-57f2a2f0cc9a",
"name": "No Operation, do nothing",
"type": "n8n-nodes-base.noOp",
"position": [
960,
-100
],
"parameters": {},
"typeVersion": 1
},
{
"id": "923d6e70-ef03-4533-b1f1-535f76f93ce6",
"name": "便签1",
"type": "n8n-nodes-base.stickyNote",
"position": [
-660,
-400
],
"parameters": {
"width": 580,
"height": 1040,
"content": "# YouTube Comment Sentiment & Keyword Extractor\n\nThis workflow automatically retrieves comments from YouTube videos listed in Google Sheets, analyzes sentiment and keywords using an LLM, stores the results, then generates an aggregate summary and sends a report to Telegram.\n\n\n## ✨ Key Features\n- **Automated Scheduling** – Runs the process at regular intervals.\n- **Google Sheets Integration** – Fetches a list of videos and stores analysis results directly in Google Sheets.\n- **YouTube Comment Retrieval** – Retrieves top-level comments (up to 100 per video; can be extended with pagination).\n- **Comment Parsing** – Flattens the API response into one item per comment.\n- **AI Sentiment & Keyword Analysis** – Powered by an LLM model to extract:\n - Sentiment (`positive`, `neutral`, `negative`)\n - Sentiment score (-1 to 1)\n - Keywords (cleaned from stopwords, emojis, hashtags)\n - Language (ISO 639-1, 2-letter code)\n- **Data Normalization** – Ensures LLM output is consistent and safe to write to the sheet.\n- **Telegram Reporting** – Sends a concise report for each video directly to a Telegram chat.\n\n---\n\n## 🔐 Required Credentials\n- **Google Sheets OAuth2 API** – Read & write access to the spreadsheet.\n- **YouTube Data API Key** – To fetch comments via `commentThreads`.\n- **OpenRouter API Key** – To access the LLM model.\n- **Telegram Bot API** – To send reports via a Telegram bot.\n\n---\n\n## 🎁 Benefits\n- **End-to-End Automation** – From comment retrieval to reporting, no manual steps required.\n- **No Duplication** – Each comment is uniquely tracked by `comment_id`.\n- **Data Transparency** – Raw and analyzed data stored neatly in Google Sheets.\n- **Actionable Insights** – Sentiment and keyword summaries ready for content strategy decisions.\n- **Scalable** – Can be extended for pagination, reply comment analysis, or additional metrics.\n- **Cross-Platform** – Direct Telegram integration for easy monitoring on any device.\n"
},
"typeVersion": 1
},
{
"id": "53fbf0dd-4131-4833-abca-96a4ca171241",
"name": "便签",
"type": "n8n-nodes-base.stickyNote",
"position": [
-40,
-220
],
"parameters": {
"color": 2,
"width": 660,
"height": 400,
"content": "- Run the workflow on a schedule.\n- Fetch the data row that contains the video_id and video_url from the spreadsheet.\n- Add a YouTube API key field so that subsequent records (HTTP requests to the YouTube API) can access it."
},
"typeVersion": 1
},
{
"id": "a7500b30-f211-4311-843e-0b94a1c7efa4",
"name": "便签2",
"type": "n8n-nodes-base.stickyNote",
"position": [
660,
-300
],
"parameters": {
"color": 4,
"width": 660,
"height": 560,
"content": "- Fetch data from the previous node and process it either one by one or in batches.\n- Call the YouTube Data API endpoint.\n- Receive the JSON results from the YouTube API and flatten the response so that each comment becomes a separate item."
},
"typeVersion": 1
},
{
"id": "da86e6c7-637a-4a02-8755-e687d22cb8f7",
"name": "便签3",
"type": "n8n-nodes-base.stickyNote",
"position": [
1360,
-80
],
"parameters": {
"color": 5,
"width": 800,
"height": 560,
"content": "- Use the LLM model to analyze the comment text.\n- Ensure the LLM output follows a strict JSON structure based on the example schema, with no incorrect formats or keys.\n- Normalize the LLM JSON output.\n- Save the analysis results along with the comment metadata to Google Sheets."
},
"typeVersion": 1
},
{
"id": "83530f28-16df-418f-8763-5b6096b860e5",
"name": "便签4",
"type": "n8n-nodes-base.stickyNote",
"position": [
2180,
-80
],
"parameters": {
"color": 6,
"width": 640,
"height": 560,
"content": "- Combine values from multiple comment analysis columns.\n- Create a summary based on the aggregated results.\n- Send the summary report to a Telegram chat."
},
"typeVersion": 1
}
],
"active": false,
"pinData": {},
"settings": {
"executionOrder": "v1"
},
"versionId": "ab1692ad-3351-4c24-b8dd-f7416de30a65",
"connections": {
"Code": {
"main": [
[
{
"node": "Basic LLM Chain",
"type": "main",
"index": 0
}
]
]
},
"Code1": {
"main": [
[
{
"node": "Append or update row in sheet",
"type": "main",
"index": 0
}
]
]
},
"Code2": {
"main": [
[
{
"node": "Send a text message",
"type": "main",
"index": 0
}
]
]
},
"Aggregate": {
"main": [
[
{
"node": "Code2",
"type": "main",
"index": 0
}
]
]
},
"HTTP Request": {
"main": [
[
{
"node": "Code",
"type": "main",
"index": 0
}
]
]
},
"Basic LLM Chain": {
"main": [
[
{
"node": "Code1",
"type": "main",
"index": 0
}
]
]
},
"Loop Over Items": {
"main": [
[
{
"node": "No Operation, do nothing",
"type": "main",
"index": 0
}
],
[
{
"node": "HTTP Request",
"type": "main",
"index": 0
}
]
]
},
"Youtube API Key": {
"main": [
[
{
"node": "Loop Over Items",
"type": "main",
"index": 0
}
]
]
},
"Schedule Trigger": {
"main": [
[
{
"node": "Get row(s) in sheet",
"type": "main",
"index": 0
}
]
]
},
"Get row(s) in sheet": {
"main": [
[
{
"node": "Youtube API Key",
"type": "main",
"index": 0
}
]
]
},
"Send a text message": {
"main": [
[
{
"node": "Loop Over Items",
"type": "main",
"index": 0
}
]
]
},
"OpenRouter Chat Model": {
"ai_languageModel": [
[
{
"node": "Basic LLM Chain",
"type": "ai_languageModel",
"index": 0
}
]
]
},
"Structured Output Parser": {
"ai_outputParser": [
[
{
"node": "Basic LLM Chain",
"type": "ai_outputParser",
"index": 0
}
]
]
},
"Append or update row in sheet": {
"main": [
[
{
"node": "Aggregate",
"type": "main",
"index": 0
}
]
]
}
}
}
常见问题
如何使用这个工作流?
复制上方的 JSON 配置代码,在您的 n8n 实例中创建新工作流并选择「从 JSON 导入」,粘贴配置后根据需要修改凭证设置即可。
这个工作流适合什么场景?
高级 - 市场调研, 多模态 AI
需要付费吗?
本工作流完全免费,您可以直接导入使用。但请注意,工作流中使用的第三方服务(如 OpenAI API)可能需要您自行付费。
相关工作流推荐
使用AI和RSS的多平台YouTube内容生成器
使用AI和RSS从YouTube生成多平台内容
If
Set
Code
+10
37 节点Budi SJ
内容创作
每日 WhatsApp 群组智能分析:GPT-4.1 分析与语音消息转录
每日 WhatsApp 群组智能分析:GPT-4.1 分析与语音消息转录
If
Set
Code
+20
52 节点Daniel Lianes
杂项
基于 AI、社交媒体和 WhatsApp 链接的自动化本地商业线索生成器
使用 AI、社交媒体和 WhatsApp 链接自动生成本地商业线索
If
Code
Limit
+9
29 节点Budi SJ
潜在客户开发
使用Vision API和LLM实现自动化发票收集与数据提取
使用Google Vision OCR、Gemini LLM和Google Sheets提取和结构化发票数据
Set
Code
Telegram
+8
17 节点Budi SJ
发票处理
使用GPT-5和fal.ai图像从关键词到WordPress自动化SEO博客流程
使用GPT-5和fal.ai图像从关键词到WordPress自动化SEO博客流程
Set
Code
Wait
+20
96 节点Paul
内容创作
LinkedIn和X病毒内容自动引擎
使用AI生成和发布自动创建LinkedIn和X的病毒内容
If
Set
Wait
+26
156 节点Diptamoy Barman
内容创作
工作流信息
难度等级
高级
节点数量20
分类2
节点类型13
作者
Budi SJ
@budisj — I’m a Product Designer who also works as an Automation Developer. With a background in product design and systems thinking, I build user-centered workflows. My focus is on helping teams and businesses work more productively through impactful automation systems.
外部链接
在 n8n.io 查看 →
分享此工作流