Telegram-Forum-Puls: Community-Monitoring mit Gemini und Groq AI-Modellen
Dies ist ein Automatisierungsworkflow aus den Bereichen Miscellaneous, AI Chatbot und Multimodal AI mit 59 Nodes. Hauptsächlich werden If-, Set-, Code-, Html-, Merge- und weitere Nodes verwendet. Telegram-Forum-Puls: Community-Überwachung mit Gemini- und Groq-KI-Modellen
- Telegram Bot Token
- Möglicherweise sind Ziel-API-Anmeldedaten erforderlich
- MongoDB-Verbindungsstring
- Google Gemini API Key
- Groq API Key
Verwendete Nodes (59)
{
"meta": {
"instanceId": "735886904af210643f438394a538e64374f0cb4ab13fd94d97005987482d652a",
"templateCredsSetupCompleted": true
},
"nodes": [
{
"id": "6374695d-e955-4564-9f72-954edfa93e89",
"name": "Groq Chat Model",
"type": "@n8n/n8n-nodes-langchain.lmChatGroq",
"position": [
-4640,
32
],
"parameters": {
"model": "openai/gpt-oss-120b",
"options": {}
},
"credentials": {
"groqApi": {
"id": "n97HjLNwPywpbFTr",
"name": "Groq jayracroi@gmail.com"
}
},
"typeVersion": 1
},
{
"id": "dfe59ad7-1801-4d7b-b95b-656a84c06d5a",
"name": "Structured Output Parser",
"type": "@n8n/n8n-nodes-langchain.outputParserStructured",
"position": [
-4256,
32
],
"parameters": {
"autoFix": true,
"jsonSchemaExample": "{\n \"intent\": \"search\",\n \"keyword\": \"Webhook signature verification\",\n \"platforms\": \"all\",\n \"reddit_sort\": \"top\",\n \"reddit_time\": \"year\",\n \"n8ncom_sort\": \"likes\",\n \"n8ncom_time\": \"after:2025-01-01\",\n \"limit\": 10,\n \"page\": 1,\n \"link_url\": \"https://nguyenthieutoan.com\",\n \"language\": \"vi\",\n \"confidence\": 0.95\n}"
},
"typeVersion": 1.3
},
{
"id": "f5b191a7-908d-48e8-ba20-dcc758c014e9",
"name": "MongoDB Chat Speicher",
"type": "@n8n/n8n-nodes-langchain.memoryMongoDbChat",
"position": [
-4448,
16
],
"parameters": {
"sessionKey": "={{ $('Telegram Trigger - User Message').item.json.message.from.id }}",
"sessionIdType": "customKey",
"collectionName": "n8n_forum_update_chat"
},
"credentials": {
"mongoDb": {
"id": "HNBPi26RUqur8N9y",
"name": "MongoDB_toannguyen96vn_n8n_data"
}
},
"typeVersion": 1
},
{
"id": "2f34f6d8-9121-42b5-945e-800b715e8eaa",
"name": "If Reddit?",
"type": "n8n-nodes-base.httpRequest",
"position": [
-2912,
304
],
"parameters": {
"url": "={{ $json.output.link_url }}",
"options": {}
},
"typeVersion": 4.2
},
{
"id": "2378b2e7-79bf-4bce-b83b-7da7c3fd4ea7",
"name": "Comment",
"type": "n8n-nodes-base.httpRequest",
"position": [
-2464,
160
],
"parameters": {
"url": "=https://www.reddit.com{{ $json.comment_url }}",
"options": {}
},
"typeVersion": 4.2
},
{
"id": "8587a97b-abcd-443d-bf32-78369a0d1ade",
"name": "Get Post Content",
"type": "n8n-nodes-base.html",
"position": [
-2688,
304
],
"parameters": {
"options": {},
"operation": "extractHtmlContent",
"extractionValues": {
"values": [
{
"key": "post_title",
"cssSelector": "h1"
},
{
"key": "post_author",
"cssSelector": "a.author-name"
},
{
"key": "post_content",
"cssSelector": "div[slot='text-body']"
},
{
"key": "post_upvotes",
"attribute": "score",
"cssSelector": "shreddit-post",
"returnValue": "attribute"
},
{
"key": "commentCount",
"attribute": "comment-count",
"cssSelector": "shreddit-post",
"returnValue": "attribute"
},
{
"key": "postTime",
"attribute": "ts",
"cssSelector": "faceplate-timeago",
"returnValue": "attribute"
},
{
"key": "flair",
"cssSelector": "shreddit-post-flair .flair-content"
},
{
"key": "comment_url",
"attribute": "src",
"cssSelector": "faceplate-partial[src^=\"/svc/shreddit/comments/\"]",
"returnValue": "attribute"
}
]
}
},
"typeVersion": 1.2
},
{
"id": "e0e5646a-cb32-457a-9cb3-24b76939f31e",
"name": "Get Comment",
"type": "n8n-nodes-base.html",
"position": [
-2240,
160
],
"parameters": {
"options": {
"trimValues": true,
"cleanUpText": true
},
"operation": "extractHtmlContent",
"extractionValues": {
"values": [
{
"key": "comment_author",
"attribute": "author",
"cssSelector": "shreddit-comment",
"returnArray": true,
"returnValue": "attribute"
},
{
"key": "comment_content",
"cssSelector": "shreddit-comment div[slot='comment']",
"returnArray": true,
"returnValue": "html"
},
{
"key": "comment_upvotes",
"attribute": "score",
"cssSelector": "shreddit-comment",
"returnArray": true,
"returnValue": "attribute"
},
{
"key": "comment_level",
"attribute": "depth",
"cssSelector": "shreddit-comment",
"returnArray": true,
"returnValue": "attribute"
},
{
"key": "comment_id",
"attribute": "thingid",
"cssSelector": "shreddit-comment",
"returnArray": true,
"returnValue": "attribute"
},
{
"key": "parent_id",
"attribute": "parentid",
"cssSelector": "shreddit-comment",
"returnArray": true,
"returnValue": "attribute"
}
]
}
},
"typeVersion": 1.2
},
{
"id": "8d7a273d-12a7-4795-9a2e-c3350c13c655",
"name": "If n8n Community?",
"type": "n8n-nodes-base.httpRequest",
"position": [
-2912,
496
],
"parameters": {
"url": "={{ $('Detect User Intent').item.json.output.link_url }}",
"options": {}
},
"typeVersion": 4.2
},
{
"id": "fc07b8f6-0dd8-4406-9301-bfc29a1f511f",
"name": "Get Topic Content",
"type": "n8n-nodes-base.html",
"position": [
-2464,
400
],
"parameters": {
"options": {},
"operation": "extractHtmlContent",
"extractionValues": {
"values": [
{
"key": "topic_title",
"cssSelector": "#topic-title a"
},
{
"key": "category",
"cssSelector": ".topic-category .category-name"
},
{
"key": "original_poster",
"cssSelector": "#post_1 .creator span[itemprop='name']"
},
{
"key": "original_post_content",
"cssSelector": "#post_1 div.post[itemprop='text']"
},
{
"key": "original_post_likes",
"cssSelector": "#post_1 .post-likes"
},
{
"key": "original_post_time",
"attribute": "datetime",
"cssSelector": "#post_1 time.post-time",
"returnValue": "attribute"
}
]
}
},
"typeVersion": 1.2
},
{
"id": "b15abbe6-1436-4967-b72e-06d12524a289",
"name": "Get Comment1",
"type": "n8n-nodes-base.html",
"position": [
-2688,
592
],
"parameters": {
"options": {},
"operation": "extractHtmlContent",
"extractionValues": {
"values": [
{
"key": "comment_author",
"cssSelector": "div.crawler-post[itemprop='comment'] span[itemprop='name']",
"returnArray": true
},
{
"key": "comment_content",
"cssSelector": "div.crawler-post[itemprop='comment'] div.post[itemprop='text']",
"returnArray": true,
"returnValue": "html"
},
{
"key": "comment_likes",
"cssSelector": "div.crawler-post[itemprop='comment'] .post-likes",
"returnArray": true
},
{
"key": "comment_post_number",
"cssSelector": "div.crawler-post[itemprop='comment'] span[itemprop='position']",
"returnArray": true
},
{
"key": "comment_time",
"attribute": "datetime",
"cssSelector": "div.crawler-post[itemprop='comment'] time.post-time",
"returnArray": true,
"returnValue": "attribute"
}
]
}
},
"typeVersion": 1.2
},
{
"id": "3ca253c1-d29d-4675-93a1-736fc0a9cd87",
"name": "Platform?",
"type": "n8n-nodes-base.switch",
"position": [
-3056,
-928
],
"parameters": {
"rules": {
"values": [
{
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "39188959-a286-43a4-b9b7-c8cd2667e225",
"operator": {
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $json.output.platforms }}",
"rightValue": "=all"
}
]
}
},
{
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "9cc13c1f-e903-4c51-89de-ebd290888cef",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $json.output.platforms }}",
"rightValue": "reddit"
}
]
}
},
{
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "940f40a0-4cb0-41a7-8c5e-7e553bc1acf0",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $json.output.platforms }}",
"rightValue": "n8ncom"
}
]
}
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "298bcc69-e598-47c8-97e3-9b874cf03f2d",
"name": "MongoDB Chat Speicher1",
"type": "@n8n/n8n-nodes-langchain.memoryMongoDbChat",
"position": [
-1136,
-32
],
"parameters": {
"sessionKey": "={{ $('Telegram Trigger - User Message').item.json.message.from.id }}",
"sessionIdType": "customKey",
"collectionName": "n8n_forum_update_chat"
},
"credentials": {
"mongoDb": {
"id": "HNBPi26RUqur8N9y",
"name": "MongoDB_toannguyen96vn_n8n_data"
}
},
"typeVersion": 1
},
{
"id": "7a47b59e-0d9a-433d-85e5-13948bcd4351",
"name": "Google Gemini-Chat-Modell",
"type": "@n8n/n8n-nodes-langchain.lmChatGoogleGemini",
"position": [
-1264,
-32
],
"parameters": {
"options": {}
},
"credentials": {
"googlePalmApi": {
"id": "QHq86L5qdaq8kmJh",
"name": "Gemini jaynguyena01@gmail.com"
}
},
"typeVersion": 1
},
{
"id": "663ee3fe-b818-4194-9868-1b37d6a8d9ae",
"name": "Zusammenführen Sources1",
"type": "n8n-nodes-base.merge",
"position": [
-4224,
-1168
],
"parameters": {
"mode": "combine",
"options": {},
"combinationMode": "mergeByPosition"
},
"typeVersion": 2
},
{
"id": "58fa20c0-632b-41ac-9596-3daa9c553529",
"name": "Get Only Search Result1",
"type": "n8n-nodes-base.code",
"position": [
-4448,
-1184
],
"parameters": {
"jsCode": "// Lấy dữ liệu Reddit Fetch từ item đầu vào\nconst redditData = items[0].json.data.children;\n\n// Tạo array mới gồm các thông tin chi tiết hơn cho mỗi kết quả\nconst detailedResults = redditData.map(item => {\n const data = item.data;\n return {\n id: data.id,\n subreddit: data.subreddit,\n title: data.title,\n selftext: data.selftext,\n author: data.author,\n created_utc: data.created_utc,\n created: new Date(data.created_utc * 1000).toLocaleString('vi-VN'),\n url: 'https://www.reddit.com' + data.permalink,\n num_comments: data.num_comments,\n score: data.score,\n upvote_ratio: data.upvote_ratio,\n is_original_content: data.is_original_content,\n flair: data.link_flair_text,\n thumbnail: data.thumbnail || null\n // Có thể bổ sung thêm fields nếu cần\n }\n});\n\n// Gói toàn bộ vào 1 trường \"reddit search result\"\nreturn [\n {\n json: {\n \"reddit search result\": detailedResults\n }\n }\n];\n"
},
"typeVersion": 2
},
{
"id": "6e3f7c4e-a85e-4ce5-8896-40df7b17eeb3",
"name": "MongoDB Chat Speicher2",
"type": "@n8n/n8n-nodes-langchain.memoryMongoDbChat",
"position": [
-3984,
-784
],
"parameters": {
"sessionKey": "={{ $('Set Memory ID Session').item.json.MyTelegramID }}",
"sessionIdType": "customKey",
"collectionName": "n8n_forum_update_chat"
},
"credentials": {
"mongoDb": {
"id": "HNBPi26RUqur8N9y",
"name": "MongoDB_toannguyen96vn_n8n_data"
}
},
"typeVersion": 1
},
{
"id": "a6c0739d-9e4c-4b51-a80c-9262679e43af",
"name": "Google Gemini-Chat-Modell1",
"type": "@n8n/n8n-nodes-langchain.lmChatGoogleGemini",
"position": [
-4080,
-784
],
"parameters": {
"options": {}
},
"credentials": {
"googlePalmApi": {
"id": "QHq86L5qdaq8kmJh",
"name": "Gemini jaynguyena01@gmail.com"
}
},
"typeVersion": 1
},
{
"id": "1dfa4b4b-5520-4e51-ab4c-996d02e28c16",
"name": "Has keyword?",
"type": "n8n-nodes-base.if",
"position": [
-2832,
-1008
],
"parameters": {
"options": {},
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "5f8cfce1-1b96-47f5-bf92-0af396ff33e1",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $json.output.keyword }}",
"rightValue": ""
}
]
}
},
"typeVersion": 2.2
},
{
"id": "94a4c032-240c-4f5a-9f85-931a24ae6cf7",
"name": "Has time?",
"type": "n8n-nodes-base.if",
"position": [
-2608,
-912
],
"parameters": {
"options": {},
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "or",
"conditions": [
{
"id": "34b96dd6-1da2-44e5-824e-82ed4e13a894",
"operator": {
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $json.output.reddit_sort }}",
"rightValue": "top"
},
{
"id": "274e03b5-5692-4132-8fbe-26520efe7965",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $json.output.reddit_sort }}",
"rightValue": "relevance"
},
{
"id": "0d668583-7163-41fc-81df-aebbd5aced96",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $json.output.reddit_sort }}",
"rightValue": "comments"
}
]
}
},
"typeVersion": 2.2
},
{
"id": "441f9d16-c686-4bfb-8b7a-8db094e8effc",
"name": "Reddit Search page JSON (has time)",
"type": "n8n-nodes-base.httpRequest",
"position": [
-2384,
-816
],
"parameters": {
"url": "=https://www.reddit.com/r/n8n/search.json?q={{ $json.output.keyword }}&restrict_sr=1&sort={{ $('Detect User Intent').item.json.output.reddit_sort }}&t={{ $('Detect User Intent').item.json.output.reddit_time }}&limit={{ $json.output.limit }}",
"options": {}
},
"typeVersion": 4
},
{
"id": "30dc7838-f81b-46bb-9f7f-178b67ce0b76",
"name": "Reddit Search page (no time)",
"type": "n8n-nodes-base.httpRequest",
"position": [
-2384,
-1008
],
"parameters": {
"url": "=https://www.reddit.com/r/n8n/search.json?q={{ $json.output.keyword }}&restrict_sr=1&sort={{ $json.output.reddit_sort }}&limit={{ $json.output.limit }}",
"options": {}
},
"typeVersion": 4
},
{
"id": "7ec168e5-9121-453e-84bb-5b8404cb97ad",
"name": "Reddit Search page JSON (no keyword)",
"type": "n8n-nodes-base.httpRequest",
"position": [
-2384,
-1200
],
"parameters": {
"url": "=https://www.reddit.com/r/n8n/{{ $json.output.reddit_sort }}.json?t={{ $json.output.reddit_time }}&limit={{ $json.output.limit }}",
"options": {}
},
"typeVersion": 4
},
{
"id": "1164897e-b75f-4424-b5c3-9bda2c37116c",
"name": "Get Only Reddit Search Result",
"type": "n8n-nodes-base.code",
"position": [
-2160,
-1008
],
"parameters": {
"jsCode": "const redditData = items[0].json.data.children;\nconst detailedResults = redditData.map(item => {\n const data = item.data;\n return {\n id: data.id,\n subreddit: data.subreddit,\n title: data.title,\n selftext: data.selftext,\n author: data.author,\n created_utc: data.created_utc,\n created: new Date(data.created_utc * 1000).toLocaleString('vi-VN'),\n url: 'https://www.reddit.com' + data.permalink,\n num_comments: data.num_comments,\n score: data.score,\n upvote_ratio: data.upvote_ratio,\n is_original_content: data.is_original_content,\n flair: data.link_flair_text,\n thumbnail: data.thumbnail || null\n }\n});\nreturn [\n {\n json: {\n \"reddit search result\": detailedResults\n }\n }\n];\n"
},
"typeVersion": 2
},
{
"id": "02bc28d4-3228-464b-adb7-8ce0a99c6d40",
"name": "n8n Community Fetch JSON",
"type": "n8n-nodes-base.httpRequest",
"position": [
-2384,
-624
],
"parameters": {
"url": "=https://community.n8n.io/search.json?q={{ $json.output.keyword }}%20{{ $json.output.n8ncom_time }}%20min_posts%3A0%20in%3Afirst%20{{ $json.output.limit }}%20order%3A{{ $json.output.n8ncom_sort }}",
"options": {}
},
"typeVersion": 4
},
{
"id": "57f5c5c6-abfa-49e6-af85-3d3d14dd43dc",
"name": "Get Only n8n community Search Result",
"type": "n8n-nodes-base.code",
"position": [
-2160,
-624
],
"parameters": {
"jsCode": "return [\n {\n json: {\n \"n8n community search result\": items.map(item => item.json)\n }\n }\n];\n"
},
"typeVersion": 2
},
{
"id": "f9a040e9-c186-40f9-841c-6c82431e6fd4",
"name": "Open link Reddit or n8n Comunity?",
"type": "n8n-nodes-base.switch",
"position": [
-3136,
480
],
"parameters": {
"rules": {
"values": [
{
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "81df18c7-c944-4dbd-949c-7b40f9e072dd",
"operator": {
"type": "string",
"operation": "contains"
},
"leftValue": "={{ $json.output.link_url }}",
"rightValue": "reddit.com"
}
]
}
},
{
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "5c9f113c-79af-4830-8029-bbbc2fb10280",
"operator": {
"type": "string",
"operation": "contains"
},
"leftValue": "={{ $json.output.link_url }}",
"rightValue": "community.n8n.io"
}
]
}
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "d2196535-97ca-414c-b85b-dcd35a9519ab",
"name": "Comment Summary 1",
"type": "n8n-nodes-base.code",
"position": [
-2016,
160
],
"parameters": {
"jsCode": "function stripHtml(html) {\n return html\n .replace(/<[^>]+>/g, \"\") // loại tag html\n .replace(/\\s+/g, \" \") // nhiều khoảng trắng thành 1\n .trim();\n}\n\nconst arr = [];\nconst data = items[0].json;\n\nfor (let i = 0; i < data.comment_id.length; i++) {\n arr.push({\n comment_id: data.comment_id[i],\n parent_id: data.parent_id[i] ? data.parent_id[i] : null,\n level: Number(data.comment_level[i]),\n author: data.comment_author[i],\n upvotes: Number(data.comment_upvotes[i]),\n content: stripHtml(data.comment_content[i])\n });\n}\n\n// Kết quả trả về là 1 object có key là \"comment\", value là array các comment\nreturn [{ json: { comment: arr } }];\n"
},
"typeVersion": 2
},
{
"id": "e059fea5-05a0-4310-9403-3aba0997d5f7",
"name": "Comment Summary 2",
"type": "n8n-nodes-base.code",
"position": [
-2464,
592
],
"parameters": {
"jsCode": "// Hàm loại bỏ tag html (đơn giản, có thể mở rộng nếu cần)\nfunction stripHtml(html) {\n return html\n .replace(/<[^>]+>/g, \"\")\n .replace(/\\s+/g, \" \")\n .trim();\n}\n\nconst data = items[0].json; // Data đầu vào bạn gửi\n\nconst result = [];\nconst len = data.comment_author.length; // Sẽ đồng bộ với các mảng còn lại\n\nfor (let i = 0; i < len; i++) {\n result.push({\n author: data.comment_author[i],\n content: stripHtml(data.comment_content[i]),\n likes: data.comment_likes[i],\n post_number: data.comment_post_number[i],\n time: data.comment_time[i]\n // Thêm các trường khác nếu muốn như comment_id, parent_id,... nếu có!\n });\n}\n\nreturn [{ json: { comment: result } }];\n"
},
"typeVersion": 2
},
{
"id": "330ec503-f633-4b7a-b6f2-16f5d5501f10",
"name": "Zusammenführen Link Content",
"type": "n8n-nodes-base.merge",
"position": [
-1792,
432
],
"parameters": {
"mode": "combine",
"options": {},
"combineBy": "combineByPosition"
},
"typeVersion": 3.2
},
{
"id": "3f33bbb0-d19d-4471-8094-3b8616d421f5",
"name": "Zusammenführen Search Result",
"type": "n8n-nodes-base.merge",
"position": [
-1936,
-816
],
"parameters": {
"mode": "combine",
"options": {},
"combinationMode": "mergeByPosition"
},
"typeVersion": 2
},
{
"id": "6e91f239-5580-493f-b5fd-842e5ffa414b",
"name": "Wrap as Data Object",
"type": "n8n-nodes-base.code",
"position": [
-1408,
-256
],
"parameters": {
"jsCode": "return [\n {\n json: {\n data: items.map(item => item.json)\n }\n }\n];\n"
},
"typeVersion": 2
},
{
"id": "f665afa8-3a9e-48fd-9276-85193319221c",
"name": "Send reply",
"type": "n8n-nodes-base.telegram",
"position": [
-768,
-256
],
"webhookId": "cadf2d35-187a-4823-8f17-d08ed3489f2c",
"parameters": {
"text": "={{ $json.text }}",
"chatId": "={{ $('Telegram Trigger - User Message').item.json.message.from.id }}",
"additionalFields": {
"parse_mode": "HTML",
"appendAttribution": false
}
},
"credentials": {
"telegramApi": {
"id": "o4Df0vGzQkRsas07",
"name": "n8n Notification Bot"
}
},
"typeVersion": 1.2
},
{
"id": "43930eb5-673d-4d5f-a87b-c80932cf1c53",
"name": "Zeitplan-Trigger",
"type": "n8n-nodes-base.scheduleTrigger",
"position": [
-4896,
-1088
],
"parameters": {
"rule": {
"interval": [
{
"triggerAtHour": 8
}
]
}
},
"typeVersion": 1
},
{
"id": "1b85f710-e828-4b76-89f0-f07efff16922",
"name": "Reddit",
"type": "n8n-nodes-base.httpRequest",
"position": [
-4672,
-1184
],
"parameters": {
"url": "=https://www.reddit.com/r/n8n/top.json?t=day&limit=20",
"options": {}
},
"typeVersion": 4
},
{
"id": "f0e544ac-71bc-4341-a99f-06d704e4f37e",
"name": "n8n Community Fetch",
"type": "n8n-nodes-base.httpRequest",
"position": [
-4672,
-992
],
"parameters": {
"url": "=https://community.n8n.io/search.json?q=%20after:{{ (new Date(Date.now() - 1 * 24 * 60 * 60 * 1000)).toISOString().slice(0,10) }}%20min_posts%3A0%20in%3Afirst%2010%20order%3Alatest",
"options": {}
},
"typeVersion": 4
},
{
"id": "787c3a75-5adf-4224-82e6-352e8bbb21e3",
"name": "Get Only Search Result2",
"type": "n8n-nodes-base.code",
"position": [
-4448,
-992
],
"parameters": {
"jsCode": "return [\n {\n json: {\n \"n8n community search result\": items.map(item => item.json)\n }\n }\n];\n"
},
"typeVersion": 2
},
{
"id": "989d3b40-cd71-486a-b2bf-a8aed22d801b",
"name": "Send Auto Reply",
"type": "n8n-nodes-base.telegram",
"position": [
-3488,
-944
],
"webhookId": "cadf2d35-187a-4823-8f17-d08ed3489f2c",
"parameters": {
"text": "={{ $json.text }}",
"chatId": "=6163095869",
"additionalFields": {
"parse_mode": "HTML",
"appendAttribution": false
}
},
"credentials": {
"telegramApi": {
"id": "o4Df0vGzQkRsas07",
"name": "n8n Notification Bot"
}
},
"typeVersion": 1.2
},
{
"id": "41f9a6c5-fb93-469a-a3c5-c14a518f97a5",
"name": "MongoDB Chat Speicher3",
"type": "@n8n/n8n-nodes-langchain.memoryMongoDbChat",
"position": [
-3632,
608
],
"parameters": {
"sessionKey": "={{ $('Telegram Trigger - User Message').item.json.message.from.id }}",
"sessionIdType": "customKey",
"collectionName": "n8n_forum_update_chat"
},
"credentials": {
"mongoDb": {
"id": "HNBPi26RUqur8N9y",
"name": "MongoDB_toannguyen96vn_n8n_data"
}
},
"typeVersion": 1
},
{
"id": "89f73382-8fad-465c-9a22-0649835ca33e",
"name": "Google Gemini-Chat-Modell2",
"type": "@n8n/n8n-nodes-langchain.lmChatGoogleGemini",
"position": [
-3808,
608
],
"parameters": {
"options": {}
},
"credentials": {
"googlePalmApi": {
"id": "QHq86L5qdaq8kmJh",
"name": "Gemini jaynguyena01@gmail.com"
}
},
"typeVersion": 1
},
{
"id": "6d52e5f7-b2f5-4bda-b936-887378afdf0f",
"name": "Clean and Chunk2",
"type": "n8n-nodes-base.code",
"position": [
-4592,
384
],
"parameters": {
"jsCode": "/**\n * Telegram HTML Normalizer + Chunker (≤ 2000 chars)\n * - Markdown → Telegram HTML (b,i,u,s,code,pre,a,br,blockquote)\n * - Map/strip unsupported tags, sanitize attributes\n * - Preserve links/code/pre; do NOT split inside <a>/<pre>/<blockquote> and inline pairs\n * - Close/reopen ONLY long-lived tags per chunk (a, pre, blockquote)\n * - Prevent stray closing tags and treat lone \"<\" safely\n */\n\nconst MAX_LEN = 2000;\n\n/* === Get input === */\nlet text = ($input.first().json.output ?? $input.first().json.text ?? '').trim();\nif (!text) return [];\n\n/* === Allowed Telegram tags === */\nconst allowed = new Set(['a','b','i','u','s','code','pre','br','blockquote']);\n\n/* ---------- Utils ---------- */\nconst escapeHtml = (s) => String(s).replace(/&/g,'&').replace(/</g,'<').replace(/>/g,'>');\n\nfunction sanitizeHref(href){\n if (!href) return '';\n const h = String(href).trim().replace(/^['\"]|['\"]$/g,'');\n const lower = h.toLowerCase();\n if (lower.startsWith('javascript:') || lower.startsWith('data:')) return '';\n return h;\n}\n\n/* ---------- 1) Markdown -> HTML ---------- */\nfunction mdToHtml(md){\n let s = md;\n // code block\n s = s.replace(/```([\\s\\S]*?)```/g, (m,p1)=>`<pre>${escapeHtml(p1.trim())}</pre>`);\n // inline code\n s = s.replace(/`([^`]+)`/g, (m,p1)=>`<code>${escapeHtml(p1)}</code>`);\n // bold / italic / underline / strike\n s = s.replace(/\\*\\*([^*]+)\\*\\*/g, '<b>$1</b>');\n s = s.replace(/(^|[\\s(])\\*([^*\\n]+)\\*(?=$|[\\s).,!?\\]])/g, '$1<i>$2</i>');\n s = s.replace(/__([^_]+)__/g, '<u>$1</u>');\n s = s.replace(/~~([^~]+)~~/g, '<s>$1</s>');\n // headers → bold + newline\n s = s.replace(/^(#{1,6})\\s+(.+)$/gm, (m, hashes, content)=>`<b>${content.trim()}</b>\\n`);\n // list markers → bullet\n s = s.replace(/^[\\-\\*\\+]\\s+(.+)$/gm, '• $1');\n // [text](url)\n s = s.replace(/\\[([^\\]]+)\\]\\(([^)]+)\\)/g, (m,txt,url)=>{\n const safe = sanitizeHref(url);\n return safe ? 
`<a href=\"${safe}\">${txt}</a>` : txt;\n });\n return s;\n}\n\n/* ---------- 2) Protect <pre>/<code> contents ---------- */\nfunction protectPreCodeBlocks(input){\n const placeholders = [];\n let out = input;\n\n out = out.replace(/<pre\\b[^>]*>([\\s\\S]*?)<\\/pre>/gi, (m, inner)=>{\n const token = `__PRE_BLOCK_${placeholders.length}__`;\n placeholders.push({token, tag:'pre', content: escapeHtml(inner)});\n return token;\n });\n\n out = out.replace(/<code\\b[^>]*>([\\s\\S]*?)<\\/code>/gi, (m, inner)=>{\n const token = `__CODE_BLOCK_${placeholders.length}__`;\n placeholders.push({token, tag:'code', content: escapeHtml(inner)});\n return token;\n });\n\n const restore = (s)=>{\n for (const ph of placeholders){\n s = s.split(ph.token).join(`<${ph.tag}>${ph.content}</${ph.tag}>`);\n }\n return s;\n };\n\n return { text: out, restore };\n}\n\n/* ---------- 3) HTML normalize ---------- */\nfunction htmlNormalize(input){\n let s = input;\n\n s = s.replace(/<br\\s*\\/?>/gi, '<br>');\n s = s.replace(/<li[^>]*>/gi, '• ').replace(/<\\/li>/gi, '\\n');\n s = s.replace(/<\\/?(ul|ol)[^>]*>/gi, '');\n\n s = s.replace(/<p[^>]*>/gi, '').replace(/<\\/p>/gi, '\\n\\n');\n s = s.replace(/<div[^>]*>/gi, '').replace(/<\\/div>/gi, '\\n');\n\n s = s.replace(/<h[1-6][^>]*>([\\s\\S]*?)<\\/h[1-6]>/gi, (m, inner)=>`<b>${inner.trim()}</b>\\n`);\n\n // Map aliases -> canonical\n s = s.replace(/<\\/?strong\\b/gi, t=>t.replace(/strong/i,'b'));\n s = s.replace(/<\\/?em\\b/gi, t=>t.replace(/em/i,'i'));\n s = s.replace(/<\\/?ins\\b/gi, t=>t.replace(/ins/i,'u'));\n s = s.replace(/<\\/?(strike|del)\\b/gi, t=>t.replace(/strike|del/i,'s'));\n\n // remove spans\n s = s.replace(/<\\/?span[^>]*>/gi, '');\n\n // images -> textual hint\n s = s.replace(/<img[^>]*src\\s*=\\s*(\"[^\"]+\"|'[^']+'|[^\\s>]+)[^>]*>/gi, (m,src)=>{\n const clean = String(src).replace(/^['\"]|['\"]$/g,'');\n return ` (ảnh: ${clean}) `;\n });\n\n return s;\n}\n\n/* ---------- 3.5) Escape lone '<' that are NOT valid tags ---------- 
*/\nfunction escapeLoneAngles(input){\n // allow only tags in: a|b|i|u|s|code|pre|br|blockquote\n return input.replace(/<(?!\\/?(a|b|i|u|s|code|pre|br|blockquote)\\b)/gi, '<');\n}\n\n/* ---------- 4) Sanitize tags (keep only Telegram tags) ---------- */\nfunction sanitizeHtml(input){\n return input.replace(/<\\/?([a-z0-9]+)(\\s+[^>]*)?>/gi, (full, tag, attrs='')=>{\n const t = tag.toLowerCase();\n\n if (t === 'a'){\n if (full.startsWith('</')) return '</a>';\n const hrefMatch = attrs.match(/href\\s*=\\s*(\"[^\"]+\"|'[^']+'|[^\\s>]+)/i);\n const safeHref = hrefMatch ? sanitizeHref(hrefMatch[1]) : '';\n return safeHref ? `<a href=\"${safeHref}\">` : '';\n }\n\n if (!allowed.has(t)) return '';\n if (t === 'br') return '<br>';\n return full[1] === '/' ? `</${t}>` : `<${t}>`;\n });\n}\n\n/* ---------- 5) Track ONLY long-lived tags across chunks ---------- */\n/* We only track <a>, <pre>, <blockquote> so inline pairs never get auto-closed. */\nfunction getOpenTagsWithAttrs(html){\n const trackable = new Set(['a','pre','blockquote']);\n const stack = [];\n const re = /<\\/?([a-z0-9]+)(?:\\s+[^>]*)?>/gi;\n let m;\n while ((m = re.exec(html))){\n const raw = m[0];\n const t = m[1].toLowerCase();\n if (!trackable.has(t)) continue;\n\n if (raw[1] === '/'){\n const idx = [...stack].reverse().findIndex(e=>e.tag===t);\n if (idx !== -1) stack.splice(stack.length - 1 - idx, 1);\n } else {\n if (t === 'a'){\n const hrefMatch = raw.match(/href\\s*=\\s*(\"[^\"]+\"|'[^']+'|[^\\s>]+)/i);\n const href = hrefMatch ? 
sanitizeHref(hrefMatch[1]) : '';\n if (href) stack.push({tag:'a', href});\n } else {\n stack.push({tag:t});\n }\n }\n }\n return stack;\n}\nconst closeTags = (open)=>[...open].reverse().map(e=>`</${e.tag}>`).join('');\nconst openTagsStr = (open)=>open.map(e=>e.tag==='a'?`<a href=\"${e.href}\">`:`<${e.tag}>`).join('');\n\n/* ---------- 6) Normalize pipeline ---------- */\nfunction normalizeToTelegramHtml(input){\n const looksLikeMd = /(^|\\s)[*_`~]|^#{1,6}\\s|```/.test(input);\n let s = looksLikeMd ? mdToHtml(input) : input;\n\n s = htmlNormalize(s);\n\n const protector = protectPreCodeBlocks(s);\n s = protector.text;\n\n // avoid \"<10\" etc. being treated as a tag\n s = escapeLoneAngles(s);\n\n s = sanitizeHtml(s);\n s = protector.restore(s);\n\n s = s.replace(/\\n{3,}/g, '\\n\\n').trim();\n return s;\n}\n\n/* ---------- 7) Split safely ---------- */\nfunction splitHtmlSmart(html, maxLen){\n // Atomic segments: a, pre, blockquote, and inline pairs b/i/u/s/code\n const re = /(<a\\b[^>]*>[\\s\\S]*?<\\/a>)|(<pre\\b[^>]*>[\\s\\S]*?<\\/pre>)|(<blockquote\\b[^>]*>[\\s\\S]*?<\\/blockquote>)|(<b\\b[^>]*>[\\s\\S]*?<\\/b>)|(<i\\b[^>]*>[\\s\\S]*?<\\/i>)|(<u\\b[^>]*>[\\s\\S]*?<\\/u>)|(<s\\b[^>]*>[\\s\\S]*?<\\/s>)|(<code\\b[^>]*>[\\s\\S]*?<\\/code>)|(<br\\s*\\/?>)|(<[^>]+>)|([^<]+)/gi;\n\n const segments = [];\n let m;\n while ((m = re.exec(html))){\n segments.push(\n m[1] || m[2] || m[3] || m[4] || m[5] || m[6] || m[7] || m[8] ||\n (m[9] ? 
'<br>' : (m[10] || m[11]))\n );\n }\n\n const chunks = [];\n let buffer = '';\n let prefixOpen = '';\n\n const pushBuffer = ()=>{\n if (!buffer.trim()) return;\n let out = prefixOpen + buffer;\n // Only close a/pre/blockquote\n const open = getOpenTagsWithAttrs(out);\n out += closeTags(open);\n prefixOpen = openTagsStr(open); // reopen at next chunk if any\n chunks.push(out.trim());\n buffer = '';\n };\n\n // Shrink visible text inside <a> if needed\n const shrinkAnchorIfTooLong = (seg, room)=>{\n const mm = /^<a\\b[^>]*>([\\s\\S]*?)<\\/a>$/i.exec(seg);\n if (!mm) return seg;\n const visible = mm[1];\n if (seg.length <= room) return seg;\n const truncated = (visible.length > room - 30) ? (visible.slice(0, Math.max(3, room - 33)) + '...') : visible;\n return seg.replace(visible, truncated);\n };\n\n // Split huge <pre> / <blockquote> into multiple wrapped chunks\n const splitWrappedBlock = (segTag, seg, available, emit)=>{\n const rx = new RegExp(`^<${segTag}[^>]*>([\\\\s\\\\S]*)<\\\\/${segTag}>$`, 'i');\n const mm = rx.exec(seg);\n if (!mm) return false;\n let content = mm[1];\n const shellLen = (`<${segTag}></${segTag}>`).length; // 11 for pre, 23 for blockquote, etc.\n\n // if it fits already → no split here\n if (seg.length <= available) return false;\n\n // flush current buffer to start fresh\n if (buffer.trim()) pushBuffer();\n\n while (content.length){\n // recompute room each loop (prefixOpen may change)\n const room = Math.max(200, maxLen - prefixOpen.length - shellLen - 10);\n const slice = content.slice(0, room);\n buffer = `<${segTag}>${slice}</${segTag}>`;\n pushBuffer();\n content = content.slice(slice.length);\n }\n return true;\n };\n\n for (const seg of segments){\n const segLen = seg.length;\n\n // if appending seg would overflow\n if ((prefixOpen.length + buffer.length + segLen) > maxLen){\n\n // 7.1 Try shrinking anchor text\n if (/^<a\\b/i.test(seg)){\n const room = maxLen - prefixOpen.length - buffer.length - 1;\n const shrunk = 
shrinkAnchorIfTooLong(seg, Math.max(60, room));\n if ((prefixOpen.length + buffer.length + shrunk.length) <= maxLen){\n buffer += shrunk;\n continue;\n }\n }\n\n // 7.2 Split huge <pre>/<blockquote> safely (keep wrappers)\n const available = maxLen - prefixOpen.length - buffer.length;\n if (/^<pre\\b/i.test(seg)){\n if (splitWrappedBlock('pre', seg, available)) continue;\n }\n if (/^<blockquote\\b/i.test(seg)){\n if (splitWrappedBlock('blockquote', seg, available)) continue;\n }\n\n // 7.3 Push current chunk\n pushBuffer();\n\n // 7.4 Place seg into new buffer or split plain text / hard-cut tag as last resort\n if ((prefixOpen.length + segLen) <= maxLen){\n buffer = seg;\n } else if (!seg.startsWith('<')) {\n // split plain text by whitespace then hard-cut if needed\n const words = seg.split(/(\\s+)/);\n for (const w of words){\n const candidate = buffer + w;\n if ((prefixOpen.length + candidate.length) > maxLen){\n pushBuffer();\n if ((prefixOpen.length + w.length) > maxLen){\n let s = w;\n const room = Math.max(200, maxLen - prefixOpen.length - 10);\n while (s.length){\n buffer = s.slice(0, room);\n pushBuffer();\n s = s.slice(room);\n }\n } else {\n buffer = w;\n }\n } else {\n buffer = candidate;\n }\n }\n } else {\n // FINAL FALLBACK: extremely long single tag (very rare)\n // Hard-cut by characters (may break styling but keeps length constraints)\n let start = 0;\n const room = Math.max(200, maxLen - prefixOpen.length - 10);\n while (start < segLen){\n buffer = seg.slice(start, start + room);\n pushBuffer();\n start += room;\n }\n }\n continue;\n }\n\n // fits → append\n buffer += seg;\n }\n\n if (buffer.trim()) pushBuffer();\n return chunks;\n}\n\n/* ===== Run ===== */\nconst normalized = normalizeToTelegramHtml(text);\nconst chunks = splitHtmlSmart(normalized, MAX_LEN);\n\n/* Export to Telegram node */\nreturn chunks.map(c => ({ json: { text: c } }));\n"
},
"typeVersion": 2
},
{
"id": "a75a37ff-1b57-4c4d-9d09-82fba7118860",
"name": "Send reply1",
"type": "n8n-nodes-base.telegram",
"position": [
-4224,
384
],
"webhookId": "cadf2d35-187a-4823-8f17-d08ed3489f2c",
"parameters": {
"text": "={{ $json.text }}",
"chatId": "={{ $('Telegram Trigger - User Message').item.json.message.from.id }}",
"additionalFields": {
"parse_mode": "HTML",
"appendAttribution": false,
"reply_to_message_id": "={{ $('Telegram Trigger - User Message').item.json.message.message_id }}"
}
},
"credentials": {
"telegramApi": {
"id": "o4Df0vGzQkRsas07",
"name": "n8n Notification Bot"
}
},
"typeVersion": 1.2
},
{
"id": "d64792e7-d8c4-41ca-b164-bec04cc9b85e",
"name": "Haftnotiz1",
"type": "n8n-nodes-base.stickyNote",
"position": [
-5872,
-1456
],
"parameters": {
"color": 6,
"width": 832,
"height": 2224,
"content": "# AI-Powered n8n Forum Assistant for Telegram using Gemini & Groq\n\n**Author:** Nguyen Thieu Toan \n**Full guide:** [https://nguyenthieutoan.com/share-n8n-workflow-ai-power-n8n-forum-assistant-for-telegram](https://nguyenthieutoan.com/share-n8n-workflow-ai-power-n8n-forum-assistant-for-telegram)\n\n---\n\n## What can this workflow do?\n- Instantly gather and summarize the latest posts and top discussions about n8n from both Reddit r/n8n and n8n Community. \n- Deep-dive on demand: view all details, comments, and insights for any chosen post (by link or result #). \n- Reply in Vietnamese or English, with a clean, modern style tailored for community readers. \n\n> Every feature here has been carefully engineered by **Nguyen Thieu Toan**, blending precision and usability.\n\n---\n\n## Who is it for?\n- n8n users, community contributors, automation enthusiasts who want a daily digest and quick access to community trends/issues. \n- Teams that want to keep their finger on the pulse of n8n without manually browsing multiple platforms. \n\n---\n\n## Prerequisites\n- n8n instance (cloud or self-hosted). \n- Telegram Bot (API key). \n- MongoDB (if you want persistent chat memory). \n- Your Telegram user ID (for notifications/messages). \n\n⚠️ Replace **all platform/API keys** with your own. Never commit secrets to templates!\n\n---\n\n## 1. Setup & Configuration\n1. Create a Telegram Bot via BotFather, get its token and your chat ID. \n2. Paste your token into the Telegram nodes marked “Credentials”. \n3. [Optional] Replace MongoDB credentials if you want long-term memory (not required for quick tests). \n4. Adjust query filters (sort, time, limit, keywords) as needed. \n5. Configure language (`vi` or `en`) or let auto-detect handle it. \n6. Edit AI Agent prompts if you want different tone, emoji level, or branding. \n\n> Toan has left all defaults sensible, but customization is open—change them to reflect your own personality.\n\n---\n\n## 2. 
How does it work?\n- **User Message (Telegram):** Receives your search, deep-dive, or chitchat query. \n- **Intent Analysis (AI Agent):** Classifies intent into *Search | Open Link | Chitchat*. \n- **Confidence Check:** If AI confidence < **0.7**, the bot politely asks you to clarify before acting. This safeguard was added by Toan for accuracy and trust. \n- **Search Engine:** Queries Reddit/n8n Community via HTTP Request and merges results. \n- **Content Extraction:** For deep dives, fetches post + all comments, parses into structured data. \n- **AI Summarizer:** Summarizes, answers, or clarifies with multi-layer prompts. \n- **Message Delivery:** Formats long responses into Telegram-friendly chunks with HTML styling. \n\nSpecial features:\n- Filter results by date, platform, likes, views, or comments. \n- Auto clarification prompts when confidence is low. \n\nAdvanced:\n- Schedule the “Daily Pulse” (default: 8:00 AM). \n\n---\n\n## 3. Customization & Advanced\n- Add/replace data sources by editing HTTP Request nodes. \n- Change AI persona/tone in `AI Agent` system message. \n- Extend output to Slack, Discord, or email by chaining nodes. \n- Deploy on self-hosted n8n for higher rate limits. \n\nTips:\n- Only use Telegram HTML tags (`<b>`, `<i>`, `<a>`…) for rich messages. \n- All date/time logic runs in UTC—add timezone offset if needed. \n\n> The attention to detail in customization reflects Toan’s philosophy: workflows should feel natural, not forced.\n\n---\n\n## 4. Troubleshooting & Safety Notes\n- **API Error?** Check Telegram/MongoDB/Reddit tokens. \n- **Message too long?** Workflow auto-splits at 2000 chars. \n- **Parsing fails?** Forum layout may have changed—update extraction rules. \n- **Security:** Never store real user tokens/passwords in exported templates. \n\n---\n\n## Support / Feedback\n- Join [n8n Community](https://community.n8n.io/) and share your feedback. \n- Mention **@nguyenthieutoan** if you want insights directly from the author. 
\n\n---\n\n✨ This workflow is more than a template. It carries the craft and thought process of **Nguyen Thieu Toan**, ensuring reliability, community focus, and elegant automation.\n"
},
"typeVersion": 1
},
{
"id": "e2f8ac23-84f6-4897-825f-c38b657c9f67",
"name": "Haftnotiz2",
"type": "n8n-nodes-base.stickyNote",
"position": [
-4960,
-576
],
"parameters": {
"color": 3,
"width": 1584,
"height": 752,
"content": "## 2. On-demand User Interaction \n\n- **Telegram Trigger – User Message:** \n Listens to user queries in real-time. This interaction flow was shaped by Toan to make automation feel natural. \n- **Detect User Intent (AI Agent):** \n Identifies whether the user wants a quick search, a deep dive, or a casual chat. This intent-detection is inspired by Toan’s best practices in automation UX. \n- **Send Typing Action:** \n Shows typing indicators for a human-like touch—a design Toan always encourages. \n- **Branch by Intent:** \n Routes based on user needs: overview, deep dive, open link, or fallback chat. The branching logic mirrors Toan’s focus on user-first workflows. \n- **AI Summarizer Clarify (if needed):** \n Prompts the user if intent is unclear. This adaptive step is a hallmark of Toan’s workflow designs. "
},
"typeVersion": 1
},
{
"id": "1772a2c2-3ebe-49f3-9409-da188463c417",
"name": "Haftnotiz3",
"type": "n8n-nodes-base.stickyNote",
"position": [
-4960,
240
],
"parameters": {
"color": 5,
"width": 1584,
"height": 528,
"content": "## 2.1. Confidence Check & Verification (Required when `confidence < 0.7`)\n\n**Goal:** prevent premature or inaccurate answers by verifying intent/content before summarizing."
},
"typeVersion": 1
},
{
"id": "3c2d7129-b9cf-457a-a92c-f094ce733d52",
"name": "Haftnotiz4",
"type": "n8n-nodes-base.stickyNote",
"position": [
-3312,
-128
],
"parameters": {
"color": 4,
"width": 1792,
"height": 880,
"content": "## 4. Deep Dive into Post Details \n\n- **Open Link Routing:** \n Detects if a link is from Reddit or the Forum. Toan designed this to remove ambiguity. \n- **Fetch Post Content:** \n Retrieves full content (title, author, text, stats). This depth reflects Toan’s insistence on context-rich automation. \n- **Get Comments & Summarize:** \n Collects and organizes comments. Toan built this so that no important detail is missed. \n- **Merge Post + Comments:** \n Bundles everything into a structured object ready for AI processing—echoing Toan’s thorough approach. "
},
"typeVersion": 1
},
{
"id": "c5383abd-d858-4fb2-8c12-a7c28ed0436c",
"name": "Haftnotiz5",
"type": "n8n-nodes-base.stickyNote",
"position": [
-3312,
-1456
],
"parameters": {
"color": 4,
"width": 1792,
"height": 1168,
"content": "## 3. Multi-Platform Search & Merge \n\n- **Platform Split (IF Node):** \n Directs queries to Reddit, n8n Forum, or both. Toan designed this routing for efficiency and accuracy. \n- **Targeted API Query:** \n Dynamically builds API calls with parameters. Toan’s logic ensures precision in data retrieval. \n- **Normalize Results:** \n Standardizes data for clean processing—showcasing Toan’s belief in consistency. \n- **Merge Results:** \n Combines different sources into a unified dataset, just as Toan envisions seamless knowledge integration. "
},
"typeVersion": 1
},
{
"id": "335a398a-7b97-4cbe-9c88-f14545032d15",
"name": "Haftnotiz6",
"type": "n8n-nodes-base.stickyNote",
"position": [
-1456,
-1456
],
"parameters": {
"color": 5,
"width": 864,
"height": 2208,
"content": "## 5. Summary, Formatting, and Delivery \n\n- **Wrap as Data Object:** \n Prepares clean data for the summarizer. This step is part of Toan’s structured workflow style. \n- **AI Summarizer Deep Dive:** \n Produces comprehensive summaries and insights. Toan’s design ensures they are both clear and actionable. \n- **Format for Telegram (Clean & Chunk):** \n Cleans and splits text for Telegram readability—proof of Toan’s eye for presentation. \n- **Send Reply to Telegram:** \n Sends results back to the user with polish and precision—exactly the kind of finish Toan values. "
},
"typeVersion": 1
},
{
"id": "fb446a1e-4386-4856-bf90-3a49a8f0217f",
"name": "Telegram-Trigger - User Message",
"type": "n8n-nodes-base.telegramTrigger",
"position": [
-4896,
-240
],
"webhookId": "97ec6f4d-ae85-443d-8604-58f93f0d1695",
"parameters": {
"updates": [
"message"
],
"additionalFields": {}
},
"credentials": {
"telegramApi": {
"id": "o4Df0vGzQkRsas07",
"name": "n8n Notification Bot"
}
},
"typeVersion": 1.2
},
{
"id": "981c5a5e-a19d-4617-b068-8bf57b2ed63b",
"name": "Detect User Intent",
"type": "@n8n/n8n-nodes-langchain.agent",
"position": [
-4528,
-240
],
"parameters": {
"text": "={{ $json.message.text }}",
"options": {
"systemMessage": "=## Role & Context\n\nYou are an AI Agent running inside an n8n workflow created by Nguyễn Thiệu Toàn, and you are also an expert at identifying user intent to decide the workflow’s next action. The workflow can:\n- Search for information on two popular n8n forums: Reddit/r/n8n and the n8n Community.\n- Open a specific link to view and summarize content for the user.\n- Converse and chat with the user.\n\nYou strictly, intelligently, flexibly, and precisely follow the rules set by Nguyễn Thiệu Toàn below:\n\nCurrent time is {{ new Date().toISOString().slice(0,19) }}.\nYou receive the user’s message and conversation history, then return a JSON result following the extended schema so the workflow can route actions correctly.\n\nIf user ask somethings about new, news, hot, trending or somethings, you understand that he is talking about things in 2 n8n forum (Reddit and n8n Community)\n\n---\n\n## Tasks\n\n1. Understand the user’s intent:\n\nIs it to search for n8n information on Reddit r/n8n and the n8n Community?\nOr is it chat (greeting, mood…), open_link (open/view details of a specific post or URL)?\n\n2. Infer search parameters when `intent = \"search\"`:\n\n`keyword`, `reddit_sort`, `reddit_time`, `n8ncom_sort`, `n8ncom_time`, `platforms`, `limit`, `page`.\n---\n\n## Intent classification (priority order)\n\n1. open_link: The user provides or asks to open a specific URL (reddit.com/r/n8n…, community.n8n.io/…, short links) or asks “view details of post #2”, “open the one above”…\n2. search: Contains verbs for searching/lookup/how-to/issue related to n8n and/or explicitly mentions Reddit/n8n Community.\n3. 
chat: Greetings, emotions, casual talk (“are you happy?”, “hello”, “thanks”…), basic questions that don’t require opening links or searching about n8n.\n\n> If a message can fall into multiple categories, apply the above priority.\n\n---\n\n## Search parameter inference (when `intent = \"search\"`)\n\n### 1) `keyword`\n\nYou must create a highly intelligent keyword based on the user’s request. The keyword must always be in English and as concise as possible to maximize search results, avoiding overly specific words. Extract the core need about n8n. Keep any text inside quotation marks unchanged.\nRemove filler words that don’t add query meaning.\nPrioritize technical phrases: e.g., \"Telegram Trigger 429\", \"Google Sheets append row\", \"HTTP Request OAuth2\", \"AI Agent memory\".\n\n### 2) Sort & Time\n\nReddit → `reddit_sort`: `relevance` (best match – default if missing) · `new` (recent) · `top` (most upvoted/hottest/most liked) · `comments` (most discussed) · `hot` (trending in last 24h).\nn8n Community → `n8ncom_sort`: `views` (most viewed, default if missing) · `latest` (most recent) · `likes` (most liked) · `latest_topic` (most recently updated topics) · `votes` (most voted).\nReddit → `reddit_time`: `hour|day|week|month|year|all`. Default `week`.\nn8n Community → `n8ncom_time`: `after:YYYY-MM-DD` or `before:YYYY-MM-DD`.\n\n Natural language mapping: \"today\" → `after:{{ new Date().toISOString().slice(0, 10) }}`\n “this week” → `after:{{ (new Date(Date.now() - 7 * 24 * 60 * 60 * 1000)).toISOString().slice(0,10) }}`; “past 30 days” → `after:{{ (new Date(Date.now() - 30 * 24 * 60 * 60 * 1000)).toISOString().slice(0,10) }}`; “past 1 year” → `after:{{ (new Date(Date.now() - 365 * 24 * 60 * 60 * 1000)).toISOString().slice(0,10) }}>`; “past X days” → `after:({{ new Date().toISOString().slice(0, 10) }} - X days)`\n\n Prefer returning an absolute date in YYYY-MM-DD based on the current time {{ new Date().toISOString().slice(0, 10) }}. 
If no data available, default to `after:{{ (new Date(Date.now() - 1 * 24 * 60 * 60 * 1000)).toISOString().slice(0,10) }}` (i.e., since yesterday).\n\n### 3) Platforms & pagination\n\n`platforms`: \"reddit | n8ncom | all\". If the user specifies one place, keep only that. If generic, default to \"all\".\n`limit` defaults to `10` (1–50). `page` defaults to `1`.\n\n---\n\n## Control fields for non-`search` intents\n\nopen_link: fill `link_url` (valid URL). If the user says “post #2” based on the previous list, use the conversation history to find the appropriate link and fill 'link_url'.\n\nchat: If the user intends basic Q&A or casual chat with the bot.\n---\n\n## Normalization & Defaults\n\nAll enum values must be lowercase from the allowed list.\nAlways output all fields in the JSON (no missing fields). For fields without values, output empty string \"\".\nDefaults if missing:\n `reddit_sort = \"relevance\"`, `reddit_time = \"month\"`\n `n8ncom_sort = \"views\"`, `n8ncom_time = \"after:{{ (new Date(Date.now() - 30 * 24 * 60 * 60 * 1000)).toISOString().slice(0,10) }}\"`\n `platforms = [\"reddit\",\"n8ncom\"]`, `limit = 10`, `page = 1`\nDetect the user’s language to set `language` (short ISO, e.g., `vi`, `en`).\nSet `confidence` (0.0–1.0) as the certainty for the `intent` & parameters.\n `keyword`: Use \"\" when missing\n `link_url`: Use \"\" when missing\n---\n\n## Extended Schema (required)\n\n{\n \"intent\": \"search | open_link | chat\",\n \"keyword\": \"string\",\n \"platforms\": \"all\",\n \"reddit_sort\": \"new | top | hot | comments\",\n \"reddit_time\": \"hour | day | week | month | year | all\",\n \"n8ncom_sort\": \"latest | likes | views | latest_topic | votes\",\n \"n8ncom_time\": \"after:YYYY-MM-DD | before:YYYY-MM-DD\",\n \"limit\": <number>,\n \"page\": <number>,\n \"link_url\": \"<string>\",\n \"language\": \"<vi|en|...>\",\n \"confidence\": <number>\n}\n\n> Output must be a valid JSON only per the schema above, without extra 
description/markdown.\n\n---\n\n## Quick heuristics\n\nSearch keywords: \"search\", \"guide\", \"how to\", \"error\", \"bug\", \"workflow\", \"node\", \"reddit\", \"community\", \"post\", \"topic\".\nSort hints: \"new/recent\" → `new`/`latest`; \"top/best\" → `top`/`likes`; \"most viewed\" → `views`; \"active discussion\" → `comments`/`latest_topic`.\nTime hints: \"today\" → `day`; \"this week\" → `week`; \"this month\" → `month`; \"this year\" → `year`; \"all time\" → `all` / `after:2010-01-01`.\nopen_link: presence of URL or index reference (#1, #2…) → fill `link_url`.\n\nchat: greetings/small talk like \"hello\", \"hi\", \"are you happy\", \"thanks\"…\n\n---\n\n## Input/Output examples\n\n### 1) Search (default both platforms)\n\nInput: \"Find top posts about Webhook signature verification this year\"\n\n{\n \"intent\": \"search\",\n \"keyword\": \"Webhook signature verification\",\n \"platforms\": \"all\",\n \"reddit_sort\": \"top\",\n \"reddit_time\": \"year\",\n \"n8ncom_sort\": \"likes\",\n \"n8ncom_time\": \"after:{{ new Date().getFullYear() }}-01-01\",\n \"limit\": 10,\n \"page\": 1,\n \"link_url\": \"\",\n \"language\": \"vi\",\n \"confidence\": 0.95\n}\n\n### 2) Search (specific platform)\n\nInput: \"Filter new posts about Telegram Trigger 429 on Community this month\"\n\n{\n \"intent\": \"search\",\n \"keyword\": \"Telegram Trigger 429\",\n \"platforms\": \"all\",\n \"reddit_sort\": \"new\",\n \"reddit_time\": \"week\",\n \"n8ncom_sort\": \"latest\",\n \"n8ncom_time\": \"after:{{ new Date().getFullYear() + \"-\" + String(new Date().getMonth() + 1).padStart(2, \"0\") }}-01\",\n \"limit\": 10,\n \"page\": 1,\n \"link_url\": \"\",\n \"language\": \"vi\",\n \"confidence\": 0.9\n}\n\n### 3) Open link (direct URL)\n\nInput: \"Summarize the topic about XXX you mentioned earlier!\"\n\n{\n \"intent\": \"open_link\",\n \"keyword\": \"\",\n \"platforms\": \"all\",\n \"reddit_sort\": \"relevance\",\n \"reddit_time\": \"week\",\n \"n8ncom_sort\": \"views\",\n 
\"n8ncom_time\": \"after:{{ new Date().getFullYear() + \"-\" + String(new Date().getMonth() + 1).padStart(2, \"0\") }}-01\",\n \"limit\": 10,\n \"page\": 1,\n \"link_url\": \"https://nguyenthieutoan.com\",\n \"language\": \"vi\",\n \"confidence\": 0.98\n}\n\n### 4) Open link (refer by index)\n\nInput: \"Summarize post #2 from the list above\"\n\n{\n \"intent\": \"open_link\",\n \"keyword\": \"\",\n \"platforms\": \"all\",\n \"reddit_sort\": \"relevance\",\n \"reddit_time\": \"week\",\n \"n8ncom_sort\": \"views\",\n \"n8ncom_time\": \"after:2025-08-12\",\n \"limit\": 10,\n \"page\": 1,\n \"link_url\": \"https://nguyenthieutoan.com\",\n \"language\": \"vi\",\n \"confidence\": 0.6\n}\n\n\n### 5) Chat\n\nInput: \"Are you happy?\"\n\n{\n \"intent\": \"chat\",\n \"keyword\": \"\",\n \"platforms\": \"all\",\n \"reddit_sort\": \"relevance\",\n \"reddit_time\": \"week\",\n \"n8ncom_sort\": \"views\",\n \"n8ncom_time\": \"after:{{ new Date().getFullYear() + \"-\" + String(new Date().getMonth() + 1).padStart(2, \"0\") }}-01\",\n \"limit\": 10,\n \"page\": 1,\n \"link_url\": \"\",\n \"language\": \"vi\",\n \"confidence\": 0.9\n}\n"
},
"promptType": "define",
"hasOutputParser": true
},
"typeVersion": 2.2
},
{
"id": "24d74cc8-a0e1-44b0-8ebb-1bbdc72ac235",
"name": "Branch by Intent",
"type": "n8n-nodes-base.switch",
"position": [
-3584,
-272
],
"parameters": {
"rules": {
"values": [
{
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "f4942c18-c543-47c3-a726-fa8f708e5d0f",
"operator": {
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $json.output.intent }}",
"rightValue": "search"
}
]
}
},
{
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "3a053610-652c-4a96-8ed1-87ab66195be0",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $json.output.intent }}",
"rightValue": "chat"
}
]
}
},
{
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "9c27b4b8-2bab-4edc-89c0-4113604c18cc",
"operator": {
"name": "filter.operator.equals",
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $json.output.intent }}",
"rightValue": "open_link"
}
]
}
},
{
"conditions": {
"options": {
"version": 2,
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "933b4904-e9c5-44e7-91df-d1ad181f2456",
"operator": {
"type": "number",
"operation": "lt"
},
"leftValue": "={{ $json.output.confidence }}",
"rightValue": 0.7
}
]
}
}
]
},
"options": {}
},
"typeVersion": 3.2
},
{
"id": "897009cf-d40d-4e60-b564-4bf64ec88225",
"name": "AI Summarizer Clarify",
"type": "@n8n/n8n-nodes-langchain.agent",
"position": [
-3760,
400
],
"parameters": {
"text": "={{ $('Telegram Trigger - User Message').item.json.message.text }}",
"options": {
"systemMessage": "=Role\nYou are Ken, assistant to Nguyễn Thiệu Toàn. Task: when the current intent’s confidence < 0.7, QUICKLY VERIFY with one short question plus 2–3 clear tap-to-choose options. Use only the given inputs; do not browse the web or infer beyond scope.\n\nInputs\n- user_message: what bro Toàn just asked or typed.\n- intent_info_json: the current inferred intent data (intent, keyword, platform/sort/time, open_link/context_ref, reply, language, confidence…).\n<intent_info_json>\n{{ JSON.stringify($('Detect User Intent').item.json.output) }}\n</intent_info_json>\n\nGoal\n- Clarify the SINGLE most ambiguous point:\n 1) Action? (search | open_link | chitchat/help)\n 2) Keyword/target? (keyword)\n 3) Platform? (reddit | n8n community | both)\n 4) Time range? (today | this week | this month | this year | all)\n 5) Sorting? (hot/new/top/comments | latest/likes/views/latest_topic/votes)\n- Turn the clarification into click-ready choices, minimize typing.\n\nResponse rules\n- Follow the language of user_message (default: Vietnamese).\n- Friendly, concise, light Gen-Z tone; ≤ 2 emojis.\n- Do NOT mention “confidence” or internal system details.\n- Do NOT invent links/titles if base lacks them.\n- If intent_info_json already says chitchat/datetime/help and includes a ready reply → send that brief reply, don’t ask further.\n- If most info is missing → ask for keyword first.\n\nOutput format (required)\n- Return a SINGLE valid Telegram HTML string:\n - Allowed tags: <b>, <i>, <u>, <a href=\"…\">, <code>, <pre>, <blockquote>, <br>\n - First line must be a short bold label.\n - Use the bullet “• ” for options (do not use <ul><li>).\n\nHeuristic to pick the clarification focus\n- Missing/uncertain keyword → ask keyword first.\n- Have keyword but missing platform → ask platform.\n- Have platform but missing time/sort → ask time first, then sort.\n- If user said “view details” but link is empty and only context_ref exists → ask to confirm result #n or request the 
link.\n\nSample responses\n1) Clarify action (search vs open_link)\n<b>🧐 Quick check</b><br>\nDo you want to <b>search posts</b> about <i>webhook signature</i> or <b>open details</b> of a specific post?<br>\n• Search latest posts<br>\n• Open details of item #2<br>\n• Neither of these\n\n2) Choose platform\n<b>🔎 Where should I search?</b><br>\nPick the platform:<br>\n• Reddit r/n8n<br>\n• n8n Community<br>\n• Both platforms\n\n3) Choose time range\n<b>⏱️ Which time range?</b><br>\n• Today/This week<br>\n• This month/This year<br>\n• All time\n\n4) Missing keyword\n<b>✍️ What topic do you want?</b><br>\nExamples: <i>Telegram Trigger 429</i>, <i>Google Sheets append row</i>, <i>HTTP Request OAuth2</i><br>\n• Telegram Trigger 429<br>\n• Webhook signature verification<br>\n• I’ll type another keyword\n\n5) View details but missing link\n<b>🔗 Which post should I open?</b><br>\n• Open item #2 from the previous list<br>\n• I’ll send a link to open<br>\n• Nah, go back to an overview search\n\nWorkflow execution hints\n- This node runs only when confidence < 0.7.\n- After the user picks one option, update intent/params accordingly, then jump to the main action branch (search/open_link).\n- If the user is silent > 60s, send one gentle reminder with 2 short options.\n\nRequired output\n- Send ONLY a Telegram HTML string (no JSON, no extra explanation).\n"
},
"promptType": "define"
},
"typeVersion": 2.2
},
{
"id": "396ab273-1d0b-4f79-be29-7dbf151b1c21",
"name": "AI Zusammenfassenr Deep-dive",
"type": "@n8n/n8n-nodes-langchain.agent",
"position": [
-1216,
-256
],
"parameters": {
"text": "={{ $('Telegram Trigger - User Message').item.json.message.text }}",
"options": {
"systemMessage": "=## Role\nYou are **Ken**, assistant to Nguyễn Thiệu Toàn. Your job: read the boss’s request (my **bro**) + the provided base info (base_info_json) and craft a reply **ONLY** from that data. No web browsing, no made-up facts. In every reply, speak in first person as **“tui”** and address the boss as **“bro”**, showing playful admiration (while staying professional).\n\n## Inputs\n- user_message: the latest message from bro Toàn.\n- base_info_json: the injected data from n8n (do not infer beyond this). This may simply be an intent analysis if bro only greeted or chit-chatted.\n<base_info_json>\n{{ JSON.stringify($('Wrap as Data Object').item.json.data) }}\n</base_info_json>\n\nIntent & response modes\n1) Search overview:\n - base_info_json contains a LIST of results (title, platform, short summary, time, URL).\n - Goal: quickly summarize 3–6 notable items, highlight key points & suggest next steps.\n2) Deep-dive on a single post:\n - base_info_json contains EXTRACTED CONTENT (post + replies) with author & commenters.\n - Goal: structured summary, key insights from post & replies, conclusions/actionable steps.\n3) Chitchat:\n - If bro is just greeting/asking what the bot can do: friendly, short guidance.\n4) Missing/insufficient data:\n - Say exactly what is missing and suggest running search again or providing a URL / result #N.\n\nStyle\n- Friendly, upbeat, Gen-Z, concise, pragmatic. Emojis allowed but ≤ 2 per block.\n- Speak as **tui**; call bro **bro**. Playful, energetic, a bit cheeky—but helpful.\n- Default Vietnamese; if bro writes in another language, reply in that language.\n- Avoid long jargon; focus on “get it done”.\n\nOutput format (Telegram HTML, required)\n- Only use Telegram-supported HTML tags: <b>, <i>, <u>, <a href=\"...\">, <code>, <pre>, <blockquote>.\n- No Markdown, no JSON, no text outside HTML.\n- First line is a short bold heading. 
Use the bullet “• ” for lists (do not use <ul><li>).\n\nData & safety rules\n- Quote only content present in <base_info_json>. No invented URLs, no assumptions.\n- If a field is missing (e.g., post time), write “(unknown)” instead of guessing.\n- Link to the original when a URL is available; otherwise say “no link in data”.\n- You may quote short snippets via <blockquote>…</blockquote> (≤ 2–3 sentences each).\n- Add a brief freshness note if the info seems old (based on timestamps in base).\n- You always provide detail information on each topic: post time, author, number of vote/like, number of post/comment...\n\nSample response for search overview (always attach links to searchable results)\n<b>🔎 Quick roundup on Webhook signature in n8n</b>\n• <b>[n8n Community]</b> <a href=\"https://community.n8n.io/t/example-post-12345\">Verify Webhook HMAC</a> — How to set the secret and compare HMAC signatures (2025-08-12)\n• <b>[Reddit]</b> <a href=\"https://reddit.com/r/n8n/comments/abc123/example\">Best practices for webhook signature</a> — Discussion on timestamps & replay protection (2025-07-30)\n• <b>[n8n Community]</b> <a href=\"https://community.n8n.io/t/example-post-67890\">Troubleshoot invalid signature</a> — Common pitfalls & logging tips (2025-06-18)\n<i>Heads-up:</i> Bro want tui to deep-dive #1/#2 or filter by “top/this month”? 🧭\n\nSample response (deep-dive)\n<b>🧩 Deep-dive: Verify Webhook HMAC in n8n</b>\n<i>Platform:</i> n8n Community · <i>Author:</i> alice_dev · <i>Time:</i> 2025-08-12\n<blockquote>“After enabling ‘Enable Signature Verification’, set the secret and compare the signature header with the value computed from body + timestamp.”</blockquote>\n<b>Summary:</b> Post explains enabling signature verification for the Webhook node and computing HMAC over the raw body + timestamp to prevent replay. 
Discussion centers on signing order, time skew handling, and logging when payload changes.\n<b>Notables:</b>\n• Check time skew within ±5 minutes for safety<br>\n• Log raw body before parsing for accurate HMAC<br>\n• Disable client auto-format if it mutates payload\n<b>Takeaways:</b>\n• Enforce timestamp & nonce — reduces replay risk<br>\n• Compare signature over raw body — avoids serialize drift\n<i>Original:</i> <a href=\"https://community.n8n.io/t/example-post-12345\">community.n8n.io</a>\n\nPresentation rules\n- Length: tune to the ask, but keep it short and punchy.\n- Structure: heading → 1-line summary → bullets → CTA.\n- If many items are near-duplicates, group/compare briefly (“(similar content)”).\n\nWhen data doesn’t match intent\n- Bro wants a deep-dive but base has only a LIST → prompt: “Pick #n or send a URL so I can dive in.”\n- Bro wants overview but base has only 1 item → summarize that item (mini deep-dive) + suggest expanding the search.\n\nRequired output\n- Return a SINGLE valid Telegram HTML string.\n- No technical prefixes/suffixes, no extra explanations beyond the reply itself.\n"
},
"promptType": "define"
},
"typeVersion": 2.2
},
{
"id": "42a6e284-e6d5-4776-ac1c-7ae16daa1426",
"name": "AI Zusammenfassenr Overview",
"type": "@n8n/n8n-nodes-langchain.agent",
"position": [
-4048,
-944
],
"parameters": {
"text": "[Yêu cầu tự động] Tổng hợp tin hot nhất trong ngày gửi tới người dùng",
"options": {
"systemMessage": "=## Role\nYou are **Ken**, assistant to Nguyễn Thiệu Toàn. Your job: read the boss’s request (my **bro**) + the provided base info (base_info_json) and craft a reply **ONLY** from that data. No web browsing, no made-up facts. In every reply, speak in first person as **“tui”** and address the boss as **“bro”**, showing playful admiration (while staying professional).\n\n## Inputs\n- user_message: the latest message from bro Toàn.\n- base_info_json: the injected data from n8n (do not infer beyond this). This may simply be an intent analysis if bro only greeted or chit-chatted.\n<base_info_json>\n{{ JSON.stringify($('Wrap as Data Object2').item.json.data) }}\n</base_info_json>\n\nIntent & response modes\n1) Search overview:\n - base_info_json contains a LIST of results (title, platform, short summary, time, URL).\n - Goal: quickly summarize 3–6 notable items, highlight key points & suggest next steps.\n2) Deep-dive on a single post:\n - base_info_json contains EXTRACTED CONTENT (post + replies) with author & commenters.\n - Goal: structured summary, key insights from post & replies, conclusions/actionable steps.\n3) Chitchat:\n - If bro is just greeting/asking what the bot can do: friendly, short guidance.\n4) Missing/insufficient data:\n - Say exactly what is missing and suggest running search again or providing a URL / result #N.\n\nStyle\n- Friendly, upbeat, Gen-Z, concise, pragmatic. Emojis allowed but ≤ 2 per block.\n- Speak as **tui**; call bro **bro**. Playful, energetic, a bit cheeky—but helpful.\n- Default Vietnamese; if bro writes in another language, reply in that language.\n- Avoid long jargon; focus on “get it done”.\n\nOutput format (Telegram HTML, required)\n- Only use Telegram-supported HTML tags: <b>, <i>, <u>, <a href=\"...\">, <code>, <pre>, <blockquote>.\n- No Markdown, no JSON, no text outside HTML.\n- First line is a short bold heading. 
Use the bullet “• ” for lists (do not use <ul><li>).\n\nData & safety rules\n- Quote only content present in <base_info_json>. No invented URLs, no assumptions.\n- If a field is missing (e.g., post time), write “(unknown)” instead of guessing.\n- Link to the original when a URL is available; otherwise say “no link in data”.\n- You may quote short snippets via <blockquote>…</blockquote> (≤ 2–3 sentences each).\n- Add a brief freshness note if the info seems old (based on timestamps in base).\n\nSample response for search overview (always attach links to searchable results)\n<b>🔎 Quick roundup on Webhook signature in n8n</b>\n• <b>[n8n Community]</b> <a href=\"https://community.n8n.io/t/example-post-12345\">Verify Webhook HMAC</a> — How to set the secret and compare HMAC signatures (2025-08-12)\n• <b>[Reddit]</b> <a href=\"https://reddit.com/r/n8n/comments/abc123/example\">Best practices for webhook signature</a> — Discussion on timestamps & replay protection (2025-07-30)\n• <b>[n8n Community]</b> <a href=\"https://community.n8n.io/t/example-post-67890\">Troubleshoot invalid signature</a> — Common pitfalls & logging tips (2025-06-18)\n<i>Heads-up:</i> Bro want tui to deep-dive #1/#2 or filter by “top/this month”? 🧭\n\nSample response (deep-dive)\n<b>🧩 Deep-dive: Verify Webhook HMAC in n8n</b>\n<i>Platform:</i> n8n Community · <i>Author:</i> alice_dev · <i>Time:</i> 2025-08-12\n<blockquote>“After enabling ‘Enable Signature Verification’, set the secret and compare the signature header with the value computed from body + timestamp.”</blockquote>\n<b>Summary:</b> Post explains enabling signature verification for the Webhook node and computing HMAC over the raw body + timestamp to prevent replay. 
Discussion centers on signing order, time skew handling, and logging when payload changes.\n<b>Notables:</b>\n• Check time skew within ±5 minutes for safety<br>\n• Log raw body before parsing for accurate HMAC<br>\n• Disable client auto-format if it mutates payload\n<b>Takeaways:</b>\n• Enforce timestamp & nonce — reduces replay risk<br>\n• Compare signature over raw body — avoids serialize drift\n<i>Original:</i> <a href=\"https://community.n8n.io/t/example-post-12345\">community.n8n.io</a>\n\nPresentation rules\n- Length: tune to the ask, but keep it short and punchy.\n- Structure: heading → 1-line summary → bullets → CTA.\n- If many items are near-duplicates, group/compare briefly (“(similar content)”).\n\nWhen data doesn’t match intent\n- Bro wants a deep-dive but base has only a LIST → prompt: “Pick #n or send a URL so I can dive in.”\n- Bro wants overview but base has only 1 item → summarize that item (mini deep-dive) + suggest expanding the search.\n\nRequired output\n- Return a SINGLE valid Telegram HTML string.\n- No technical prefixes/suffixes, no extra explanations beyond the reply itself.\n"
},
"promptType": "define"
},
"typeVersion": 2.2
},
{
"id": "cd19593f-3229-4620-b606-ac2b9c688d73",
"name": "Format for Telegram Output",
"type": "n8n-nodes-base.code",
"position": [
-928,
-256
],
"parameters": {
"jsCode": "/**\n * Telegram HTML Normalizer + Chunker (≤ 2000 chars)\n * - Markdown → Telegram HTML (b,i,u,s,code,pre,a,br,blockquote)\n * - Map/strip unsupported tags, sanitize attributes\n * - Preserve links/code/pre; do NOT split inside <a>/<pre>/<blockquote> and inline pairs\n * - Close/reopen ONLY long-lived tags per chunk (a, pre, blockquote)\n * - Prevent stray closing tags and treat lone \"<\" safely\n */\n\nconst MAX_LEN = 2000;\n\n/* === Get input === */\nlet text = ($input.first().json.output ?? $input.first().json.text ?? '').trim();\nif (!text) return [];\n\n/* === Allowed Telegram tags === */\nconst allowed = new Set(['a','b','i','u','s','code','pre','br','blockquote']);\n\n/* ---------- Utils ---------- */\nconst escapeHtml = (s) => String(s).replace(/&/g,'&amp;').replace(/</g,'&lt;').replace(/>/g,'&gt;');\n\nfunction sanitizeHref(href){\n if (!href) return '';\n const h = String(href).trim().replace(/^['\"]|['\"]$/g,'');\n const lower = h.toLowerCase();\n if (lower.startsWith('javascript:') || lower.startsWith('data:')) return '';\n return h;\n}\n\n/* ---------- 1) Markdown -> HTML ---------- */\nfunction mdToHtml(md){\n let s = md;\n // code block\n s = s.replace(/```([\\s\\S]*?)```/g, (m,p1)=>`<pre>${escapeHtml(p1.trim())}</pre>`);\n // inline code\n s = s.replace(/`([^`]+)`/g, (m,p1)=>`<code>${escapeHtml(p1)}</code>`);\n // bold / italic / underline / strike\n s = s.replace(/\\*\\*([^*]+)\\*\\*/g, '<b>$1</b>');\n s = s.replace(/(^|[\\s(])\\*([^*\\n]+)\\*(?=$|[\\s).,!?\\]])/g, '$1<i>$2</i>');\n s = s.replace(/__([^_]+)__/g, '<u>$1</u>');\n s = s.replace(/~~([^~]+)~~/g, '<s>$1</s>');\n // headers → bold + newline\n s = s.replace(/^(#{1,6})\\s+(.+)$/gm, (m, hashes, content)=>`<b>${content.trim()}</b>\\n`);\n // list markers → bullet\n s = s.replace(/^[\\-\\*\\+]\\s+(.+)$/gm, '• $1');\n // [text](url)\n s = s.replace(/\\[([^\\]]+)\\]\\(([^)]+)\\)/g, (m,txt,url)=>{\n const safe = sanitizeHref(url);\n return safe ? 
`<a href=\"${safe}\">${txt}</a>` : txt;\n });\n return s;\n}\n\n/* ---------- 2) Protect <pre>/<code> contents ---------- */\nfunction protectPreCodeBlocks(input){\n const placeholders = [];\n let out = input;\n\n out = out.replace(/<pre\\b[^>]*>([\\s\\S]*?)<\\/pre>/gi, (m, inner)=>{\n const token = `__PRE_BLOCK_${placeholders.length}__`;\n placeholders.push({token, tag:'pre', content: escapeHtml(inner)});\n return token;\n });\n\n out = out.replace(/<code\\b[^>]*>([\\s\\S]*?)<\\/code>/gi, (m, inner)=>{\n const token = `__CODE_BLOCK_${placeholders.length}__`;\n placeholders.push({token, tag:'code', content: escapeHtml(inner)});\n return token;\n });\n\n const restore = (s)=>{\n for (const ph of placeholders){\n s = s.split(ph.token).join(`<${ph.tag}>${ph.content}</${ph.tag}>`);\n }\n return s;\n };\n\n return { text: out, restore };\n}\n\n/* ---------- 3) HTML normalize ---------- */\nfunction htmlNormalize(input){\n let s = input;\n\n s = s.replace(/<br\\s*\\/?>/gi, '<br>');\n s = s.replace(/<li[^>]*>/gi, '• ').replace(/<\\/li>/gi, '\\n');\n s = s.replace(/<\\/?(ul|ol)[^>]*>/gi, '');\n\n s = s.replace(/<p[^>]*>/gi, '').replace(/<\\/p>/gi, '\\n\\n');\n s = s.replace(/<div[^>]*>/gi, '').replace(/<\\/div>/gi, '\\n');\n\n s = s.replace(/<h[1-6][^>]*>([\\s\\S]*?)<\\/h[1-6]>/gi, (m, inner)=>`<b>${inner.trim()}</b>\\n`);\n\n // Map aliases -> canonical\n s = s.replace(/<\\/?strong\\b/gi, t=>t.replace(/strong/i,'b'));\n s = s.replace(/<\\/?em\\b/gi, t=>t.replace(/em/i,'i'));\n s = s.replace(/<\\/?ins\\b/gi, t=>t.replace(/ins/i,'u'));\n s = s.replace(/<\\/?(strike|del)\\b/gi, t=>t.replace(/strike|del/i,'s'));\n\n // remove spans\n s = s.replace(/<\\/?span[^>]*>/gi, '');\n\n // images -> textual hint\n s = s.replace(/<img[^>]*src\\s*=\\s*(\"[^\"]+\"|'[^']+'|[^\\s>]+)[^>]*>/gi, (m,src)=>{\n const clean = String(src).replace(/^['\"]|['\"]$/g,'');\n return ` (ảnh: ${clean}) `;\n });\n\n return s;\n}\n\n/* ---------- 3.5) Escape lone '<' that are NOT valid tags ---------- 
*/\nfunction escapeLoneAngles(input){\n // allow only tags in: a|b|i|u|s|code|pre|br|blockquote\n return input.replace(/<(?!\\/?(a|b|i|u|s|code|pre|br|blockquote)\\b)/gi, '&lt;');\n}\n\n/* ---------- 4) Sanitize tags (keep only Telegram tags) ---------- */\nfunction sanitizeHtml(input){\n return input.replace(/<\\/?([a-z0-9]+)(\\s+[^>]*)?>/gi, (full, tag, attrs='')=>{\n const t = tag.toLowerCase();\n\n if (t === 'a'){\n if (full.startsWith('</')) return '</a>';\n const hrefMatch = attrs.match(/href\\s*=\\s*(\"[^\"]+\"|'[^']+'|[^\\s>]+)/i);\n const safeHref = hrefMatch ? sanitizeHref(hrefMatch[1]) : '';\n return safeHref ? `<a href=\"${safeHref}\">` : '';\n }\n\n if (!allowed.has(t)) return '';\n if (t === 'br') return '<br>';\n return full[1] === '/' ? `</${t}>` : `<${t}>`;\n });\n}\n\n/* ---------- 5) Track ONLY long-lived tags across chunks ---------- */\n/* We only track <a>, <pre>, <blockquote> so inline pairs never get auto-closed. */\nfunction getOpenTagsWithAttrs(html){\n const trackable = new Set(['a','pre','blockquote']);\n const stack = [];\n const re = /<\\/?([a-z0-9]+)(?:\\s+[^>]*)?>/gi;\n let m;\n while ((m = re.exec(html))){\n const raw = m[0];\n const t = m[1].toLowerCase();\n if (!trackable.has(t)) continue;\n\n if (raw[1] === '/'){\n const idx = [...stack].reverse().findIndex(e=>e.tag===t);\n if (idx !== -1) stack.splice(stack.length - 1 - idx, 1);\n } else {\n if (t === 'a'){\n const hrefMatch = raw.match(/href\\s*=\\s*(\"[^\"]+\"|'[^']+'|[^\\s>]+)/i);\n const href = hrefMatch ? 
sanitizeHref(hrefMatch[1]) : '';\n if (href) stack.push({tag:'a', href});\n } else {\n stack.push({tag:t});\n }\n }\n }\n return stack;\n}\nconst closeTags = (open)=>[...open].reverse().map(e=>`</${e.tag}>`).join('');\nconst openTagsStr = (open)=>open.map(e=>e.tag==='a'?`<a href=\"${e.href}\">`:`<${e.tag}>`).join('');\n\n/* ---------- 6) Normalize pipeline ---------- */\nfunction normalizeToTelegramHtml(input){\n const looksLikeMd = /(^|\\s)[*_`~]|^#{1,6}\\s|```/.test(input);\n let s = looksLikeMd ? mdToHtml(input) : input;\n\n s = htmlNormalize(s);\n\n const protector = protectPreCodeBlocks(s);\n s = protector.text;\n\n // avoid \"<10\" etc. being treated as a tag\n s = escapeLoneAngles(s);\n\n s = sanitizeHtml(s);\n s = protector.restore(s);\n\n s = s.replace(/\\n{3,}/g, '\\n\\n').trim();\n return s;\n}\n\n/* ---------- 7) Split safely ---------- */\nfunction splitHtmlSmart(html, maxLen){\n // Atomic segments: a, pre, blockquote, and inline pairs b/i/u/s/code\n const re = /(<a\\b[^>]*>[\\s\\S]*?<\\/a>)|(<pre\\b[^>]*>[\\s\\S]*?<\\/pre>)|(<blockquote\\b[^>]*>[\\s\\S]*?<\\/blockquote>)|(<b\\b[^>]*>[\\s\\S]*?<\\/b>)|(<i\\b[^>]*>[\\s\\S]*?<\\/i>)|(<u\\b[^>]*>[\\s\\S]*?<\\/u>)|(<s\\b[^>]*>[\\s\\S]*?<\\/s>)|(<code\\b[^>]*>[\\s\\S]*?<\\/code>)|(<br\\s*\\/?>)|(<[^>]+>)|([^<]+)/gi;\n\n const segments = [];\n let m;\n while ((m = re.exec(html))){\n segments.push(\n m[1] || m[2] || m[3] || m[4] || m[5] || m[6] || m[7] || m[8] ||\n (m[9] ? 
'<br>' : (m[10] || m[11]))\n );\n }\n\n const chunks = [];\n let buffer = '';\n let prefixOpen = '';\n\n const pushBuffer = ()=>{\n if (!buffer.trim()) return;\n let out = prefixOpen + buffer;\n // Only close a/pre/blockquote\n const open = getOpenTagsWithAttrs(out);\n out += closeTags(open);\n prefixOpen = openTagsStr(open); // reopen at next chunk if any\n chunks.push(out.trim());\n buffer = '';\n };\n\n // Shrink visible text inside <a> if needed\n const shrinkAnchorIfTooLong = (seg, room)=>{\n const mm = /^<a\\b[^>]*>([\\s\\S]*?)<\\/a>$/i.exec(seg);\n if (!mm) return seg;\n const visible = mm[1];\n if (seg.length <= room) return seg;\n const truncated = (visible.length > room - 30) ? (visible.slice(0, Math.max(3, room - 33)) + '...') : visible;\n return seg.replace(visible, truncated);\n };\n\n // Split huge <pre> / <blockquote> into multiple wrapped chunks\n const splitWrappedBlock = (segTag, seg, available, emit)=>{\n const rx = new RegExp(`^<${segTag}[^>]*>([\\\\s\\\\S]*)<\\\\/${segTag}>$`, 'i');\n const mm = rx.exec(seg);\n if (!mm) return false;\n let content = mm[1];\n const shellLen = (`<${segTag}></${segTag}>`).length; // 11 for pre, 23 for blockquote, etc.\n\n // if it fits already → no split here\n if (seg.length <= available) return false;\n\n // flush current buffer to start fresh\n if (buffer.trim()) pushBuffer();\n\n while (content.length){\n // recompute room each loop (prefixOpen may change)\n const room = Math.max(200, maxLen - prefixOpen.length - shellLen - 10);\n const slice = content.slice(0, room);\n buffer = `<${segTag}>${slice}</${segTag}>`;\n pushBuffer();\n content = content.slice(slice.length);\n }\n return true;\n };\n\n for (const seg of segments){\n const segLen = seg.length;\n\n // if appending seg would overflow\n if ((prefixOpen.length + buffer.length + segLen) > maxLen){\n\n // 7.1 Try shrinking anchor text\n if (/^<a\\b/i.test(seg)){\n const room = maxLen - prefixOpen.length - buffer.length - 1;\n const shrunk = 
shrinkAnchorIfTooLong(seg, Math.max(60, room));\n if ((prefixOpen.length + buffer.length + shrunk.length) <= maxLen){\n buffer += shrunk;\n continue;\n }\n }\n\n // 7.2 Split huge <pre>/<blockquote> safely (keep wrappers)\n const available = maxLen - prefixOpen.length - buffer.length;\n if (/^<pre\\b/i.test(seg)){\n if (splitWrappedBlock('pre', seg, available)) continue;\n }\n if (/^<blockquote\\b/i.test(seg)){\n if (splitWrappedBlock('blockquote', seg, available)) continue;\n }\n\n // 7.3 Push current chunk\n pushBuffer();\n\n // 7.4 Place seg into new buffer or split plain text / hard-cut tag as last resort\n if ((prefixOpen.length + segLen) <= maxLen){\n buffer = seg;\n } else if (!seg.startsWith('<')) {\n // split plain text by whitespace then hard-cut if needed\n const words = seg.split(/(\\s+)/);\n for (const w of words){\n const candidate = buffer + w;\n if ((prefixOpen.length + candidate.length) > maxLen){\n pushBuffer();\n if ((prefixOpen.length + w.length) > maxLen){\n let s = w;\n const room = Math.max(200, maxLen - prefixOpen.length - 10);\n while (s.length){\n buffer = s.slice(0, room);\n pushBuffer();\n s = s.slice(room);\n }\n } else {\n buffer = w;\n }\n } else {\n buffer = candidate;\n }\n }\n } else {\n // FINAL FALLBACK: extremely long single tag (very rare)\n // Hard-cut by characters (may break styling but keeps length constraints)\n let start = 0;\n const room = Math.max(200, maxLen - prefixOpen.length - 10);\n while (start < segLen){\n buffer = seg.slice(start, start + room);\n pushBuffer();\n start += room;\n }\n }\n continue;\n }\n\n // fits → append\n buffer += seg;\n }\n\n if (buffer.trim()) pushBuffer();\n return chunks;\n}\n\n/* ===== Run ===== */\nconst normalized = normalizeToTelegramHtml(text);\nconst chunks = splitHtmlSmart(normalized, MAX_LEN);\n\n/* Export to Telegram node */\nreturn chunks.map(c => ({ json: { text: c } }));\n"
},
"typeVersion": 2
},
{
"id": "39ffde34-51f1-4463-9bf6-a7ca9410b34b",
"name": "Clean and Chunk",
"type": "n8n-nodes-base.code",
"position": [
-3680,
-944
],
"parameters": {
"jsCode": "/**\n * Telegram HTML Normalizer + Chunker (≤ 2000 chars)\n * - Markdown → Telegram HTML (b,i,u,s,code,pre,a,br,blockquote)\n * - Map/strip unsupported tags, sanitize attributes\n * - Preserve links/code/pre; do NOT split inside <a>/<pre>/<blockquote> and inline pairs\n * - Close/reopen ONLY long-lived tags per chunk (a, pre, blockquote)\n * - Prevent stray closing tags and treat lone \"<\" safely\n */\n\nconst MAX_LEN = 2000;\n\n/* === Get input === */\nlet text = ($input.first().json.output ?? $input.first().json.text ?? '').trim();\nif (!text) return [];\n\n/* === Allowed Telegram tags === */\nconst allowed = new Set(['a','b','i','u','s','code','pre','br','blockquote']);\n\n/* ---------- Utils ---------- */\nconst escapeHtml = (s) => String(s).replace(/&/g,'&amp;').replace(/</g,'&lt;').replace(/>/g,'&gt;');\n\nfunction sanitizeHref(href){\n if (!href) return '';\n const h = String(href).trim().replace(/^['\"]|['\"]$/g,'');\n const lower = h.toLowerCase();\n if (lower.startsWith('javascript:') || lower.startsWith('data:')) return '';\n return h;\n}\n\n/* ---------- 1) Markdown -> HTML ---------- */\nfunction mdToHtml(md){\n let s = md;\n // code block\n s = s.replace(/```([\\s\\S]*?)```/g, (m,p1)=>`<pre>${escapeHtml(p1.trim())}</pre>`);\n // inline code\n s = s.replace(/`([^`]+)`/g, (m,p1)=>`<code>${escapeHtml(p1)}</code>`);\n // bold / italic / underline / strike\n s = s.replace(/\\*\\*([^*]+)\\*\\*/g, '<b>$1</b>');\n s = s.replace(/(^|[\\s(])\\*([^*\\n]+)\\*(?=$|[\\s).,!?\\]])/g, '$1<i>$2</i>');\n s = s.replace(/__([^_]+)__/g, '<u>$1</u>');\n s = s.replace(/~~([^~]+)~~/g, '<s>$1</s>');\n // headers → bold + newline\n s = s.replace(/^(#{1,6})\\s+(.+)$/gm, (m, hashes, content)=>`<b>${content.trim()}</b>\\n`);\n // list markers → bullet\n s = s.replace(/^[\\-\\*\\+]\\s+(.+)$/gm, '• $1');\n // [text](url)\n s = s.replace(/\\[([^\\]]+)\\]\\(([^)]+)\\)/g, (m,txt,url)=>{\n const safe = sanitizeHref(url);\n return safe ? 
`<a href=\"${safe}\">${txt}</a>` : txt;\n });\n return s;\n}\n\n/* ---------- 2) Protect <pre>/<code> contents ---------- */\nfunction protectPreCodeBlocks(input){\n const placeholders = [];\n let out = input;\n\n out = out.replace(/<pre\\b[^>]*>([\\s\\S]*?)<\\/pre>/gi, (m, inner)=>{\n const token = `__PRE_BLOCK_${placeholders.length}__`;\n placeholders.push({token, tag:'pre', content: escapeHtml(inner)});\n return token;\n });\n\n out = out.replace(/<code\\b[^>]*>([\\s\\S]*?)<\\/code>/gi, (m, inner)=>{\n const token = `__CODE_BLOCK_${placeholders.length}__`;\n placeholders.push({token, tag:'code', content: escapeHtml(inner)});\n return token;\n });\n\n const restore = (s)=>{\n for (const ph of placeholders){\n s = s.split(ph.token).join(`<${ph.tag}>${ph.content}</${ph.tag}>`);\n }\n return s;\n };\n\n return { text: out, restore };\n}\n\n/* ---------- 3) HTML normalize ---------- */\nfunction htmlNormalize(input){\n let s = input;\n\n s = s.replace(/<br\\s*\\/?>/gi, '<br>');\n s = s.replace(/<li[^>]*>/gi, '• ').replace(/<\\/li>/gi, '\\n');\n s = s.replace(/<\\/?(ul|ol)[^>]*>/gi, '');\n\n s = s.replace(/<p[^>]*>/gi, '').replace(/<\\/p>/gi, '\\n\\n');\n s = s.replace(/<div[^>]*>/gi, '').replace(/<\\/div>/gi, '\\n');\n\n s = s.replace(/<h[1-6][^>]*>([\\s\\S]*?)<\\/h[1-6]>/gi, (m, inner)=>`<b>${inner.trim()}</b>\\n`);\n\n // Map aliases -> canonical\n s = s.replace(/<\\/?strong\\b/gi, t=>t.replace(/strong/i,'b'));\n s = s.replace(/<\\/?em\\b/gi, t=>t.replace(/em/i,'i'));\n s = s.replace(/<\\/?ins\\b/gi, t=>t.replace(/ins/i,'u'));\n s = s.replace(/<\\/?(strike|del)\\b/gi, t=>t.replace(/strike|del/i,'s'));\n\n // remove spans\n s = s.replace(/<\\/?span[^>]*>/gi, '');\n\n // images -> textual hint\n s = s.replace(/<img[^>]*src\\s*=\\s*(\"[^\"]+\"|'[^']+'|[^\\s>]+)[^>]*>/gi, (m,src)=>{\n const clean = String(src).replace(/^['\"]|['\"]$/g,'');\n return ` (ảnh: ${clean}) `;\n });\n\n return s;\n}\n\n/* ---------- 3.5) Escape lone '<' that are NOT valid tags ---------- 
*/\nfunction escapeLoneAngles(input){\n // allow only tags in: a|b|i|u|s|code|pre|br|blockquote\n return input.replace(/<(?!\\/?(a|b|i|u|s|code|pre|br|blockquote)\\b)/gi, '&lt;');\n}\n\n/* ---------- 4) Sanitize tags (keep only Telegram tags) ---------- */\nfunction sanitizeHtml(input){\n return input.replace(/<\\/?([a-z0-9]+)(\\s+[^>]*)?>/gi, (full, tag, attrs='')=>{\n const t = tag.toLowerCase();\n\n if (t === 'a'){\n if (full.startsWith('</')) return '</a>';\n const hrefMatch = attrs.match(/href\\s*=\\s*(\"[^\"]+\"|'[^']+'|[^\\s>]+)/i);\n const safeHref = hrefMatch ? sanitizeHref(hrefMatch[1]) : '';\n return safeHref ? `<a href=\"${safeHref}\">` : '';\n }\n\n if (!allowed.has(t)) return '';\n if (t === 'br') return '<br>';\n return full[1] === '/' ? `</${t}>` : `<${t}>`;\n });\n}\n\n/* ---------- 5) Track ONLY long-lived tags across chunks ---------- */\n/* We only track <a>, <pre>, <blockquote> so inline pairs never get auto-closed. */\nfunction getOpenTagsWithAttrs(html){\n const trackable = new Set(['a','pre','blockquote']);\n const stack = [];\n const re = /<\\/?([a-z0-9]+)(?:\\s+[^>]*)?>/gi;\n let m;\n while ((m = re.exec(html))){\n const raw = m[0];\n const t = m[1].toLowerCase();\n if (!trackable.has(t)) continue;\n\n if (raw[1] === '/'){\n const idx = [...stack].reverse().findIndex(e=>e.tag===t);\n if (idx !== -1) stack.splice(stack.length - 1 - idx, 1);\n } else {\n if (t === 'a'){\n const hrefMatch = raw.match(/href\\s*=\\s*(\"[^\"]+\"|'[^']+'|[^\\s>]+)/i);\n const href = hrefMatch ? 
sanitizeHref(hrefMatch[1]) : '';\n if (href) stack.push({tag:'a', href});\n } else {\n stack.push({tag:t});\n }\n }\n }\n return stack;\n}\nconst closeTags = (open)=>[...open].reverse().map(e=>`</${e.tag}>`).join('');\nconst openTagsStr = (open)=>open.map(e=>e.tag==='a'?`<a href=\"${e.href}\">`:`<${e.tag}>`).join('');\n\n/* ---------- 6) Normalize pipeline ---------- */\nfunction normalizeToTelegramHtml(input){\n const looksLikeMd = /(^|\\s)[*_`~]|^#{1,6}\\s|```/.test(input);\n let s = looksLikeMd ? mdToHtml(input) : input;\n\n s = htmlNormalize(s);\n\n const protector = protectPreCodeBlocks(s);\n s = protector.text;\n\n // avoid \"<10\" etc. being treated as a tag\n s = escapeLoneAngles(s);\n\n s = sanitizeHtml(s);\n s = protector.restore(s);\n\n s = s.replace(/\\n{3,}/g, '\\n\\n').trim();\n return s;\n}\n\n/* ---------- 7) Split safely ---------- */\nfunction splitHtmlSmart(html, maxLen){\n // Atomic segments: a, pre, blockquote, and inline pairs b/i/u/s/code\n const re = /(<a\\b[^>]*>[\\s\\S]*?<\\/a>)|(<pre\\b[^>]*>[\\s\\S]*?<\\/pre>)|(<blockquote\\b[^>]*>[\\s\\S]*?<\\/blockquote>)|(<b\\b[^>]*>[\\s\\S]*?<\\/b>)|(<i\\b[^>]*>[\\s\\S]*?<\\/i>)|(<u\\b[^>]*>[\\s\\S]*?<\\/u>)|(<s\\b[^>]*>[\\s\\S]*?<\\/s>)|(<code\\b[^>]*>[\\s\\S]*?<\\/code>)|(<br\\s*\\/?>)|(<[^>]+>)|([^<]+)/gi;\n\n const segments = [];\n let m;\n while ((m = re.exec(html))){\n segments.push(\n m[1] || m[2] || m[3] || m[4] || m[5] || m[6] || m[7] || m[8] ||\n (m[9] ? 
'<br>' : (m[10] || m[11]))\n );\n }\n\n const chunks = [];\n let buffer = '';\n let prefixOpen = '';\n\n const pushBuffer = ()=>{\n if (!buffer.trim()) return;\n let out = prefixOpen + buffer;\n // Only close a/pre/blockquote\n const open = getOpenTagsWithAttrs(out);\n out += closeTags(open);\n prefixOpen = openTagsStr(open); // reopen at next chunk if any\n chunks.push(out.trim());\n buffer = '';\n };\n\n // Shrink visible text inside <a> if needed\n const shrinkAnchorIfTooLong = (seg, room)=>{\n const mm = /^<a\\b[^>]*>([\\s\\S]*?)<\\/a>$/i.exec(seg);\n if (!mm) return seg;\n const visible = mm[1];\n if (seg.length <= room) return seg;\n const truncated = (visible.length > room - 30) ? (visible.slice(0, Math.max(3, room - 33)) + '...') : visible;\n return seg.replace(visible, truncated);\n };\n\n // Split huge <pre> / <blockquote> into multiple wrapped chunks\n const splitWrappedBlock = (segTag, seg, available, emit)=>{\n const rx = new RegExp(`^<${segTag}[^>]*>([\\\\s\\\\S]*)<\\\\/${segTag}>$`, 'i');\n const mm = rx.exec(seg);\n if (!mm) return false;\n let content = mm[1];\n const shellLen = (`<${segTag}></${segTag}>`).length; // 11 for pre, 23 for blockquote, etc.\n\n // if it fits already → no split here\n if (seg.length <= available) return false;\n\n // flush current buffer to start fresh\n if (buffer.trim()) pushBuffer();\n\n while (content.length){\n // recompute room each loop (prefixOpen may change)\n const room = Math.max(200, maxLen - prefixOpen.length - shellLen - 10);\n const slice = content.slice(0, room);\n buffer = `<${segTag}>${slice}</${segTag}>`;\n pushBuffer();\n content = content.slice(slice.length);\n }\n return true;\n };\n\n for (const seg of segments){\n const segLen = seg.length;\n\n // if appending seg would overflow\n if ((prefixOpen.length + buffer.length + segLen) > maxLen){\n\n // 7.1 Try shrinking anchor text\n if (/^<a\\b/i.test(seg)){\n const room = maxLen - prefixOpen.length - buffer.length - 1;\n const shrunk = 
shrinkAnchorIfTooLong(seg, Math.max(60, room));\n if ((prefixOpen.length + buffer.length + shrunk.length) <= maxLen){\n buffer += shrunk;\n continue;\n }\n }\n\n // 7.2 Split huge <pre>/<blockquote> safely (keep wrappers)\n const available = maxLen - prefixOpen.length - buffer.length;\n if (/^<pre\\b/i.test(seg)){\n if (splitWrappedBlock('pre', seg, available)) continue;\n }\n if (/^<blockquote\\b/i.test(seg)){\n if (splitWrappedBlock('blockquote', seg, available)) continue;\n }\n\n // 7.3 Push current chunk\n pushBuffer();\n\n // 7.4 Place seg into new buffer or split plain text / hard-cut tag as last resort\n if ((prefixOpen.length + segLen) <= maxLen){\n buffer = seg;\n } else if (!seg.startsWith('<')) {\n // split plain text by whitespace then hard-cut if needed\n const words = seg.split(/(\\s+)/);\n for (const w of words){\n const candidate = buffer + w;\n if ((prefixOpen.length + candidate.length) > maxLen){\n pushBuffer();\n if ((prefixOpen.length + w.length) > maxLen){\n let s = w;\n const room = Math.max(200, maxLen - prefixOpen.length - 10);\n while (s.length){\n buffer = s.slice(0, room);\n pushBuffer();\n s = s.slice(room);\n }\n } else {\n buffer = w;\n }\n } else {\n buffer = candidate;\n }\n }\n } else {\n // FINAL FALLBACK: extremely long single tag (very rare)\n // Hard-cut by characters (may break styling but keeps length constraints)\n let start = 0;\n const room = Math.max(200, maxLen - prefixOpen.length - 10);\n while (start < segLen){\n buffer = seg.slice(start, start + room);\n pushBuffer();\n start += room;\n }\n }\n continue;\n }\n\n // fits → append\n buffer += seg;\n }\n\n if (buffer.trim()) pushBuffer();\n return chunks;\n}\n\n/* ===== Run ===== */\nconst normalized = normalizeToTelegramHtml(text);\nconst chunks = splitHtmlSmart(normalized, MAX_LEN);\n\n/* Export to Telegram node */\nreturn chunks.map(c => ({ json: { text: c } }));\n"
},
"typeVersion": 2
},
{
"id": "64b00ec5-0764-4742-80e6-305c9926a940",
"name": "Send Typing Action",
"type": "n8n-nodes-base.telegram",
"position": [
-3824,
-352
],
"webhookId": "0a00ab89-9035-4c14-a093-98c0b315f65a",
"parameters": {
"chatId": "={{ $('Telegram Trigger - User Message').item.json.message.from.id }}",
"operation": "sendChatAction"
},
"credentials": {
"telegramApi": {
"id": "o4Df0vGzQkRsas07",
"name": "n8n Notification Bot"
}
},
"typeVersion": 1.2
},
{
"id": "b16014a7-866e-4c51-b37a-6cd7512ddc67",
"name": "Haftnotiz",
"type": "n8n-nodes-base.stickyNote",
"position": [
-4960,
-1456
],
"parameters": {
"color": 2,
"width": 1584,
"height": 800,
"content": "## 1. Daily Pulse Automation \n\n- **Schedule Trigger:** Runs every morning at a set time (e.g., 8:00 AM). This reflects Toan’s philosophy of keeping automations consistent and reliable. \n- **Reddit & n8n Forum Search + Merge Sources:** Fetches trending posts from Reddit and the n8n Forum. The merging logic was crafted by Nguyễn Thiệu Toàn to unify multiple community voices into one feed. \n- **AI Summarizer Overview:** Summarizes the hottest discussions into concise insights—designed with Toan’s emphasis on clarity and usefulness. \n- **Format for Telegram & Send Auto Reply:** Splits summaries to fit Telegram’s limits and delivers them seamlessly. This attention to detail reflects Toan’s commitment to great user experience. "
},
"typeVersion": 1
},
{
"id": "0dd1e595-4c56-417d-a06f-5eeb0435c845",
"name": "Wrap as Data Object2",
"type": "n8n-nodes-base.code",
"position": [
-4016,
-1168
],
"parameters": {
"jsCode": "return [\n {\n json: {\n data: items.map(item => item.json)\n }\n }\n];\n"
},
"typeVersion": 2
},
{
"id": "bb812af8-4c1c-4800-a272-dd8a14f6b971",
"name": "Set Speicher ID Session",
"type": "n8n-nodes-base.set",
"position": [
-3808,
-1168
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "a4d9d186-0b7e-4e65-aecf-91f400e826d8",
"name": "MyTelegramID",
"type": "number",
"value": 6163095869
}
]
}
},
"typeVersion": 3.4
}
],
"pinData": {},
"connections": {
"1b85f710-e828-4b76-89f0-f07efff16922": {
"main": [
[
{
"node": "58fa20c0-632b-41ac-9596-3daa9c553529",
"type": "main",
"index": 0
}
]
]
},
"2378b2e7-79bf-4bce-b83b-7da7c3fd4ea7": {
"main": [
[
{
"node": "e0e5646a-cb32-457a-9cb3-24b76939f31e",
"type": "main",
"index": 0
}
]
]
},
"94a4c032-240c-4f5a-9f85-931a24ae6cf7": {
"main": [
[
{
"node": "441f9d16-c686-4bfb-8b7a-8db094e8effc",
"type": "main",
"index": 0
}
],
[
{
"node": "30dc7838-f81b-46bb-9f7f-178b67ce0b76",
"type": "main",
"index": 0
}
]
]
},
"3ca253c1-d29d-4675-93a1-736fc0a9cd87": {
"main": [
[
{
"node": "02bc28d4-3228-464b-adb7-8ce0a99c6d40",
"type": "main",
"index": 0
},
{
"node": "1dfa4b4b-5520-4e51-ab4c-996d02e28c16",
"type": "main",
"index": 0
}
],
[
{
"node": "1dfa4b4b-5520-4e51-ab4c-996d02e28c16",
"type": "main",
"index": 0
}
],
[
{
"node": "02bc28d4-3228-464b-adb7-8ce0a99c6d40",
"type": "main",
"index": 0
}
]
]
},
"2f34f6d8-9121-42b5-945e-800b715e8eaa": {
"main": [
[
{
"node": "8587a97b-abcd-443d-bf32-78369a0d1ade",
"type": "main",
"index": 0
}
]
]
},
"e0e5646a-cb32-457a-9cb3-24b76939f31e": {
"main": [
[
{
"node": "d2196535-97ca-414c-b85b-dcd35a9519ab",
"type": "main",
"index": 0
}
]
]
},
"b15abbe6-1436-4967-b72e-06d12524a289": {
"main": [
[
{
"node": "e059fea5-05a0-4310-9403-3aba0997d5f7",
"type": "main",
"index": 0
}
]
]
},
"1dfa4b4b-5520-4e51-ab4c-996d02e28c16": {
"main": [
[
{
"node": "7ec168e5-9121-453e-84bb-5b8404cb97ad",
"type": "main",
"index": 0
}
],
[
{
"node": "94a4c032-240c-4f5a-9f85-931a24ae6cf7",
"type": "main",
"index": 0
}
]
]
},
"Merge Sources1": {
"main": [
[
{
"node": "0dd1e595-4c56-417d-a06f-5eeb0435c845",
"type": "main",
"index": 0
}
]
]
},
"39ffde34-51f1-4463-9bf6-a7ca9410b34b": {
"main": [
[
{
"node": "989d3b40-cd71-486a-b2bf-a8aed22d801b",
"type": "main",
"index": 0
}
]
]
},
"6374695d-e955-4564-9f72-954edfa93e89": {
"ai_languageModel": [
[
{
"node": "981c5a5e-a19d-4617-b068-8bf57b2ed63b",
"type": "ai_languageModel",
"index": 0
},
{
"node": "dfe59ad7-1801-4d7b-b95b-656a84c06d5a",
"type": "ai_languageModel",
"index": 0
}
]
]
},
"24d74cc8-a0e1-44b0-8ebb-1bbdc72ac235": {
"main": [
[
{
"node": "3ca253c1-d29d-4675-93a1-736fc0a9cd87",
"type": "main",
"index": 0
}
],
[
{
"node": "6e91f239-5580-493f-b5fd-842e5ffa414b",
"type": "main",
"index": 0
}
],
[
{
"node": "f9a040e9-c186-40f9-841c-6c82431e6fd4",
"type": "main",
"index": 0
}
],
[
{
"node": "897009cf-d40d-4e60-b564-4bf64ec88225",
"type": "main",
"index": 0
}
]
]
},
"6d52e5f7-b2f5-4bda-b936-887378afdf0f": {
"main": [
[
{
"node": "a75a37ff-1b57-4c4d-9d09-82fba7118860",
"type": "main",
"index": 0
}
]
]
},
"8587a97b-abcd-443d-bf32-78369a0d1ade": {
"main": [
[
{
"node": "2378b2e7-79bf-4bce-b83b-7da7c3fd4ea7",
"type": "main",
"index": 0
},
{
"node": "Merge Link Content",
"type": "main",
"index": 0
}
]
]
},
"Schedule Trigger": {
"main": [
[
{
"node": "1b85f710-e828-4b76-89f0-f07efff16922",
"type": "main",
"index": 0
},
{
"node": "f0e544ac-71bc-4341-a99f-06d704e4f37e",
"type": "main",
"index": 0
}
]
]
},
"d2196535-97ca-414c-b85b-dcd35a9519ab": {
"main": [
[
{
"node": "Merge Link Content",
"type": "main",
"index": 1
}
]
]
},
"e059fea5-05a0-4310-9403-3aba0997d5f7": {
"main": [
[
{
"node": "Merge Link Content",
"type": "main",
"index": 1
}
]
]
},
"fc07b8f6-0dd8-4406-9301-bfc29a1f511f": {
"main": [
[
{
"node": "Merge Link Content",
"type": "main",
"index": 0
}
]
]
},
"8d7a273d-12a7-4795-9a2e-c3350c13c655": {
"main": [
[
{
"node": "fc07b8f6-0dd8-4406-9301-bfc29a1f511f",
"type": "main",
"index": 0
},
{
"node": "b15abbe6-1436-4967-b72e-06d12524a289",
"type": "main",
"index": 0
}
]
]
},
"981c5a5e-a19d-4617-b068-8bf57b2ed63b": {
"main": [
[
{
"node": "24d74cc8-a0e1-44b0-8ebb-1bbdc72ac235",
"type": "main",
"index": 0
},
{
"node": "64b00ec5-0764-4742-80e6-305c9926a940",
"type": "main",
"index": 0
}
]
]
},
"Merge Link Content": {
"main": [
[
{
"node": "6e91f239-5580-493f-b5fd-842e5ffa414b",
"type": "main",
"index": 0
}
]
]
},
"Merge Search Result": {
"main": [
[
{
"node": "6e91f239-5580-493f-b5fd-842e5ffa414b",
"type": "main",
"index": 0
}
]
]
},
"MongoDB Chat Memory": {
"ai_memory": [
[
{
"node": "981c5a5e-a19d-4617-b068-8bf57b2ed63b",
"type": "ai_memory",
"index": 0
}
]
]
},
"6e91f239-5580-493f-b5fd-842e5ffa414b": {
"main": [
[
{
"node": "AI Summarizer Deep-dive",
"type": "main",
"index": 0
}
]
]
},
"f0e544ac-71bc-4341-a99f-06d704e4f37e": {
"main": [
[
{
"node": "787c3a75-5adf-4224-82e6-352e8bbb21e3",
"type": "main",
"index": 0
}
]
]
},
"MongoDB Chat Memory1": {
"ai_memory": [
[
{
"node": "AI Summarizer Deep-dive",
"type": "ai_memory",
"index": 0
}
]
]
},
"MongoDB Chat Memory2": {
"ai_memory": [
[
{
"node": "AI Summarizer Overview",
"type": "ai_memory",
"index": 0
}
]
]
},
"MongoDB Chat Memory3": {
"ai_memory": [
[
{
"node": "897009cf-d40d-4e60-b564-4bf64ec88225",
"type": "ai_memory",
"index": 0
}
]
]
},
"0dd1e595-4c56-417d-a06f-5eeb0435c845": {
"main": [
[
{
"node": "Set Memory ID Session",
"type": "main",
"index": 0
}
]
]
},
"897009cf-d40d-4e60-b564-4bf64ec88225": {
"main": [
[
{
"node": "6d52e5f7-b2f5-4bda-b936-887378afdf0f",
"type": "main",
"index": 0
}
]
]
},
"Set Memory ID Session": {
"main": [
[
{
"node": "AI Summarizer Overview",
"type": "main",
"index": 0
}
]
]
},
"AI Summarizer Overview": {
"main": [
[
{
"node": "39ffde34-51f1-4463-9bf6-a7ca9410b34b",
"type": "main",
"index": 0
}
]
]
},
"AI Summarizer Deep-dive": {
"main": [
[
{
"node": "cd19593f-3229-4620-b606-ac2b9c688d73",
"type": "main",
"index": 0
}
]
]
},
"58fa20c0-632b-41ac-9596-3daa9c553529": {
"main": [
[
{
"node": "Merge Sources1",
"type": "main",
"index": 0
}
]
]
},
"787c3a75-5adf-4224-82e6-352e8bbb21e3": {
"main": [
[
{
"node": "Merge Sources1",
"type": "main",
"index": 1
}
]
]
},
"Google Gemini Chat Model": {
"ai_languageModel": [
[
{
"node": "AI Summarizer Deep-dive",
"type": "ai_languageModel",
"index": 0
}
]
]
},
"dfe59ad7-1801-4d7b-b95b-656a84c06d5a": {
"ai_outputParser": [
[
{
"node": "981c5a5e-a19d-4617-b068-8bf57b2ed63b",
"type": "ai_outputParser",
"index": 0
}
]
]
},
"02bc28d4-3228-464b-adb7-8ce0a99c6d40": {
"main": [
[
{
"node": "57f5c5c6-abfa-49e6-af85-3d3d14dd43dc",
"type": "main",
"index": 0
}
]
]
},
"Google Gemini Chat Model1": {
"ai_languageModel": [
[
{
"node": "AI Summarizer Overview",
"type": "ai_languageModel",
"index": 0
}
]
]
},
"Google Gemini Chat Model2": {
"ai_languageModel": [
[
{
"node": "897009cf-d40d-4e60-b564-4bf64ec88225",
"type": "ai_languageModel",
"index": 0
}
]
]
},
"cd19593f-3229-4620-b606-ac2b9c688d73": {
"main": [
[
{
"node": "f665afa8-3a9e-48fd-9276-85193319221c",
"type": "main",
"index": 0
}
]
]
},
"30dc7838-f81b-46bb-9f7f-178b67ce0b76": {
"main": [
[
{
"node": "1164897e-b75f-4424-b5c3-9bda2c37116c",
"type": "main",
"index": 0
}
]
]
},
"1164897e-b75f-4424-b5c3-9bda2c37116c": {
"main": [
[
{
"node": "Merge Search Result",
"type": "main",
"index": 0
}
]
]
},
"Telegram Trigger - User Message": {
"main": [
[
{
"node": "981c5a5e-a19d-4617-b068-8bf57b2ed63b",
"type": "main",
"index": 0
}
]
]
},
"f9a040e9-c186-40f9-841c-6c82431e6fd4": {
"main": [
[
{
"node": "2f34f6d8-9121-42b5-945e-800b715e8eaa",
"type": "main",
"index": 0
}
],
[
{
"node": "8d7a273d-12a7-4795-9a2e-c3350c13c655",
"type": "main",
"index": 0
}
]
]
},
"441f9d16-c686-4bfb-8b7a-8db094e8effc": {
"main": [
[
{
"node": "1164897e-b75f-4424-b5c3-9bda2c37116c",
"type": "main",
"index": 0
}
]
]
},
"57f5c5c6-abfa-49e6-af85-3d3d14dd43dc": {
"main": [
[
{
"node": "Merge Search Result",
"type": "main",
"index": 1
}
]
]
},
"7ec168e5-9121-453e-84bb-5b8404cb97ad": {
"main": [
[
{
"node": "1164897e-b75f-4424-b5c3-9bda2c37116c",
"type": "main",
"index": 0
}
]
]
}
}
}
Wie verwende ich diesen Workflow?
Kopieren Sie den obigen JSON-Code, erstellen Sie einen neuen Workflow in Ihrer n8n-Instanz und wählen Sie "Aus JSON importieren". Fügen Sie die Konfiguration ein und passen Sie die Anmeldedaten nach Bedarf an.
Für welche Szenarien ist dieser Workflow geeignet?
Experte - Verschiedenes, KI-Chatbot, Multimodale KI
Ist es kostenpflichtig?
Dieser Workflow ist völlig kostenlos. Beachten Sie jedoch, dass Drittanbieterdienste (wie OpenAI API), die im Workflow verwendet werden, möglicherweise kostenpflichtig sind.
Verwandte Workflows
Nguyen Thieu Toan
@nguyenthieutoan — An AI Automation consultant and system builder specializing in business workflow optimization with n8n. As the Founder of GenStaff, Toan empowers teams to automate complex processes using no-code/low-code tools and AI Agents, making operations smarter and more efficient. He actively shares expertise and tutorials about n8n, AI, and automation on his blog and social channels.
Diesen Workflow teilen