LLM模板
高级
这是一个工程、AI RAG 领域的自动化工作流,包含 25 个节点,主要使用 Set, Agent, ChatTrigger, LmChatOpenAi, RerankerCohere 等节点,使用 GPT-4o-mini 和 Qdrant 向量数据库构建持久聊天记忆。
前置要求
- OpenAI API Key
- Qdrant 服务器连接信息
使用的节点 (25)
工作流预览
可视化展示节点连接关系,支持缩放和平移
导出工作流
复制以下 JSON 配置到 n8n 导入,即可使用此工作流
{
"id": "EDZcm0r7Lp2uIkTn",
"meta": {
"instanceId": "48f9e8e7598a73c86aec19069eefaf1e83b51b8858cbb8999ee59d6fa3d9a3f2",
"templateCredsSetupCompleted": true
},
"name": "LLM_TEMPLATE",
"tags": [],
"nodes": [
{
"id": "265bbb29-3ae9-49dd-9d77-4a8230af5f3e",
"name": "Embeddings OpenAI",
"type": "@n8n/n8n-nodes-langchain.embeddingsOpenAi",
"position": [
816,
672
],
"parameters": {
"options": {
"dimensions": 1024
}
},
"credentials": {
"openAiApi": {
"id": "uHrKvsqlQYyImnjO",
"name": "openai - einarcesar@gmail.com"
}
},
"typeVersion": 1.2
},
{
"id": "8e8d619d-8356-485e-9ba5-26489e7ef46c",
"name": "便签",
"type": "n8n-nodes-base.stickyNote",
"position": [
560,
800
],
"parameters": {
"width": 324,
"height": 416,
"content": "## 🔤 文本向量化"
},
"typeVersion": 1
},
{
"id": "ae0a96a7-6cd5-4868-aadb-2b91e3e8f448",
"name": "默认数据加载器",
"type": "@n8n/n8n-nodes-langchain.documentDefaultDataLoader",
"position": [
944,
672
],
"parameters": {
"options": {}
},
"typeVersion": 1
},
{
"id": "f8685c76-dde4-400a-a359-e52348d9f0ae",
"name": "便签 2",
"type": "n8n-nodes-base.stickyNote",
"position": [
944,
1024
],
"parameters": {
"width": 324,
"height": 371,
"content": "## 📄 文档处理器"
},
"typeVersion": 1
},
{
"id": "fd60c06d-0c22-40fc-ab62-7f87b4c6f29a",
"name": "递归字符文本分割器",
"type": "@n8n/n8n-nodes-langchain.textSplitterRecursiveCharacterTextSplitter",
"position": [
1040,
880
],
"parameters": {
"options": {},
"chunkSize": 200,
"chunkOverlap": 40
},
"typeVersion": 1
},
{
"id": "487e7425-4d1e-48be-9d92-5398e6328279",
"name": "便签 3",
"type": "n8n-nodes-base.stickyNote",
"position": [
1296,
848
],
"parameters": {
"width": 340,
"height": 392,
"content": "## ✂️ 文本分块策略"
},
"typeVersion": 1
},
{
"id": "922bfcdb-14ce-40cc-a3df-e89be2d59635",
"name": "当收到聊天消息时",
"type": "@n8n/n8n-nodes-langchain.chatTrigger",
"position": [
-112,
448
],
"webhookId": "ef238f10-3af1-409d-b7e8-3bf61cd357e4",
"parameters": {
"options": {}
},
"typeVersion": 1.1
},
{
"id": "a8f24cf0-077f-43ea-a5b6-885ef7069948",
"name": "### DeepSeek Reasoner R1",
"type": "n8n-nodes-base.stickyNote",
"position": [
-464,
320
],
"parameters": {
"width": 340,
"height": 428,
"content": "## 💬 聊天界面"
},
"typeVersion": 1
},
{
"id": "58538d83-7b62-47ea-a099-143517886719",
"name": "检索用嵌入向量",
"type": "@n8n/n8n-nodes-langchain.embeddingsOpenAi",
"position": [
208,
896
],
"parameters": {
"options": {
"dimensions": 1024
}
},
"credentials": {
"openAiApi": {
"id": "uHrKvsqlQYyImnjO",
"name": "openai - einarcesar@gmail.com"
}
},
"typeVersion": 1.2
},
{
"id": "64ee54af-4f43-4b8a-a73b-f0fb02a69fca",
"name": "## 带记忆功能的 DeepSeek 对话代理",
"type": "n8n-nodes-base.stickyNote",
"position": [
208,
1024
],
"parameters": {
"color": 6,
"width": 340,
"height": 260,
"content": "## 🔍 检索嵌入向量"
},
"typeVersion": 1
},
{
"id": "b86d86d8-8595-4c27-bc9f-45e706d08623",
"name": "Cohere 重新排序器",
"type": "@n8n/n8n-nodes-langchain.rerankerCohere",
"position": [
464,
1328
],
"parameters": {},
"credentials": {
"cohereApi": {
"id": "7GqfOJcuJFHWeOpS",
"name": "CohereApi account"
}
},
"typeVersion": 1
},
{
"id": "43ef1754-fe7a-4435-be5f-5cc912ef7590",
"name": "便签 6",
"type": "n8n-nodes-base.stickyNote",
"position": [
576,
1248
],
"parameters": {
"color": 5,
"width": 340,
"height": 396,
"content": "## 🎯 相关性优化器"
},
"typeVersion": 1
},
{
"id": "9df9f1e4-b067-4ec8-8ee1-1d64f256081a",
"name": "RAG_MEMORY",
"type": "@n8n/n8n-nodes-langchain.vectorStoreQdrant",
"onError": "continueRegularOutput",
"position": [
160,
688
],
"parameters": {
"mode": "retrieve-as-tool",
"topK": 20,
"options": {},
"toolName": "RAG_MEMORY",
"useReranker": true,
"toolDescription": "Long-term memory storage for maintaining context across conversations. Use this to recall previous interactions, user preferences, and historical context.",
"qdrantCollection": {
"__rl": true,
"mode": "list",
"value": "ltm",
"cachedResultName": "ltm"
}
},
"credentials": {
"qdrantApi": {
"id": "IMqj7iGvb0Ko0nCj",
"name": "Qdrant - einar.qzz.io"
}
},
"typeVersion": 1.2
},
{
"id": "a2f24b1e-df0f-4c10-b525-4eeea31edf7e",
"name": "便签 7",
"type": "n8n-nodes-base.stickyNote",
"position": [
-160,
784
],
"parameters": {
"color": 3,
"width": 340,
"height": 428,
"content": "## 🧠 记忆检索系统"
},
"typeVersion": 1
},
{
"id": "33b1f48b-9700-4edb-a73b-5889316e7cdf",
"name": "OpenAI 聊天模型",
"type": "@n8n/n8n-nodes-langchain.lmChatOpenAi",
"position": [
432,
816
],
"parameters": {
"model": {
"__rl": true,
"mode": "list",
"value": "gpt-4o-mini"
},
"options": {
"maxTokens": 2000,
"temperature": 0.7
}
},
"credentials": {
"openAiApi": {
"id": "uHrKvsqlQYyImnjO",
"name": "openai - einarcesar@gmail.com"
}
},
"typeVersion": 1.2
},
{
"id": "866bf74b-dc5b-4bf9-b01d-8fbc2da442c1",
"name": "结构化输出解析器",
"type": "@n8n/n8n-nodes-langchain.outputParserStructured",
"position": [
544,
112
],
"parameters": {
"autoFix": true,
"jsonSchemaExample": "{\n \"sessionId\": \"unique-session-identifier\",\n \"chatInput\": \"User's message\",\n \"output\": \"AI's response\",\n \"timestamp\": \"2024-01-01T12:00:00Z\",\n \"relevanceScore\": 0.95\n}"
},
"typeVersion": 1.3
},
{
"id": "130381f8-4d66-4a5c-b233-5079e3630f71",
"name": "便签8",
"type": "n8n-nodes-base.stickyNote",
"position": [
848,
48
],
"parameters": {
"color": 4,
"width": 340,
"height": 344,
"content": "## 📐 输出格式化器"
},
"typeVersion": 1
},
{
"id": "6d7eb364-5c8f-4d6a-9ef6-51e3a1fc45bd",
"name": "格式化响应",
"type": "n8n-nodes-base.set",
"position": [
1504,
-32
],
"parameters": {
"options": {},
"assignments": {
"assignments": [
{
"id": "fdd39640-54c5-4ed7-9f37-c8cd4302a212",
"name": "output",
"type": "string",
"value": "={{ $('AI Agent').first().json.output.output }}"
}
]
}
},
"executeOnce": true,
"typeVersion": 3.4
},
{
"id": "cdc23122-cf49-4e54-922e-4990f5a2a5ee",
"name": "便签9",
"type": "n8n-nodes-base.stickyNote",
"position": [
1696,
-64
],
"parameters": {
"width": 340,
"height": 304,
"content": "## 🎨 响应格式化器"
},
"typeVersion": 1
},
{
"id": "98430332-8de1-48f9-b883-017e7ee35983",
"name": "AI 代理",
"type": "@n8n/n8n-nodes-langchain.agent",
"position": [
144,
432
],
"parameters": {
"options": {
"systemMessage": "# AI Assistant with Long-Term Memory\n\nYou are an AI assistant equipped with a sophisticated long-term memory system. Your RAG_MEMORY tool allows you to recall past conversations, user preferences, and contextual information across sessions.\n\n## Core Capabilities:\n1. **Context Retention**: Remember and reference previous conversations\n2. **User Personalization**: Adapt responses based on learned preferences\n3. **Knowledge Accumulation**: Build upon past interactions\n4. **Intelligent Retrieval**: Access relevant historical context\n\n## Memory Usage Protocol:\n\n### Before Each Response:\n1. Query RAG_MEMORY for relevant past interactions\n2. Analyze retrieved context for applicable information\n3. Integrate historical knowledge into your response\n4. Maintain consistency with previous conversations\n\n### Memory Query Strategies:\n- Use specific keywords from the current conversation\n- Search for user preferences and patterns\n- Look for related topics discussed previously\n- Check for unresolved questions or follow-ups\n\n## Response Guidelines:\n1. **Acknowledge Continuity**: Reference previous conversations when relevant\n2. **Build on History**: Use past context to provide more informed responses\n3. **Maintain Consistency**: Ensure responses align with established facts\n4. **Update Understanding**: Evolve your knowledge based on new information\n\n## Privacy & Ethics:\n- Only reference information from this user's history\n- Respect conversation boundaries\n- Maintain appropriate context separation\n\n## Example Interaction Flow:\n```\nUser: \"What was that book you recommended last week?\"\n1. Query RAG_MEMORY for \"book recommendation\"\n2. Retrieve relevant conversation\n3. Provide specific book title and context\n4. Offer additional related suggestions\n```\n\nRemember: Your memory makes you more than just an AI - you're a continuous conversation partner who learns and grows with each interaction."
},
"hasOutputParser": true
},
"typeVersion": 2
},
{
"id": "ef4a29b4-68f4-491e-b44b-3345455907a6",
"name": "便签10",
"type": "n8n-nodes-base.stickyNote",
"position": [
48,
-112
],
"parameters": {
"width": 360,
"height": 516,
"content": "## 🧠 智能AI代理"
},
"typeVersion": 1
},
{
"id": "db4e8e6b-0aee-4a93-8a01-bf38b7de9d98",
"name": "存储对话",
"type": "@n8n/n8n-nodes-langchain.vectorStoreQdrant",
"position": [
832,
448
],
"parameters": {
"mode": "insert",
"options": {},
"qdrantCollection": {
"__rl": true,
"mode": "list",
"value": "ltm",
"cachedResultName": "ltm"
}
},
"credentials": {
"qdrantApi": {
"id": "IMqj7iGvb0Ko0nCj",
"name": "Qdrant - einar.qzz.io"
}
},
"typeVersion": 1.2
},
{
"id": "13c42bd9-273d-4c9e-9e69-a324963f3f4f",
"name": "| api_key | https://platform.deepseek.com/api_keys |",
"type": "n8n-nodes-base.stickyNote",
"position": [
1248,
304
],
"parameters": {
"color": 2,
"width": 340,
"height": 496,
"content": "## 💾 记忆存储"
},
"typeVersion": 1
},
{
"id": "4237b604-513f-463c-b891-cb5bd4d588a6",
"name": "GPT-4o-mini(主)",
"type": "@n8n/n8n-nodes-langchain.lmChatOpenAi",
"position": [
32,
576
],
"parameters": {
"model": {
"__rl": true,
"mode": "list",
"value": "gpt-4o-mini",
"cachedResultName": "gpt-4o-mini"
},
"options": {
"topP": 0.7,
"temperature": 0.2,
"presencePenalty": 0.3,
"frequencyPenalty": 0.6
}
},
"credentials": {
"openAiApi": {
"id": "uHrKvsqlQYyImnjO",
"name": "openai - einarcesar@gmail.com"
}
},
"typeVersion": 1.2
},
{
"id": "1a882e59-d391-4923-9c18-68dfe99d6b47",
"name": "工作流概览",
"type": "n8n-nodes-base.stickyNote",
"position": [
-944,
160
],
"parameters": {
"color": 7,
"width": 460,
"height": 972,
"content": "## 🚀 工作流概览"
},
"typeVersion": 1
}
],
"active": false,
"pinData": {},
"settings": {
"executionOrder": "v1"
},
"versionId": "6b90a41f-8415-4e59-9082-48bf175e4804",
"connections": {
"AI Agent": {
"main": [
[
{
"node": "Store Conversation",
"type": "main",
"index": 0
}
]
]
},
"RAG_MEMORY": {
"ai_tool": [
[
{
"node": "AI Agent",
"type": "ai_tool",
"index": 0
}
]
]
},
"Reranker Cohere": {
"ai_reranker": [
[
{
"node": "RAG_MEMORY",
"type": "ai_reranker",
"index": 0
}
]
]
},
"Embeddings OpenAI": {
"ai_embedding": [
[
{
"node": "Store Conversation",
"type": "ai_embedding",
"index": 0
}
]
]
},
"OpenAI Chat Model": {
"ai_languageModel": [
[
{
"node": "Structured Output Parser",
"type": "ai_languageModel",
"index": 0
}
]
]
},
"GPT-4o-mini (Main)": {
"ai_languageModel": [
[
{
"node": "AI Agent",
"type": "ai_languageModel",
"index": 0
}
]
]
},
"Store Conversation": {
"main": [
[
{
"node": "Format Response",
"type": "main",
"index": 0
}
]
]
},
"Default Data Loader": {
"ai_document": [
[
{
"node": "Store Conversation",
"type": "ai_document",
"index": 0
}
]
]
},
"Embeddings for Retrieval": {
"ai_embedding": [
[
{
"node": "RAG_MEMORY",
"type": "ai_embedding",
"index": 0
}
]
]
},
"Structured Output Parser": {
"ai_outputParser": [
[
{
"node": "AI Agent",
"type": "ai_outputParser",
"index": 0
}
]
]
},
"When chat message received": {
"main": [
[
{
"node": "AI Agent",
"type": "main",
"index": 0
}
]
]
},
"Recursive Character Text Splitter": {
"ai_textSplitter": [
[
{
"node": "Default Data Loader",
"type": "ai_textSplitter",
"index": 0
}
]
]
}
}
}
常见问题
如何使用这个工作流?
复制上方的 JSON 配置代码,在您的 n8n 实例中创建新工作流并选择「从 JSON 导入」,粘贴配置后根据需要修改凭证设置即可。
这个工作流适合什么场景?
高级 - 工程, AI RAG 检索增强
需要付费吗?
本工作流完全免费,您可以直接导入使用。但请注意,工作流中使用的第三方服务(如 OpenAI API)可能需要您自行付费。
相关工作流推荐
重新排序 #1
使用Apify、GPT-4o和WhatsApp自动化销售冷呼叫管道
Set
Code
Webhook
+18
48 节点Khairul Muhtadin
客户培育
使用OpenAI评估RAG响应准确性:文档基础性指标
使用OpenAI评估RAG响应准确性:文档基础性指标
Set
Evaluation
Http Request
+13
25 节点Jimleuk
工程
上下文混合RAG AI文案
Google Drive到Supabase上下文向量数据库同步用于RAG应用
If
Set
Code
+25
76 节点Michael Taleb
AI RAG 检索增强
AI智能助手:与Supabase存储和Google Drive文件对话
AI智能助手:与Supabase存储和Google Drive文件对话
If
Set
Wait
+20
62 节点Mark Shcherbakov
工程
使用Gmail、GPT-4和向量知识库的自动化客户支持系统
使用Gmail、GPT-4和向量知识库的自动化客户支持系统
If
Set
Code
+15
32 节点Khair Ahammed
AI RAG 检索增强
简单 RAG
使用OpenAI、Pinecone和Cohere重排序构建基于PDF的RAG系统
Form Trigger
Agent
Chat Trigger
+8
14 节点Aji Prakoso
内部知识库