我的工作流 2
高级
这是一个Engineering, AI RAG领域的自动化工作流,包含 19 个节点。主要使用 If, Code, OpenAi, WebSearch, FormTrigger 等节点。 多源RAG系统,集成GPT-4 Turbo、新闻和学术论文
前置要求
- OpenAI API Key
- 可能需要目标 API 的认证凭证
- HTTP Webhook 端点(n8n 会自动生成)
- Google Drive API 凭证
工作流预览
可视化展示节点连接关系,支持缩放和平移
导出工作流
复制以下 JSON 配置到 n8n 导入,即可使用此工作流
{
"id": "VhEwspDqzu7ssFVE",
"meta": {
"instanceId": "f4b0efaa33080e7774e0d9285c40c7abcd2c6f7cf1a8b901fa7106170dd4cda3",
"templateCredsSetupCompleted": true
},
"name": "我的工作流 2",
"tags": [
{
"id": "DxXGubfBzRKh6L8T",
"name": "Revenue Optimization",
"createdAt": "2025-07-25T16:24:30.370Z",
"updatedAt": "2025-07-25T16:24:30.370Z"
},
{
"id": "IxkcJ2IpYIxivoHV",
"name": "Content Strategy",
"createdAt": "2025-07-25T12:57:37.677Z",
"updatedAt": "2025-07-25T12:57:37.677Z"
},
{
"id": "PAKIJ2Mm9EvRcR3u",
"name": "Trend Monitoring",
"createdAt": "2025-07-25T12:57:37.670Z",
"updatedAt": "2025-07-25T12:57:37.670Z"
},
{
"id": "YtfXmaZk44MYedPO",
"name": "Dynamic Pricing",
"createdAt": "2025-07-25T16:24:30.369Z",
"updatedAt": "2025-07-25T16:24:30.369Z"
},
{
"id": "wJ30mjhtrposO8Qt",
"name": "Simple RAG",
"createdAt": "2025-07-28T12:55:14.424Z",
"updatedAt": "2025-07-28T12:55:14.424Z"
}
],
"nodes": [
{
"id": "0247527a-478c-4ce6-a999-cea55f4923ac",
"name": "🚀 高级 RAG 表单",
"type": "n8n-nodes-base.formTrigger",
"position": [
-1696,
160
],
"webhookId": "8f393097-f14b-40a2-8d4c-c28066c84bec",
"parameters": {
"options": {},
"formTitle": "🧠 Enterprise RAG Assistant",
"formFields": {
"values": [
{
"fieldType": "textarea",
"fieldLabel": "query",
"requiredField": true
},
{
"fieldType": "select",
"fieldLabel": "search_scope",
"requiredField": true
},
{
"fieldType": "select",
"fieldLabel": "response_style"
},
{
"fieldType": "select",
"fieldLabel": "language"
},
{
"fieldType": "select",
"fieldLabel": "output_format"
},
{
"fieldType": "email",
"fieldLabel": "user_email"
},
{
"fieldType": "select",
"fieldLabel": "priority_level"
}
]
},
"formDescription": "Advanced RAG system for intelligent research and content generation"
},
"typeVersion": 2.2
},
{
"id": "97c556f7-c675-4fb7-9e31-10926885485a",
"name": "📋 表单配置",
"type": "n8n-nodes-base.stickyNote",
"position": [
-1760,
-512
],
"parameters": {
"color": 4,
"width": 650,
"height": 600,
"content": "# 高级 RAG 表单 🚀"
},
"typeVersion": 1
},
{
"id": "de5b5d88-8e1e-4bfc-91bf-905d709a6cb9",
"name": "🔍 查询预处理器",
"type": "n8n-nodes-base.code",
"position": [
-1376,
160
],
"parameters": {
"jsCode": "// Input validation and preprocessing\nconst input = $input.all()[0].json;\n\n// Validate required fields\nif (!input.query || input.query.trim().length < 3) {\n throw new Error('Query too short. Please enter at least 3 characters.');\n}\n\n// Clean and preprocess query\nconst processedQuery = {\n original_query: input.query,\n cleaned_query: input.query.trim().replace(/\\s+/g, ' '),\n query_length: input.query.trim().length,\n estimated_complexity: input.query.split(' ').length > 10 ? 'high' : 'medium',\n search_scope: input.search_scope || 'web_general',\n response_style: input.response_style || 'comprehensive',\n language: input.language || 'italian',\n output_format: input.output_format || 'markdown',\n priority_level: input.priority_level || 'medium',\n user_email: input.user_email || null,\n session_id: `rag_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`,\n timestamp: new Date().toISOString(),\n processing_steps: []\n};\n\n// Add processing metadata\nprocessedQuery.processing_steps.push({\n step: 'input_validation',\n status: 'completed',\n timestamp: new Date().toISOString()\n});\n\n// Generate search keywords from query\nconst keywords = processedQuery.cleaned_query\n .toLowerCase()\n .split(' ')\n .filter(word => word.length > 3)\n .slice(0, 8);\n\nprocessedQuery.search_keywords = keywords;\n\n// Determine search strategy based on scope\nconst searchStrategies = {\n 'web_general': ['google', 'bing', 'duckduckgo'],\n 'academic_papers': ['scholar', 'arxiv', 'pubmed'],\n 'news_articles': ['news_api', 'rss_feeds'],\n 'technical_docs': ['github', 'stackoverflow', 'documentation'],\n 'company_internal': ['drive', 'confluence', 'notion'],\n 'multi_source': ['google', 'scholar', 'news_api', 'github']\n};\n\nprocessedQuery.search_strategy = searchStrategies[processedQuery.search_scope] || ['google'];\n\nconsole.log('Processed query:', JSON.stringify(processedQuery, null, 2));\n\nreturn [{ json: processedQuery }];"
},
"typeVersion": 2
},
{
"id": "00133ca9-7379-4692-9500-35289f24c5f0",
"name": "🔄 搜索路由器",
"type": "n8n-nodes-base.if",
"position": [
-1056,
160
],
"parameters": {
"options": {},
"conditions": {
"options": {
"leftValue": "",
"caseSensitive": true,
"typeValidation": "strict"
},
"combinator": "and",
"conditions": [
{
"id": "scope-check",
"operator": {
"type": "string",
"operation": "equals"
},
"leftValue": "={{ $json.search_scope }}",
"rightValue": "company_internal"
}
]
}
},
"typeVersion": 2
},
{
"id": "377114fa-2f1b-4135-a054-86ac460fc319",
"name": "🏢 内部知识搜索",
"type": "n8n-nodes-base.googleDriveSearch",
"position": [
-1056,
368
],
"parameters": {},
"typeVersion": 1
},
{
"id": "48da1505-73dd-424f-8c70-fa54ef66cf2c",
"name": "🌐 增强网页搜索",
"type": "n8n-nodes-base.webSearch",
"position": [
-1056,
-48
],
"parameters": {},
"typeVersion": 1
},
{
"id": "9a805200-52bb-4e8b-8435-87be41b23c40",
"name": "🎓 学术论文搜索",
"type": "n8n-nodes-base.httpRequest",
"position": [
-736,
64
],
"parameters": {
"url": "https://api.crossref.org/works",
"options": {
"timeout": 30000
},
"sendQuery": true,
"queryParameters": {
"parameters": [
{
"name": "query",
"value": "={{ $json.cleaned_query }}"
},
{
"name": "rows",
"value": "10"
},
{
"name": "sort",
"value": "relevance"
},
{
"name": "order",
"value": "desc"
}
]
}
},
"typeVersion": 4.2
},
{
"id": "e441e67d-4de3-41cf-939b-3232f7389fe3",
"name": "📰 新闻搜索 API",
"type": "n8n-nodes-base.httpRequest",
"position": [
-736,
272
],
"parameters": {
"url": "https://newsapi.org/v2/everything",
"options": {
"timeout": 30000
},
"sendQuery": true,
"authentication": "headerAuth",
"queryParameters": {
"parameters": [
{
"name": "q",
"value": "={{ $json.cleaned_query }}"
},
{
"name": "sortBy",
"value": "relevancy"
},
{
"name": "pageSize",
"value": "10"
},
{
"name": "language",
"value": "={{ $json.language === 'italian' ? 'it' : 'en' }}"
}
]
}
},
"typeVersion": 4.2
},
{
"id": "4faa39af-1e65-4f1b-8a54-3648f0cc285a",
"name": "📊 数据聚合器",
"type": "n8n-nodes-base.code",
"position": [
-416,
160
],
"parameters": {
"jsCode": "// Advanced data aggregation and processing\nconst allInputs = $input.all();\nlet aggregatedData = {\n session_id: '',\n query_info: {},\n sources: [],\n total_results: 0,\n processing_time: Date.now(),\n quality_score: 0\n};\n\n// Process each input source\nallInputs.forEach((input, index) => {\n const data = input.json;\n \n // Extract session info from first input\n if (index === 0) {\n aggregatedData.session_id = data.session_id || `rag_${Date.now()}`;\n aggregatedData.query_info = {\n original_query: data.original_query || data.cleaned_query,\n cleaned_query: data.cleaned_query,\n search_scope: data.search_scope,\n response_style: data.response_style || 'comprehensive',\n language: data.language || 'italian'\n };\n }\n \n // Process web search results\n if (data.results && Array.isArray(data.results)) {\n data.results.forEach(item => {\n aggregatedData.sources.push({\n type: 'web_content',\n title: item.title || 'Web Content',\n content: item.snippet || item.description || '',\n url: item.url || item.link || '',\n relevance_score: 0.8,\n source_quality: 'high',\n timestamp: new Date().toISOString()\n });\n });\n }\n \n // Process Google Drive results\n if (data.name && data.mimeType) {\n aggregatedData.sources.push({\n type: 'internal_document',\n title: data.name,\n content: data.content || data.description || '',\n url: data.webViewLink || '',\n relevance_score: 0.9,\n source_quality: 'internal',\n timestamp: data.modifiedTime || new Date().toISOString()\n });\n }\n \n // Process academic papers\n if (data.message && data.message.items) {\n data.message.items.forEach(paper => {\n if (paper.title && paper.abstract) {\n aggregatedData.sources.push({\n type: 'academic_paper',\n title: Array.isArray(paper.title) ? paper.title[0] : paper.title,\n content: paper.abstract || '',\n url: paper.URL || '',\n relevance_score: 0.95,\n source_quality: 'academic',\n authors: paper.author ? 
paper.author.map(a => `${a.given || ''} ${a.family || ''}`).join(', ') : '',\n timestamp: paper.published ? (paper.published['date-parts'] ? paper.published['date-parts'][0].join('-') : new Date().toISOString()) : new Date().toISOString()\n });\n }\n });\n }\n \n // Process news articles\n if (data.articles && Array.isArray(data.articles)) {\n data.articles.forEach(article => {\n aggregatedData.sources.push({\n type: 'news_article',\n title: article.title || 'News Article',\n content: article.description || article.content || '',\n url: article.url || '',\n relevance_score: 0.85,\n source_quality: 'news',\n author: article.author || '',\n timestamp: article.publishedAt || new Date().toISOString()\n });\n });\n }\n});\n\n// Calculate quality metrics\naggregatedData.total_results = aggregatedData.sources.length;\naggregatedData.quality_score = aggregatedData.sources.reduce((sum, source) => \n sum + source.relevance_score, 0) / Math.max(aggregatedData.total_results, 1);\n\n// Sort sources by relevance\naggregatedData.sources.sort((a, b) => b.relevance_score - a.relevance_score);\n\n// Limit to top 15 most relevant sources\naggregatedData.sources = aggregatedData.sources.slice(0, 15);\n\n// Add processing metadata\naggregatedData.processing_time = Date.now() - aggregatedData.processing_time;\naggregatedData.aggregation_timestamp = new Date().toISOString();\n\nconsole.log(`Aggregated ${aggregatedData.total_results} sources with quality score: ${aggregatedData.quality_score.toFixed(2)}`);\n\nreturn [{ json: aggregatedData }];"
},
"typeVersion": 2
},
{
"id": "52fe7489-732b-4610-afc0-2c55d4b8f535",
"name": "🧠 上下文构建器",
"type": "n8n-nodes-base.code",
"position": [
-96,
160
],
"parameters": {
"jsCode": "// Advanced context preparation for LLM\nconst data = $input.all()[0].json;\n\n// Build comprehensive context\nlet contextSections = [];\n\n// Add query information\ncontextSections.push(`## QUERY INFORMATION\\nOriginal Query: ${data.query_info.original_query}\\nCleaned Query: ${data.query_info.cleaned_query}\\nSearch Scope: ${data.query_info.search_scope}\\nResponse Style: ${data.query_info.response_style}\\nLanguage: ${data.query_info.language}`);\n\n// Add sources with structured formatting\nif (data.sources && data.sources.length > 0) {\n contextSections.push('## RETRIEVED SOURCES');\n \n data.sources.forEach((source, index) => {\n const sourceSection = `\\n### Source ${index + 1}: ${source.title}\\n**Type**: ${source.type}\\n**Relevance**: ${(source.relevance_score * 100).toFixed(1)}%\\n**Quality**: ${source.source_quality}\\n**URL**: ${source.url}\\n${source.author ? `**Author**: ${source.author}` : ''}\\n**Timestamp**: ${source.timestamp}\\n\\n**Content**:\\n${source.content.substring(0, 1500)}${source.content.length > 1500 ? '...' : ''}\\n`;\n \n contextSections.push(sourceSection);\n });\n}\n\n// Add metadata\ncontextSections.push(`## METADATA\\nTotal Sources: ${data.total_results}\\nQuality Score: ${(data.quality_score * 100).toFixed(1)}%\\nProcessing Time: ${data.processing_time}ms\\nSession ID: ${data.session_id}`);\n\n// Create comprehensive context\nconst fullContext = contextSections.join('\\n\\n');\n\n// Prepare messages for LLM based on response style\nconst stylePrompts = {\n 'comprehensive': 'Provide a complete and detailed response, using all relevant information from the sources. 
Include specific examples and citations.',\n 'concise': 'Provide a brief and direct response, keeping only the most important information.',\n 'technical': 'Provide a detailed technical response with specific terminology and implementation details.',\n 'executive_summary': 'Provide an executive summary with key points, recommendations and next steps.',\n 'bullet_points': 'Organize the response in clear and structured bullet points.',\n 'detailed_analysis': 'Provide an in-depth analysis with pros/cons, implications and multiple considerations.'\n};\n\nconst stylePrompt = stylePrompts[data.query_info.response_style] || stylePrompts['comprehensive'];\n\n// Language-specific instructions\nconst languageInstructions = {\n 'italian': 'Respond in Italian',\n 'english': 'Respond in English',\n 'spanish': 'Respond in Spanish',\n 'french': 'Respond in French',\n 'german': 'Respond in German'\n};\n\nconst langInstruction = languageInstructions[data.query_info.language] || languageInstructions['english'];\n\n// Create system message\nconst systemMessage = `You are an advanced AI assistant specialized in RAG (Retrieval-Augmented Generation). \\n\\nINSTRUCTIONS:\\n1. ${langInstruction}\\n2. ${stylePrompt}\\n3. ALWAYS use information from the provided sources\\n4. Cite sources using [Source X] when appropriate\\n5. If information is insufficient, clearly indicate what is missing\\n6. Maintain accuracy and objectivity\\n7. 
Structure the response logically and readably\\n\\nCONTEXT AND SOURCES:\\n${fullContext}`;\n\n// Create user message\nconst userMessage = `Based exclusively on the sources provided in the context, ${data.query_info.original_query}`;\n\nconst result = {\n messages: [\n {\n role: 'system',\n content: systemMessage\n },\n {\n role: 'user', \n content: userMessage\n }\n ],\n context_length: fullContext.length,\n sources_count: data.sources.length,\n session_id: data.session_id,\n query_info: data.query_info,\n context_prepared_at: new Date().toISOString()\n};\n\nconsole.log(`Context prepared: ${result.context_length} chars, ${result.sources_count} sources`);\n\nreturn [{ json: result }];"
},
"typeVersion": 2
},
{
"id": "172349cf-ce97-46d0-bb69-f679605e89c2",
"name": "🤖 高级 LLM 处理器",
"type": "n8n-nodes-base.openAi",
"position": [
224,
160
],
"parameters": {
"model": "gpt-4-turbo-preview",
"options": {
"topP": 0.9,
"maxTokens": 2048,
"temperature": 0.3
},
"requestOptions": {}
},
"typeVersion": 1
},
{
"id": "b34adec5-1506-485e-a671-8ee862a0fa7d",
"name": "✨ 响应增强器",
"type": "n8n-nodes-base.code",
"position": [
544,
160
],
"parameters": {
"jsCode": "// Post-processing and response enhancement\nconst llmResponse = $input.all()[0].json;\nconst contextData = $('🧠 上下文构建器').first().json;\n\n// Extract the AI response\nconst aiResponse = llmResponse.choices?.[0]?.message?.content || llmResponse.content || '';\n\n// Build comprehensive response object\nconst enhancedResponse = {\n  // Core response\n  response: aiResponse,\n  \n  // Metadata\n  session_id: contextData.session_id,\n  query: contextData.query_info.original_query,\n  response_style: contextData.query_info.response_style,\n  language: contextData.query_info.language,\n  \n  // Quality metrics\n  response_length: aiResponse.length,\n  sources_used: contextData.sources_count,\n  context_length: contextData.context_length,\n  \n  // Processing info\n  generated_at: new Date().toISOString(),\n  model_used: 'gpt-4-turbo-preview',\n  \n  // Usage statistics\n  tokens_used: llmResponse.usage?.total_tokens || 0,\n  prompt_tokens: llmResponse.usage?.prompt_tokens || 0,\n  completion_tokens: llmResponse.usage?.completion_tokens || 0,\n  \n  // Source summary\n  source_breakdown: {\n    web_content: contextData.query_info.search_scope === 'web_general' ? contextData.sources_count : 0,\n    internal_docs: contextData.query_info.search_scope === 'company_internal' ? contextData.sources_count : 0,\n    academic_papers: contextData.query_info.search_scope === 'academic_papers' ? contextData.sources_count : 0,\n    news_articles: contextData.query_info.search_scope === 'news_articles' ? contextData.sources_count : 0\n  },\n  \n  // Response quality indicators\n  quality_indicators: {\n    has_citations: aiResponse.includes('[Source') || aiResponse.includes('fonte'),\n    response_completeness: aiResponse.length > 200 ? 'complete' : 'partial',\n    source_integration: contextData.sources_count > 0 ? 'good' : 'limited',\n    estimated_accuracy: contextData.sources_count >= 3 ? 
'high' : 'medium'\n }\n};\n\n// Format response based on output format preference\nconst outputFormat = contextData.query_info.output_format || 'markdown';\n\nswitch (outputFormat) {\n case 'html':\n enhancedResponse.formatted_response = `\n<!DOCTYPE html>\n<html>\n<head>\n <title>RAG Response - ${contextData.query_info.original_query}</title>\n <style>\n body { font-family: Arial, sans-serif; max-width: 800px; margin: 0 auto; padding: 20px; }\n .header { background: #f5f5f5; padding: 15px; border-radius: 5px; margin-bottom: 20px; }\n .response { line-height: 1.6; }\n .metadata { background: #e9f4ff; padding: 10px; border-radius: 5px; margin-top: 20px; font-size: 0.9em; }\n </style>\n</head>\n<body>\n <div class=\"header\">\n <h1>RAG Response</h1>\n <p><strong>Query:</strong> ${contextData.query_info.original_query}</p>\n <p><strong>Generated:</strong> ${enhancedResponse.generated_at}</p>\n </div>\n <div class=\"response\">\n ${aiResponse.replace(/\\n/g, '<br>')}\n </div>\n <div class=\"metadata\">\n <strong>Sources Used:</strong> ${enhancedResponse.sources_used} | \n <strong>Tokens:</strong> ${enhancedResponse.tokens_used} | \n <strong>Session:</strong> ${enhancedResponse.session_id}\n </div>\n</body>\n</html>`;\n break;\n \n case 'json_structured':\n enhancedResponse.formatted_response = JSON.stringify({\n query: contextData.query_info.original_query,\n response: aiResponse,\n metadata: {\n sources_count: enhancedResponse.sources_used,\n tokens_used: enhancedResponse.tokens_used,\n quality_score: enhancedResponse.quality_indicators,\n session_id: enhancedResponse.session_id\n }\n }, null, 2);\n break;\n \n default: // markdown\n enhancedResponse.formatted_response = `# RAG Response\\n\\n**Query:** ${contextData.query_info.original_query}\\n\\n---\\n\\n${aiResponse}\\n\\n---\\n\\n**Metadata:**\\n- Sources Used: ${enhancedResponse.sources_used}\\n- Tokens Used: ${enhancedResponse.tokens_used}\\n- Generated: ${enhancedResponse.generated_at}\\n- Session ID: 
${enhancedResponse.session_id}`;\n}\n\nconsole.log(`Response generated: ${enhancedResponse.response_length} chars, ${enhancedResponse.tokens_used} tokens`);\n\nreturn [{ json: enhancedResponse }];"
},
"typeVersion": 2
},
{
"id": "e720959c-0c8a-40b9-b608-7b6c5835f6e7",
"name": "📤 Webhook 响应",
"type": "n8n-nodes-base.respondToWebhook",
"position": [
864,
160
],
"parameters": {
"options": {},
"respondWith": "allIncomingData"
},
"typeVersion": 1
},
{
"id": "sticky-query-preprocessor",
"name": "查询预处理器信息",
"type": "n8n-nodes-base.stickyNote",
"position": [
-1376,
-200
],
"parameters": {
"color": 3,
"width": 500,
"height": 400,
"content": "# 🔍 查询预处理器"
},
"typeVersion": 1
},
{
"id": "sticky-search-router",
"name": "搜索路由器信息",
"type": "n8n-nodes-base.stickyNote",
"position": [
-1056,
-200
],
"parameters": {
"color": 4,
"width": 500,
"height": 400,
"content": "# 🔄 搜索路由器"
},
"typeVersion": 1
},
{
"id": "sticky-context-builder",
"name": "上下文构建器信息",
"type": "n8n-nodes-base.stickyNote",
"position": [
-96,
-200
],
"parameters": {
"color": 5,
"width": 500,
"height": 450,
"content": "# 🧠 上下文构建器"
},
"typeVersion": 1
},
{
"id": "sticky-llm-processor",
"name": "LLM 处理器信息",
"type": "n8n-nodes-base.stickyNote",
"position": [
224,
-200
],
"parameters": {
"color": 6,
"width": 500,
"height": 400,
"content": "# 🤖 高级 LLM 处理器"
},
"typeVersion": 1
},
{
"id": "sticky-response-enhancer",
"name": "响应增强器信息",
"type": "n8n-nodes-base.stickyNote",
"position": [
544,
-200
],
"parameters": {
"color": 7,
"width": 500,
"height": 450,
"content": "# ✨ 响应增强器"
},
"typeVersion": 1
},
{
"id": "sticky-webhook-response",
"name": "Webhook 响应信息",
"type": "n8n-nodes-base.stickyNote",
"position": [
864,
-200
],
"parameters": {
"color": 2,
"width": 500,
"height": 400,
"content": "# 📤 Webhook 响应"
},
"typeVersion": 1
}
],
"active": false,
"pinData": {},
"settings": {
"executionOrder": "v1"
},
"versionId": "552ac041-e7ae-4695-9b21-47675446cff0",
"connections": {
"🔄 搜索路由器": {
"main": [
[
{
"node": "🎓 学术论文搜索",
"type": "main",
"index": 0
},
{
"node": "📰 新闻搜索 API",
"type": "main",
"index": 0
}
]
]
},
"📊 数据聚合器": {
"main": [
[
{
"node": "🧠 上下文构建器",
"type": "main",
"index": 0
}
]
]
},
"📰 新闻搜索 API": {
"main": [
[
{
"node": "📊 数据聚合器",
"type": "main",
"index": 0
}
]
]
},
"🧠 上下文构建器": {
"main": [
[
{
"node": "🤖 高级 LLM 处理器",
"type": "main",
"index": 0
}
]
]
},
"✨ 响应增强器": {
"main": [
[
{
"node": "📤 Webhook 响应",
"type": "main",
"index": 0
}
]
]
},
"🚀 高级 RAG 表单": {
"main": [
[
{
"node": "🔍 查询预处理器",
"type": "main",
"index": 0
}
]
]
},
"🔍 查询预处理器": {
"main": [
[
{
"node": "🔄 搜索路由器",
"type": "main",
"index": 0
}
]
]
},
"🎓 学术论文搜索": {
"main": [
[
{
"node": "📊 数据聚合器",
"type": "main",
"index": 0
}
]
]
},
"🤖 高级 LLM 处理器": {
"main": [
[
{
"node": "✨ 响应增强器",
"type": "main",
"index": 0
}
]
]
}
}
}常见问题
如何使用这个工作流?
复制上方的 JSON 配置代码,在您的 n8n 实例中创建新工作流并选择「从 JSON 导入」,粘贴配置后根据需要修改凭证设置即可。
这个工作流适合什么场景?
高级 - 工程, AI RAG 检索增强
需要付费吗?
本工作流完全免费,您可以直接导入使用。但请注意,工作流中使用的第三方服务(如 OpenAI API)可能需要您自行付费。
相关工作流推荐
从 Notion 内容创建 Linear 工单
从 Notion 内容创建 Linear 工单
If
Set
Code
+11
24 节点David Roberts
工程
实时Notion Todoist双向同步模板
使用Redis的Notion Todoist实时双向同步
If
Set
Code
+26
246 节点Mario
销售
银行对账单分析器 - 简化版
AI驱动银行对账单分析与交易分类
If
Code
Open Ai
+5
9 节点vinci-king-01
文档提取
基于Bright Data、OpenAI和Redis的高级多源AI研究
使用Bright Data、OpenAI和Redis进行高级多源AI研究
If
Set
Code
+15
43 节点Daniel Shashko
市场调研
使用Gmail、GPT-4和向量知识库的自动化客户支持系统
使用Gmail、GPT-4和向量知识库的自动化客户支持系统
If
Set
Code
+15
32 节点Khair Ahammed
AI RAG 检索增强
我的工作流 2
结合 AI 竞品监控和收入优化的自动化动态定价
If
Code
Merge
+8
25 节点vinci-king-01
市场调研