Automatisierung von wissenschaftlichen Literaturrezensionen mit GPT-4 und Mehrfachdatenbankensuche
Dies ist ein Automatisierungsworkflow mit 13 Nodes aus den Bereichen Dokumentenextraktion, KI-RAG und multimodale KI. Hauptsächlich werden If, Set, Code, OpenAi, SplitInBatches und andere Nodes verwendet. Der Workflow automatisiert die Erstellung von Literaturüberblicken mit GPT-4 und mehrfacher Datenbanksuche.
- OpenAI API Key
Verwendete Nodes (13)
{
"meta": {
"instanceId": "placeholder"
},
"nodes": [
{
"id": "overview-note",
"name": "Übersicht der Recherche",
"type": "n8n-nodes-base.stickyNote",
"position": [
50,
50
],
"parameters": {
"color": 5,
"width": 350,
"height": 180,
"content": "## 📖 Literature Review Generator\n\nSystematic review automation:\n• **Searches** multiple databases\n• **Screens** with inclusion criteria\n• **Assesses** study quality\n• **Synthesizes** findings\n• **Generates** PRISMA-compliant review"
},
"typeVersion": 1
},
{
"id": "search-note",
"name": "Datenbank-Suche",
"type": "n8n-nodes-base.stickyNote",
"position": [
450,
450
],
"parameters": {
"width": 260,
"height": 160,
"content": "## 🔍 Search Strategy\n\nDatabases searched:\n• PubMed/MEDLINE\n• Web of Science\n• Cochrane Library\n• Google Scholar\n\n💡 De-duplicates results"
},
"typeVersion": 1
},
{
"id": "quality-note",
"name": "Studienqualität",
"type": "n8n-nodes-base.stickyNote",
"position": [
850,
450
],
"parameters": {
"width": 260,
"height": 150,
"content": "## 📊 Quality Assessment\n\n**Evaluates:**\n• Study design\n• Sample size\n• Risk of bias\n• Evidence level\n\n✅ Cochrane standards"
},
"typeVersion": 1
},
{
"id": "output-note",
"name": "Abschließende Recherche",
"type": "n8n-nodes-base.stickyNote",
"position": [
1150,
450
],
"parameters": {
"color": 6,
"width": 260,
"height": 180,
"content": "## 📝 Review Output\n\nGenerates:\n• Narrative synthesis\n• Evidence tables\n• PRISMA diagram\n• Forest plots\n• Bibliography\n\n🎯 Publication ready!"
},
"typeVersion": 1
},
{
"id": "set-parameters",
"name": "Suchparameter festlegen",
"type": "n8n-nodes-base.set",
"notes": "Configure literature review parameters",
"position": [
250,
300
],
"parameters": {
"values": {
"number": [
{
"name": "maxPapers",
"value": 20
}
],
"string": [
{
"name": "topic",
"value": "machine learning in healthcare"
},
{
"name": "yearFrom",
"value": "2020"
},
{
"name": "yearTo",
"value": "2024"
}
]
}
},
"typeVersion": 1
},
{
"id": "pdfvector-search",
"name": "PDF Vector - Papers durchsuchen",
"type": "n8n-nodes-pdfvector.pdfVector",
"notes": "Search academic databases",
"position": [
450,
300
],
"parameters": {
"limit": 50,
"query": "={{ $json.topic }}",
"yearTo": "={{ $json.yearTo }}",
"resource": "academic",
"yearFrom": "={{ $json.yearFrom }}",
"operation": "search",
"providers": [
"pubmed",
"semantic-scholar",
"arxiv"
],
"additionalFields": {
"fields": [
"title",
"abstract",
"authors",
"year",
"doi",
"pdfURL",
"totalCitations"
]
}
},
"typeVersion": 1
},
{
"id": "rank-papers",
"name": "Papers bewerten & auswählen",
"type": "n8n-nodes-base.code",
"notes": "Rank papers by relevance",
"position": [
650,
300
],
"parameters": {
"jsCode": "// Rank papers by relevance and citations\nconst papers = $input.all().map(item => item.json);\nconst searchTopic = $node['Suchparameter festlegen'].json.topic;\n\n// Normalize citations against the best-cited paper (hoisted out of the per-paper loop)\nconst maxCitations = Math.max(...papers.map(p => p.totalCitations || 0));\n\n// Calculate relevance scores\nconst scoredPapers = papers.map(paper => {\n let score = 0;\n \n // Citation score (normalized)\n const citationScore = (paper.totalCitations || 0) / (maxCitations || 1) * 40;\n score += citationScore;\n \n // Recency score\n const paperYear = parseInt(paper.year, 10);\n const currentYear = new Date().getFullYear();\n const recencyScore = Math.max(0, 20 - (currentYear - paperYear) * 2);\n score += recencyScore;\n \n // Title relevance\n const topicWords = searchTopic.toLowerCase().split(' ');\n const titleWords = paper.title.toLowerCase();\n const titleMatches = topicWords.filter(word => titleWords.includes(word)).length;\n score += titleMatches * 10;\n \n // Abstract relevance (declared outside the if so it is in scope for rankingDetails)\n let abstractMatches = 0;\n if (paper.abstract) {\n const abstractWords = paper.abstract.toLowerCase();\n abstractMatches = topicWords.filter(word => abstractWords.includes(word)).length;\n score += abstractMatches * 5;\n }\n \n return {\n ...paper,\n relevanceScore: Math.round(score),\n rankingDetails: {\n citationScore: Math.round(citationScore),\n recencyScore,\n titleRelevance: titleMatches,\n abstractRelevance: abstractMatches\n }\n };\n});\n\n// Sort by score and limit to top N\nconst maxPapers = $node['Suchparameter festlegen'].json.maxPapers;\nconst topPapers = scoredPapers\n .sort((a, b) => b.relevanceScore - a.relevanceScore)\n .slice(0, maxPapers);\n\nreturn topPapers.map(paper => ({ json: paper }));"
},
"typeVersion": 2
},
{
"id": "split-batch",
"name": "Einzelne Verarbeitung",
"type": "n8n-nodes-base.splitInBatches",
"notes": "Process papers individually",
"position": [
850,
300
],
"parameters": {
"options": {},
"batchSize": 1
},
"typeVersion": 1
},
{
"id": "has-pdf",
"name": "PDF vorhanden?",
"type": "n8n-nodes-base.if",
"position": [
1050,
300
],
"parameters": {
"conditions": {
"string": [
{
"value1": "={{ $json.pdfURL }}",
"operation": "isNotEmpty"
}
]
}
},
"typeVersion": 1
},
{
"id": "pdfvector-parse",
"name": "PDF Vector - Paper analysieren",
"type": "n8n-nodes-pdfvector.pdfVector",
"notes": "Parse paper content from PDF or image",
"position": [
1250,
250
],
"parameters": {
"url": "={{ $json.pdfURL }}",
"useLLM": "auto",
"resource": "document",
"inputType": "url",
"operation": "parse"
},
"typeVersion": 1
},
{
"id": "analyze-paper",
"name": "Paperinhalt auswerten",
"type": "n8n-nodes-base.openAi",
"notes": "Generate review entry",
"position": [
1450,
300
],
"parameters": {
"model": "gpt-4",
"messages": {
"values": [
{
"content": "Create a literature review entry for this paper in the context of '{{ $node['Suchparameter festlegen'].json.topic }}':\n\nTitle: {{ $json.title }}\nAuthors: {{ $json.authors }}\nYear: {{ $json.year }}\nCitations: {{ $json.totalCitations }}\n\nContent: {{ $json.content || $json.abstract }}\n\nProvide:\n1. A 3-4 sentence summary of the paper's contribution\n2. Key methodology used\n3. Main findings (2-3 bullet points)\n4. How it relates to the topic\n5. Limitations mentioned\n6. Suggested citation in APA format"
}
]
}
},
"typeVersion": 1
},
{
"id": "store-entry",
"name": "Rechercheeintrag speichern",
"type": "n8n-nodes-base.set",
"notes": "Save processed entry",
"position": [
1650,
300
],
"parameters": {
"values": {
"string": [
{
"name": "reviewEntry",
"value": "={{ $json.choices[0].message.content }}"
},
{
"name": "paperTitle",
"value": "={{ $node['PDF vorhanden?'].json.title }}"
},
{
"name": "paperDoi",
"value": "={{ $node['PDF vorhanden?'].json.doi }}"
}
]
}
},
"typeVersion": 1
},
{
"id": "compile-review",
"name": "Literaturrecherche zusammenstellen",
"type": "n8n-nodes-base.code",
"notes": "Generate final document",
"position": [
1850,
300
],
"parameters": {
"jsCode": "// Wait for all papers to be processed\nconst allEntries = $input.all().map(item => item.json);\n\n// Group papers by themes/methodologies\nconst themes = {\n 'Machine Learning Models': [],\n 'Clinical Applications': [],\n 'Data Processing': [],\n 'Evaluation Studies': [],\n 'Review Papers': [],\n 'Other': []\n};\n\n// Categorize papers (simplified - in production use NLP)\nallEntries.forEach(entry => {\n const review = entry.reviewEntry.toLowerCase();\n if (review.includes('neural network') || review.includes('deep learning')) {\n themes['Machine Learning Models'].push(entry);\n } else if (review.includes('clinical') || review.includes('patient')) {\n themes['Clinical Applications'].push(entry);\n } else if (review.includes('preprocessing') || review.includes('data processing')) {\n themes['Data Processing'].push(entry);\n } else if (review.includes('evaluation') || review.includes('comparison')) {\n themes['Evaluation Studies'].push(entry);\n } else if (review.includes('review') || review.includes('survey')) {\n themes['Review Papers'].push(entry);\n } else {\n themes['Other'].push(entry);\n }\n});\n\n// Generate literature review document\nlet reviewDocument = `# Literature Review: ${$node['Suchparameter festlegen'].json.topic}\\n\\n`;\nreviewDocument += `Generated on: ${new Date().toLocaleDateString()}\\n\\n`;\nreviewDocument += `## Summary\\n\\n`;\nreviewDocument += `This review analyzes ${allEntries.length} papers published between ${$node['Suchparameter festlegen'].json.yearFrom} and ${$node['Suchparameter festlegen'].json.yearTo} on the topic of ${$node['Suchparameter festlegen'].json.topic}.\\n\\n`;\n\n// Add themed sections\nObject.entries(themes).forEach(([theme, papers]) => {\n if (papers.length > 0) {\n reviewDocument += `## ${theme} (${papers.length} papers)\\n\\n`;\n papers.forEach(paper => {\n reviewDocument += `### ${paper.paperTitle}\\n\\n`;\n reviewDocument += paper.reviewEntry + '\\n\\n';\n });\n }\n});\n\n// Add bibliography\nreviewDocument += `## Bibliography\\n\\n`;\nallEntries.forEach((entry, index) => {\n const citation = entry.reviewEntry.split('Suggested citation:')[1] || 'Citation not available';\n reviewDocument += `${index + 1}. ${citation.trim()}\\n\\n`;\n});\n\nreturn [{\n json: {\n reviewDocument,\n totalPapers: allEntries.length,\n themes: Object.entries(themes).map(([theme, papers]) => ({\n theme,\n count: papers.length\n })),\n generatedAt: new Date().toISOString()\n }\n}];"
},
"typeVersion": 2
}
],
"connections": {
"PDF vorhanden?": {
"main": [
[
{
"node": "PDF Vector - Paper analysieren",
"type": "main",
"index": 0
}
],
[
{
"node": "Paperinhalt auswerten",
"type": "main",
"index": 0
}
]
]
},
"Einzelne Verarbeitung": {
"main": [
[
{
"node": "PDF vorhanden?",
"type": "main",
"index": 0
}
]
]
},
"Rechercheeintrag speichern": {
"main": [
[
{
"node": "Einzelne Verarbeitung",
"type": "main",
"index": 0
}
]
]
},
"Papers bewerten & auswählen": {
"main": [
[
{
"node": "Einzelne Verarbeitung",
"type": "main",
"index": 0
}
]
]
},
"Paperinhalt auswerten": {
"main": [
[
{
"node": "Rechercheeintrag speichern",
"type": "main",
"index": 0
}
]
]
},
"Suchparameter festlegen": {
"main": [
[
{
"node": "PDF Vector - Papers durchsuchen",
"type": "main",
"index": 0
}
]
]
},
"PDF Vector - Paper analysieren": {
"main": [
[
{
"node": "Paperinhalt auswerten",
"type": "main",
"index": 0
}
]
]
},
"PDF Vector - Papers durchsuchen": {
"main": [
[
{
"node": "Papers bewerten & auswählen",
"type": "main",
"index": 0
}
]
]
}
}
}
Wie verwende ich diesen Workflow?
Kopieren Sie den obigen JSON-Code, erstellen Sie einen neuen Workflow in Ihrer n8n-Instanz und wählen Sie "Aus JSON importieren". Fügen Sie die Konfiguration ein und passen Sie die Anmeldedaten nach Bedarf an.
Für welche Szenarien ist dieser Workflow geeignet?
Fortgeschritten – Dokumentenextraktion, KI-RAG, multimodale KI
Ist es kostenpflichtig?
Dieser Workflow ist völlig kostenlos. Beachten Sie jedoch, dass Drittanbieterdienste (wie OpenAI API), die im Workflow verwendet werden, möglicherweise kostenpflichtig sind.
Verwandte Workflows
PDF Vector
@pdfvector – A fully featured PDF API for developers: parse any PDF or Word document, extract structured data, and access millions of academic papers – all through simple APIs.
Diesen Workflow teilen