Build a Comprehensive Literature Review with GPT-4 and Multi-Database Search
This is an automation workflow in the Document Extraction and Multimodal AI categories, built from 8 nodes; it mainly uses Code, OpenAI, PDF Vector, and Write Binary File nodes. It produces a comprehensive literature review with GPT-4 and a multi-database search: it queries several academic databases, sorts the results by citation count, parses the top papers' PDFs, has GPT-4 summarize each one, and exports the combined review as a Markdown file.
Requirements:
- OpenAI API Key
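The expressions in the workflow JSON below ({{ $json.topic }}, {{ $json.startYear }}, {{ $json.endYear }}, $node['Start'].json.maxPapers) expect an input item that carries the review parameters. Because the "Start" node in this JSON is a sticky note, which only displays text and does not emit data, you will likely need to supply these fields yourself, for example via a trigger followed by a Code node. A minimal sketch of such a Code node, with placeholder values (the field names come from the workflow's expressions; the values and the node itself are hypothetical):

```javascript
// Hypothetical Code node placed before "PDF Vector - Search Papers".
// The field names match the expressions used downstream
// ($json.topic, $json.startYear, $json.endYear, maxPapers);
// replace the placeholder values with your own.
return [
  {
    json: {
      topic: "large language models in clinical decision support",
      startYear: 2019,
      endYear: 2024,
      maxPapers: 10,
    },
  },
];
```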
```json
{
"meta": {
"instanceId": "placeholder"
},
"nodes": [
{
"id": "start-node",
"name": "Start",
"type": "n8n-nodes-base.stickyNote",
"position": [
250,
250
],
"parameters": {
"content": "## Literature Review Parameters\n\nTopic: {{ $json.topic }}\nYear Range: {{ $json.startYear }}-{{ $json.endYear }}\nMax Papers: {{ $json.maxPapers }}"
},
"typeVersion": 1
},
{
"id": "pdfvector-search",
"name": "PDF Vector - Search Papers",
"type": "n8n-nodes-pdfvector.pdfVector",
"notes": "Search across multiple academic databases",
"position": [
450,
300
],
"parameters": {
"limit": 50,
"query": "={{ $json.topic }}",
"fields": [
"title",
"abstract",
"authors",
"year",
"doi",
"pdfUrl",
"totalCitations"
],
"yearTo": "={{ $json.endYear }}",
"resource": "academic",
"yearFrom": "={{ $json.startYear }}",
"operation": "search",
"providers": [
"pubmed",
"semantic_scholar",
"arxiv",
"google_scholar"
]
},
"typeVersion": 1
},
{
"id": "sort-papers",
"name": "Sortieren by Citations",
"type": "n8n-nodes-base.code",
"position": [
650,
300
],
"parameters": {
"functionCode": "// Sort papers by citations in descending order\nreturn items.sort((a, b) => (b.json.totalCitations || 0) - (a.json.totalCitations || 0));"
},
"typeVersion": 1
},
{
"id": "limit-papers",
"name": "Select Top Papers",
"type": "n8n-nodes-base.code",
"position": [
850,
300
],
"parameters": {
"functionCode": "// Limit to top N papers\nconst maxPapers = $node['Start'].json.maxPapers || 10;\nreturn items.slice(0, maxPapers);"
},
"typeVersion": 1
},
{
"id": "pdfvector-parse",
"name": "PDF Vector - Parse Papers",
"type": "n8n-nodes-pdfvector.pdfVector",
"notes": "Parse each paper's PDF",
"position": [
1050,
300
],
"parameters": {
"useLlm": "auto",
"resource": "document",
"operation": "parse",
"documentUrl": "={{ $json.pdfUrl }}"
},
"typeVersion": 1
},
{
"id": "synthesize",
"name": "Synthesize Review",
"type": "n8n-nodes-base.openAi",
"position": [
1250,
300
],
"parameters": {
"model": "gpt-4",
"messages": {
"values": [
{
"content": "Create a literature review section for this paper:\n\nTitle: {{ $json.title }}\nAuthors: {{ $json.authors }}\nYear: {{ $json.year }}\n\nContent: {{ $json.content }}\n\nGenerate:\n1. Key contribution summary (2-3 sentences)\n2. Methodology overview\n3. Main findings\n4. Relevance to topic: {{ $node['Start'].json.topic }}"
}
]
}
},
"typeVersion": 1
},
{
"id": "combine-sections",
"name": "Combine Sections",
"type": "n8n-nodes-base.code",
"position": [
1450,
300
],
"parameters": {
"functionCode": "// Combine all review sections into a single document\nconst reviewSections = items.map(item => item.json.reviewSection || item.json.content || '').filter(section => section);\nreturn [{ json: { reviewSections: reviewSections.join('\\n\\n') } }];"
},
"typeVersion": 1
},
{
"id": "export-review",
"name": "Export Review",
"type": "n8n-nodes-base.writeBinaryFile",
"position": [
1650,
300
],
"parameters": {
"fileName": "literature_review_{{ $now.format('yyyy-MM-dd') }}.md",
"fileContent": "# Literature Review: {{ $node['Start'].json.topic }}\n\n{{ $json.reviewSections }}"
},
"typeVersion": 1
}
],
"connections": {
"start-node": {
"main": [
[
{
"node": "pdfvector-search",
"type": "main",
"index": 0
}
]
]
},
"combine-sections": {
"main": [
[
{
"node": "export-review",
"type": "main",
"index": 0
}
]
]
},
"limit-papers": {
"main": [
[
{
"node": "pdfvector-parse",
"type": "main",
"index": 0
}
]
]
},
"Sort by Citations": {
"main": [
[
{
"node": "limit-papers",
"type": "main",
"index": 0
}
]
]
},
"synthesize": {
"main": [
[
{
"node": "combine-sections",
"type": "main",
"index": 0
}
]
]
},
"pdfvector-parse": {
"main": [
[
{
"node": "synthesize",
"type": "main",
"index": 0
}
]
]
},
"pdfvector-search": {
"main": [
[
{
"node": "Sort by Citations",
"type": "main",
"index": 0
}
]
]
}
}
}
```

How do I use this workflow?
Copy the JSON above, create a new workflow in your n8n instance, and choose "Import from JSON". Paste the configuration and adjust the credentials as needed.
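One detail worth checking after import: the "Combine Sections" Code node looks for a reviewSection or content field on each item, while the output shape of the OpenAI node depends on its version and settings. If the generated text arrives under a different property, a small Code node between "Synthesize Review" and "Combine Sections" can normalize it. A minimal sketch, assuming the OpenAI node exposes the text under message.content or text (both property paths are assumptions to verify against your node's actual output):

```javascript
// Hypothetical normalization step between "Synthesize Review" and "Combine Sections".
// Assumes the generated text lives at item.json.message.content or item.json.text;
// adjust the property path to whatever your OpenAI node actually returns.
return items.map((item) => ({
  json: {
    ...item.json,
    reviewSection: item.json.message?.content ?? item.json.text ?? '',
  },
}));
```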
Which scenarios is this workflow suited for?
Advanced - Document Extraction, Multimodal AI
Does it cost anything?
The workflow itself is completely free. Note, however, that third-party services used in the workflow (such as the OpenAI API) may incur charges.
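Since the PDF Vector parse calls and the GPT-4 requests are the metered steps, it can also be worth dropping search results that expose no pdfUrl before the parsing stage, so those steps only run on usable papers. A minimal sketch of an extra, hypothetical Code node placed between "Select Top Papers" and "PDF Vector - Parse Papers":

```javascript
// Hypothetical filter node: keep only papers that actually expose a PDF URL,
// so the parse and GPT-4 steps are not invoked for items that cannot be parsed.
return items.filter(
  (item) => typeof item.json.pdfUrl === 'string' && item.json.pdfUrl.length > 0
);
```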