28 changes: 28 additions & 0 deletions servers/knowledge-base-hybrid-rag/readme.md
@@ -0,0 +1,28 @@
# Knowledge Base Hybrid RAG MCP Server

A powerful MCP server providing **37 tools** for Hybrid RAG (Retrieval-Augmented Generation) with PostgreSQL/pgvector.

## Features

- **Hybrid RAG Search**: Semantic + Keyword search with configurable weights (a fusion sketch follows this list)
- **LLM Reranking**: +5-10% accuracy improvement
- **In-Memory Caching**: 2-3x speed improvement
- **Knowledge Graph**: Entity/relation-based knowledge management
- **Auto Entity Extraction**: LLM-based NER with automatic KG save
- **Conversational Memory**: Session-based memory for conversational RAG
- **Multiple Chunking Strategies**: fixed, paragraph, semantic
- **Evaluation Metrics**: Precision, Recall, F1, MRR
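
The exact fusion used by the hybrid search tools is described in the upstream project; as a rough illustration, a weighted blend of normalized semantic and keyword scores could look like the sketch below. All names (`hybrid_score`, `semantic_weight`) and the min-max normalization are assumptions, not the server's actual code.

```python
# Illustrative only: one plausible way to fuse semantic and keyword scores
# with a configurable weight. Not taken from the server's implementation.
from typing import Dict, List, Tuple


def min_max_normalize(scores: Dict[str, float]) -> Dict[str, float]:
    """Scale raw scores into [0, 1] so the two signals are comparable."""
    if not scores:
        return {}
    lo, hi = min(scores.values()), max(scores.values())
    if hi == lo:
        return {doc_id: 1.0 for doc_id in scores}
    return {doc_id: (s - lo) / (hi - lo) for doc_id, s in scores.items()}


def hybrid_score(
    semantic: Dict[str, float],    # e.g. pgvector cosine similarities per document id
    keyword: Dict[str, float],     # e.g. PostgreSQL full-text search ranks
    semantic_weight: float = 0.7,  # the "configurable weight"
) -> List[Tuple[str, float]]:
    """Blend both rankings and return documents sorted by fused score."""
    sem = min_max_normalize(semantic)
    kw = min_max_normalize(keyword)
    fused = {
        doc_id: semantic_weight * sem.get(doc_id, 0.0)
        + (1.0 - semantic_weight) * kw.get(doc_id, 0.0)
        for doc_id in set(sem) | set(kw)
    }
    return sorted(fused.items(), key=lambda item: item[1], reverse=True)
```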

## Documentation

Full documentation: https://github.com/hwandam77/Knowledge-Base-Hybrid-RAG

## Requirements

- PostgreSQL with the pgvector extension
- Ollama with an embedding model (qwen3-embedding:0.6b)
- Ollama with an LLM (gemma3:12b or similar); a quick verification sketch follows
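
A minimal sketch for checking these requirements before registering the server, assuming the same environment variables defined in `server.yaml`; the Ollama `/api/tags` endpoint lists locally pulled models. Hosts, credentials, and model names below are examples, not mandates.

```python
# Quick self-check of the requirements above (illustrative).
import os

import psycopg2   # pip install psycopg2-binary
import requests   # pip install requests

# PostgreSQL: confirm the pgvector extension is installed.
conn = psycopg2.connect(
    host=os.environ.get("DB_HOST", "localhost"),
    port=os.environ.get("DB_PORT", "5432"),
    dbname=os.environ.get("DB_NAME", "knowledge_base_db"),
    user=os.environ.get("DB_USER", "postgres"),
    password=os.environ["DB_PASSWORD"],
)
with conn, conn.cursor() as cur:
    cur.execute("SELECT extversion FROM pg_extension WHERE extname = 'vector';")
    row = cur.fetchone()
    print("pgvector:", row[0] if row else "missing (run: CREATE EXTENSION vector;)")

# Ollama: confirm the embedding and LLM models are pulled.
base_url = os.environ.get("LOCAL_OLLAMA_URL", "http://localhost:11434")
tags = requests.get(f"{base_url}/api/tags", timeout=10).json()
models = {m["name"] for m in tags.get("models", [])}
for required in ("qwen3-embedding:0.6b", "gemma3:12b"):
    print(required, "OK" if required in models else f"missing (ollama pull {required})")
```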

## License

MIT License
81 changes: 81 additions & 0 deletions servers/knowledge-base-hybrid-rag/server.yaml
@@ -0,0 +1,81 @@
name: knowledge-base-hybrid-rag
image: mcp/knowledge-base-hybrid-rag
type: server
meta:
  category: database
  tags:
    - database
    - rag
    - knowledge-graph
    - vector-search
    - postgresql
    - hybrid-search
    - semantic-search
    - ai
about:
  title: Knowledge Base Hybrid RAG
  description: A powerful MCP server providing 37 tools for Hybrid RAG (Retrieval-Augmented Generation) with PostgreSQL/pgvector. Features include semantic + keyword search, LLM-based reranking (+5-10% accuracy), in-memory caching (2-3x speed), Knowledge Graph with entity extraction, conversational memory, multiple chunking strategies, and search quality evaluation metrics (Precision, Recall, F1, MRR).
  icon: https://www.google.com/s2/favicons?domain=github.com&sz=64
source:
  project: https://github.com/hwandam77/Knowledge-Base-Hybrid-RAG
  branch: main
config:
  description: Configure PostgreSQL database and Ollama LLM connection for Knowledge Base RAG
  secrets:
    - name: knowledge-base-hybrid-rag.db_password
      env: DB_PASSWORD
      example: your_secure_password
  env:
    - name: DB_HOST
      example: "10.5.5.10"
      value: "{{knowledge-base-hybrid-rag.db_host}}"
    - name: DB_PORT
      example: "5432"
      value: "{{knowledge-base-hybrid-rag.db_port}}"
    - name: DB_NAME
      example: "knowledge_base_db"
      value: "{{knowledge-base-hybrid-rag.db_name}}"
    - name: DB_USER
      example: "postgres"
      value: "{{knowledge-base-hybrid-rag.db_user}}"
    - name: LOCAL_OLLAMA_URL
      example: "http://host.docker.internal:11434"
      value: "{{knowledge-base-hybrid-rag.ollama_url}}"
    - name: EMBEDDING_MODEL
      example: "qwen3-embedding:0.6b"
      value: "{{knowledge-base-hybrid-rag.embedding_model}}"
    - name: LLM_MODEL
      example: "gemma3:12b"
      value: "{{knowledge-base-hybrid-rag.llm_model}}"
    - name: KB_PROJECT
      example: "default"
      value: "{{knowledge-base-hybrid-rag.kb_project}}"
  parameters:
    type: object
    properties:
      db_host:
        type: string
        description: PostgreSQL host address
      db_port:
        type: string
        description: PostgreSQL port
      db_name:
        type: string
        description: Database name
      db_user:
        type: string
        description: Database username
      ollama_url:
        type: string
        description: Ollama API endpoint URL
      embedding_model:
        type: string
        description: Ollama embedding model name
      llm_model:
        type: string
        description: Ollama LLM model name
      kb_project:
        type: string
        description: Project isolation key
    required:
      - db_host
150 changes: 150 additions & 0 deletions servers/knowledge-base-hybrid-rag/tools.json
@@ -0,0 +1,150 @@
[
  {
    "name": "kb_health_check",
    "description": "Check system health status (DB, Ollama connection)"
  },
  {
    "name": "kb_search",
    "description": "Basic vector search for documents"
  },
  {
    "name": "kb_search_hybrid",
    "description": "Hybrid search combining semantic + keyword search"
  },
  {
    "name": "kb_search_hybrid_v2",
    "description": "Enhanced hybrid search with adjustable weights"
  },
  {
    "name": "kb_search_semantic",
    "description": "Pure semantic vector search"
  },
  {
    "name": "kb_document_search_within",
    "description": "Search within a specific document"
  },
  {
    "name": "kb_ask",
    "description": "RAG-based Q&A with LLM"
  },
  {
    "name": "kb_summarize",
    "description": "Auto-summarize documents using LLM"
  },
  {
    "name": "kb_auto_tag",
    "description": "Auto-extract tags from documents using LLM"
  },
  {
    "name": "kb_image_analyze",
    "description": "Analyze images using Vision model"
  },
  {
    "name": "kb_embedding_generate",
    "description": "Generate embedding vectors for text"
  },
  {
    "name": "kb_document_create",
    "description": "Create a new document"
  },
  {
    "name": "kb_document_get",
    "description": "Get full document content"
  },
  {
    "name": "kb_document_get_meta",
    "description": "Get document metadata only"
  },
  {
    "name": "kb_document_get_chunk",
    "description": "Get document by chunk"
  },
  {
    "name": "kb_document_list",
    "description": "List all documents"
  },
  {
    "name": "kb_document_update",
    "description": "Update document content"
  },
  {
    "name": "kb_document_delete",
    "description": "Delete a document"
  },
  {
    "name": "kg_create_entities",
    "description": "Create entities in Knowledge Graph"
  },
  {
    "name": "kg_create_relations",
    "description": "Create relations between entities"
  },
  {
    "name": "kg_add_observations",
    "description": "Add observations to entities"
  },
  {
    "name": "kg_add_tags",
    "description": "Add tags to entities"
  },
  {
    "name": "kg_search_knowledge",
    "description": "Search Knowledge Graph (fuzzy/exact)"
  },
  {
    "name": "kg_read_graph",
    "description": "Read entire Knowledge Graph"
  },
  {
    "name": "unified_search",
    "description": "Search both documents and Knowledge Graph"
  },
  {
    "name": "kb_search_with_rerank",
    "description": "Hybrid search with LLM-based reranking for improved accuracy (+5-10%)"
  },
  {
    "name": "kb_rerank",
    "description": "Rerank existing search results using LLM relevance scoring"
  },
  {
    "name": "kb_cache_stats",
    "description": "Get cache statistics (hit rate, size, memory usage)"
  },
  {
    "name": "kb_cache_clear",
    "description": "Clear cache (search, embedding, or all)"
  },
  {
    "name": "kb_extract_entities",
    "description": "Extract entities from document using LLM-based NER"
  },
  {
    "name": "kb_extract_and_save",
    "description": "Extract entities and automatically save to Knowledge Graph"
  },
  {
    "name": "kb_memory_save",
    "description": "Save content to session memory for conversational RAG"
  },
  {
    "name": "kb_memory_recall",
    "description": "Recall relevant memories from session using semantic search"
  },
  {
    "name": "kb_memory_clear",
    "description": "Clear all memories for a session"
  },
  {
    "name": "kb_document_create_chunked",
    "description": "Create document with advanced chunking strategy (fixed/paragraph/semantic)"
  },
  {
    "name": "kb_evaluate_search",
    "description": "Evaluate search quality with precision, recall, F1, MRR metrics"
  },
  {
    "name": "kb_benchmark",
    "description": "Run performance benchmark on search operations"
  }
]
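
For context (not part of this PR), one way to exercise these tools from a client is via the MCP Python SDK over stdio. The launch command, environment wiring, and tool argument names below are assumptions; consult the linked project documentation for the actual schemas.

```python
# Hypothetical client session against the published image (argument names are guesses).
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def main() -> None:
    server = StdioServerParameters(
        command="docker",
        args=[
            "run", "-i", "--rm",
            "-e", "DB_HOST", "-e", "DB_PORT", "-e", "DB_NAME", "-e", "DB_USER",
            "-e", "DB_PASSWORD", "-e", "LOCAL_OLLAMA_URL",
            "mcp/knowledge-base-hybrid-rag",
        ],
    )
    async with stdio_client(server) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await session.list_tools()
            print([tool.name for tool in tools.tools])  # should include the 37 tools listed above
            result = await session.call_tool(
                "kb_search_hybrid",
                arguments={"query": "what is pgvector?"},  # argument name is a guess
            )
            print(result.content)


asyncio.run(main())
```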