# Professional RAG System Configuration
app:
  name: "Professional RAG Document Assistant"
  version: "1.0.0"
  debug: false
  max_upload_size: 50  # MB
  max_concurrent_uploads: 3

# Model configurations
models:
  embedding:
    name: "sentence-transformers/all-MiniLM-L6-v2"
    max_seq_length: 256
    batch_size: 32
    device: "auto"  # auto, cpu, cuda
  reranker:
    name: "cross-encoder/ms-marco-MiniLM-L-6-v2"
    max_seq_length: 512
    batch_size: 16
    enabled: true
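# Note on the models above (assumptions, not verified against the app code):
# all-MiniLM-L6-v2 is a bi-encoder that embeds each chunk once into a 384-dim
# vector, while the ms-marco cross-encoder rescores (query, chunk) pairs at
# query time, which is why its batch_size is smaller. "device: auto"
# presumably selects CUDA when available and falls back to CPU otherwise.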

# Document processing
processing:
  chunk_size: 512
  chunk_overlap: 50
  min_chunk_size: 100
  max_chunks_per_doc: 1000
  supported_formats: ["pdf", "docx", "txt"]
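# Note: chunk_overlap keeps 50 units of shared context between adjacent chunks,
# and min_chunk_size drops fragments smaller than 100 units. Whether these
# units are tokens or characters depends on the splitter implementation
# (assumption).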

# Search configuration
search:
  default_k: 10
  max_k: 20
  vector_weight: 0.7
  bm25_weight: 0.3
  rerank_top_k: 50
  final_top_k: 10
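# Note (assumed hybrid-retrieval flow): vector and BM25 scores are combined
# with the weights above (0.7 + 0.3 = 1.0), the top rerank_top_k=50 candidates
# are passed to the cross-encoder when reranking is enabled, and final_top_k=10
# results are returned to the user.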

# Caching
cache:
  embedding_cache_size: 10000
  query_cache_size: 1000
  cache_ttl: 3600  # seconds
  enable_disk_cache: true
  cache_dir: "./cache"
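# Note: cache_ttl of 3600 seconds expires entries after one hour; with
# enable_disk_cache set, cached embeddings are presumably persisted under
# cache_dir so they survive restarts.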

# UI settings
ui:
  theme: "soft"
  title: "Professional RAG Assistant"
  description: "Upload documents and ask questions with AI-powered retrieval"
  max_file_size: "50MB"
  allowed_extensions: [".pdf", ".docx", ".txt"]
  show_progress: true
  show_analytics: true
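# Note: ui.max_file_size mirrors app.max_upload_size (50 MB); keep the two
# values in sync if either limit is changed.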

# Logging
logging:
  level: "INFO"
  format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
  file: "logs/rag_system.log"
  max_size: "10MB"
  backup_count: 5
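# Note: with max_size 10MB and backup_count 5, rotated logs occupy at most
# about 60 MB (the active file plus five backups), assuming standard
# size-based rotation.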