Add Ollama auto-installation and educational LLM model suggestions

Features:
- One-click Ollama installation using official script
- Educational LLM model recommendations after successful install
- Smart 3-option menu: auto-install, manual, or skip
- Clear performance vs quality guidance for model selection

🛡 Safety & UX:
- Uses official ollama.com/install.sh script
- Shows exact commands before execution
- Graceful fallback to manual installation
- Auto-starts Ollama server and verifies health
- Educational approach with size/performance trade-offs
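
The start-and-verify step boils down to backgrounding the server and polling Ollama's version endpoint (port 11434 is Ollama's default). A minimal standalone sketch of the pattern; the retry loop here is an illustrative hardening, while the committed script below sleeps once and checks once:

    # Launch the server in the background, then poll until it responds (max ~5s).
    ollama serve &
    for _ in 1 2 3 4 5; do
        if curl -s http://localhost:11434/api/version >/dev/null 2>&1; then
            echo "Ollama server is healthy"
            break
        fi
        sleep 1
    done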

🎯 Model Recommendations:
- qwen3:0.6b (lightweight, 400MB)
- qwen3:1.7b (balanced, 1GB)
- qwen3:3b (excellent for this project, 2GB)
- qwen3:8b (premium results, 5GB)
- Creative suggestions: mistral for storytelling, qwen3-coder for development
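
Acting on any of these suggestions is a two-command affair; for example (the prompt text is illustrative):

    ollama pull qwen3:1.7b    # download the balanced option (~1GB)
    ollama run qwen3:1.7b "Explain embeddings in one sentence."    # quick smoke test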

Transforms installation from a multi-step manual process into guided automation.
commit 11639c8237
parent 2f2dd6880b
Author: BobAi
Date: 2025-08-14 19:50:12 +10:00

@@ -162,22 +162,72 @@ check_ollama() {
         print_warning "Ollama not found"
         echo ""
         echo -e "${CYAN}Ollama provides the best embedding quality and performance.${NC}"
-        echo -e "${YELLOW}To install Ollama:${NC}"
-        echo " 1. Visit: https://ollama.ai/download"
-        echo " 2. Download and install for your system"
-        echo " 3. Run: ollama serve"
-        echo " 4. Re-run this installer"
         echo ""
-        echo -e "${BLUE}Alternative: Use ML fallback (requires more disk space)${NC}"
+        echo -e "${BOLD}Options:${NC}"
+        echo -e "${GREEN}1) Install Ollama automatically${NC} (recommended)"
+        echo -e "${YELLOW}2) Manual installation${NC} - Visit https://ollama.com/download"
+        echo -e "${BLUE}3) Continue without Ollama${NC} (uses ML fallback)"
         echo ""
-        echo -n "Continue without Ollama? (y/N): "
-        read -r continue_without
-        if [[ $continue_without =~ ^[Yy]$ ]]; then
-            return 1
-        else
-            print_info "Install Ollama first, then re-run this script"
-            exit 0
-        fi
+        echo -n "Choose [1/2/3]: "
+        read -r ollama_choice
+
+        case "$ollama_choice" in
+            1|"")
+                print_info "Installing Ollama using official installer..."
+                echo -e "${CYAN}Running: curl -fsSL https://ollama.com/install.sh | sh${NC}"
+
+                if curl -fsSL https://ollama.com/install.sh | sh; then
+                    print_success "Ollama installed successfully"
+                    print_info "Starting Ollama server..."
+                    ollama serve &
+                    sleep 3
+
+                    if curl -s http://localhost:11434/api/version >/dev/null 2>&1; then
+                        print_success "Ollama server started"
+                        echo ""
+                        echo -e "${CYAN}💡 Pro tip: Download an LLM for AI-powered search synthesis!${NC}"
+                        echo -e "   Lightweight: ${GREEN}ollama pull qwen3:0.6b${NC} (~400MB, very fast)"
+                        echo -e "   Balanced:    ${GREEN}ollama pull qwen3:1.7b${NC} (~1GB, good quality)"
+                        echo -e "   Excellent:   ${GREEN}ollama pull qwen3:3b${NC} (~2GB, great for this project)"
+                        echo -e "   Premium:     ${GREEN}ollama pull qwen3:8b${NC} (~5GB, amazing results)"
+                        echo ""
+                        echo -e "${BLUE}Creative possibilities: Try mistral for storytelling, or qwen3-coder for development!${NC}"
+                        echo ""
+                        return 0
+                    else
+                        print_warning "Ollama installed but failed to start automatically"
+                        echo "Please start Ollama manually: ollama serve"
+                        echo "Then re-run this installer"
+                        exit 1
+                    fi
+                else
+                    print_error "Failed to install Ollama automatically"
+                    echo "Please install manually from https://ollama.com/download"
+                    exit 1
+                fi
+                ;;
+            2)
+                echo ""
+                echo -e "${YELLOW}Manual Ollama installation:${NC}"
+                echo " 1. Visit: https://ollama.com/download"
+                echo " 2. Download and install for your system"
+                echo " 3. Run: ollama serve"
+                echo " 4. Re-run this installer"
+                print_info "Exiting for manual installation..."
+                exit 0
+                ;;
+            3)
+                print_info "Continuing without Ollama (will use ML fallback)"
+                return 1
+                ;;
+            *)
+                print_warning "Invalid choice, continuing without Ollama"
+                return 1
+                ;;
+        esac
     fi
 }
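
A detail worth calling out in the hunk above: the 1|"") pattern makes auto-install the default, because read leaves the variable empty when the user just presses Enter. A minimal illustration of the idiom:

    read -r choice
    case "$choice" in
        1|"") echo "option 1 (also the default on bare Enter)" ;;
        2)    echo "option 2" ;;
        *)    echo "anything else" ;;
    esac
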
@@ -271,8 +321,8 @@ get_installation_preferences() {
     echo ""
     echo -e "${BOLD}Installation options:${NC}"
-    echo -e "${GREEN}L) Light${NC} - Ollama + basic deps (~50MB)"
-    echo -e "${YELLOW}F) Full${NC} - Light + ML fallback (~2-3GB)"
+    echo -e "${GREEN}L) Light${NC} - Ollama + basic deps (~50MB) ${CYAN}← Best performance + AI chat${NC}"
+    echo -e "${YELLOW}F) Full${NC} - Light + ML fallback (~2-3GB) ${CYAN}← RAG-only if no Ollama${NC}"
     echo -e "${BLUE}C) Custom${NC} - Configure individual components"
     echo ""