Initial commit: NPM HA add-on to Proxmox LXC migration script
This commit is contained in:
@@ -0,0 +1,317 @@
|
||||
#!/usr/bin/env bash
# =============================================================================
# migrate-npm-to-proxmox-lxc.sh
#
# Migrates NGINX Proxy Manager from HAOS add-on → Proxmox LXC
#
# HA Host : 10.0.0.55 (enp1s0f0)
# New LXC : 10.0.0.54
# Addon   : Nginx Proxy Manager 2.1.0 (a0d7b954_nginxproxymanager)
#
# Prerequisites:
#   - SSH key from this Proxmox host authorised in HA Terminal & SSH add-on
#   - pct available (run from Proxmox node)
#   - wget available (for tteck script)
#
# Flow: Phase 1 backs up the add-on data over SSH and pulls it to this node,
# Phase 2 creates the LXC via the interactive tteck helper, Phase 3 restores
# the data into the container, Phase 4 prints a manual cutover checklist.
# =============================================================================

# Abort on unchecked errors, unset variables, and failed pipeline stages.
set -euo pipefail
|
||||
|
||||
# ─── COLOURS ──────────────────────────────────────────────────────────────────
# ANSI escape sequences; RESET restores the terminal's default attributes.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
BOLD='\033[1m'
RESET='\033[0m'

# Logging helpers: coloured tag followed by all arguments joined by spaces.
# printf '%b' interprets the backslash escapes exactly as 'echo -e' did.
info()    { printf '%b\n' "${CYAN}[INFO]${RESET} $*"; }
success() { printf '%b\n' "${GREEN}[ OK ]${RESET} $*"; }
warn()    { printf '%b\n' "${YELLOW}[WARN]${RESET} $*"; }

# Fatal error: message to stderr with a blank line either side, then exit 1.
die() {
  printf '%b\n' "\n${RED}[ERROR]${RESET} $*\n" >&2
  exit 1
}

# Three-line banner announcing each migration phase.
step() {
  printf '%b\n' "\n${BOLD}${CYAN}══════════════════════════════════════════════${RESET}"
  printf '%b\n' "${BOLD}${CYAN}  $*${RESET}"
  printf '%b\n' "${BOLD}${CYAN}══════════════════════════════════════════════${RESET}\n"
}

# Block until the operator presses ENTER.
pause() {
  printf '%b\n' "\n${YELLOW}${BOLD}>>> Press ENTER when ready to continue...${RESET}"
  read -r
}
|
||||
|
||||
# ─── CONFIG ───────────────────────────────────────────────────────────────────
# Source side: the Home Assistant box currently running the NPM add-on.
HA_HOST="10.0.0.55"
HA_SSH_PORT="22222" # Default Terminal & SSH add-on port — adjust if needed
HA_SSH_USER="root"
ADDON_SLUG="a0d7b954_nginxproxymanager"

# Destination side: values you will quote back to the tteck prompts.
NEW_LXC_IP="10.0.0.54"
LXC_NETMASK="24"
LXC_GATEWAY="10.0.0.254" # OPNsense
PROXMOX_BRIDGE="vmbr0"
LXC_STORAGE="local-lvm" # Change to your Proxmox storage pool if different

# Local scratch paths; the timestamp keeps reruns from clobbering each other.
TIMESTAMP="$(date +%Y%m%d-%H%M%S)"
BACKUP_DIR="/tmp/npm-migration-${TIMESTAMP}"
BACKUP_ARCHIVE="/tmp/npm-backup-${TIMESTAMP}.tar.gz"
|
||||
|
||||
# ─── SANITY CHECKS ────────────────────────────────────────────────────────────
# Must run as root on a Proxmox node: pct drives the LXC, wget fetches tteck.
if [[ $EUID -ne 0 ]]; then
  die "Run as root on the Proxmox node."
fi
if ! command -v pct > /dev/null 2>&1; then
  die "'pct' not found — run this on a Proxmox node."
fi
if ! command -v wget > /dev/null 2>&1; then
  die "'wget' not found."
fi
|
||||
|
||||
# =============================================================================
# PHASE 1 — BACKUP NPM DATA FROM HA ADD-ON
# =============================================================================
step "PHASE 1 — Backup NPM from HA add-on (${HA_HOST})"

info "Testing SSH to HA host ${HA_HOST}:${HA_SSH_PORT}..."
# Non-interactive probe: BatchMode forbids password prompts, accept-new pins
# the host key on first contact without stopping on a yes/no question.
ssh_probe_opts=(
  -p "${HA_SSH_PORT}"
  -o ConnectTimeout=10
  -o BatchMode=yes
  -o StrictHostKeyChecking=accept-new
)
if ! ssh "${ssh_probe_opts[@]}" "${HA_SSH_USER}@${HA_HOST}" "echo connected" &>/dev/null; then
  die "SSH to ${HA_HOST}:${HA_SSH_PORT} failed.\n\n" \
      " Fix checklist:\n" \
      " 1. Is the Terminal & SSH add-on running in HA?\n" \
      " 2. Is your Proxmox host's public key in the add-on 'authorized_keys' config?\n" \
      " 3. Is HA_SSH_PORT correct? (check the add-on config page in HA)\n" \
      " Common ports: 22222 (default), 22\n\n" \
      " To add your key: copy output of 'cat ~/.ssh/id_rsa.pub' or 'cat ~/.ssh/id_ed25519.pub'\n" \
      " then add it to the Terminal & SSH add-on Options → authorized_keys"
fi
success "SSH connection OK"
|
||||
|
||||
info "Creating backup on HA host..."
# The heredoc below is intentionally UNQUOTED: ADDON_SLUG and TIMESTAMP expand
# locally before the script is sent, while every variable that must be
# evaluated on the HA host is escaped as backslash-dollar.  SC2087 warns about
# exactly this mixed expansion, which is deliberate here.
# shellcheck disable=SC2087
ssh -p "${HA_SSH_PORT}" "${HA_SSH_USER}@${HA_HOST}" bash << REMOTE
set -euo pipefail

ADDON_SLUG="${ADDON_SLUG}"
TIMESTAMP="${TIMESTAMP}"
BACKUP_DIR="/tmp/npm-migration-\${TIMESTAMP}"
BACKUP_ARCHIVE="/tmp/npm-backup-\${TIMESTAMP}.tar.gz"

mkdir -p "\${BACKUP_DIR}"

# ── Find the NPM data directory ──────────────────────────────────────────────
# Try host-level path first (accessible if SSH gives host shell),
# then the addon_configs path (accessible from Terminal add-on container)

DATA_FOUND=0
CANDIDATES=(
"/mnt/data/supervisor/addons/data/\${ADDON_SLUG}"
"/data/\${ADDON_SLUG}"
"/data"
)

for DIR in "\${CANDIDATES[@]}"; do
if [ -d "\${DIR}" ] && [ -f "\${DIR}/database.sqlite" ]; then
echo "Found NPM data at: \${DIR}"
DATA_DIR="\${DIR}"
DATA_FOUND=1
break
fi
done

if [ \${DATA_FOUND} -eq 0 ]; then
# Last resort — check if we can find the sqlite db anywhere
DB_PATH=\$(find /mnt/data /data -name "database.sqlite" 2>/dev/null | grep -i npm | head -1 || true)
if [ -n "\${DB_PATH}" ]; then
DATA_DIR="\$(dirname "\${DB_PATH}")"
echo "Found NPM database at: \${DATA_DIR}"
DATA_FOUND=1
fi
fi

if [ \${DATA_FOUND} -eq 0 ]; then
echo "ERROR: Could not locate NPM data directory (database.sqlite not found)."
echo "Tried: \${CANDIDATES[*]}"
echo ""
echo "If you are in the HA Terminal container, the addon data directory"
echo "may not be mounted here. See README in the repo for manual steps."
exit 1
fi

# ── Copy files into staging dir ───────────────────────────────────────────────
echo "Staging backup files..."

# NOTE: a failing test in an AND-list is exempt from set -e, so a missing
# optional directory skips its copy instead of aborting the whole backup.
[ -f "\${DATA_DIR}/database.sqlite" ] && \
cp "\${DATA_DIR}/database.sqlite" "\${BACKUP_DIR}/" && echo " ✓ database.sqlite"

[ -d "\${DATA_DIR}/nginx" ] && \
cp -a "\${DATA_DIR}/nginx" "\${BACKUP_DIR}/" && echo " ✓ nginx/"

[ -d "\${DATA_DIR}/letsencrypt" ] && \
cp -a "\${DATA_DIR}/letsencrypt" "\${BACKUP_DIR}/" && echo " ✓ letsencrypt/"

[ -d "\${DATA_DIR}/custom_ssl" ] && \
cp -a "\${DATA_DIR}/custom_ssl" "\${BACKUP_DIR}/" && echo " ✓ custom_ssl/ (if present)"

# ── Also grab addon_configs (nginx custom configs/snippets) ───────────────────
ADDON_CONFIG_DIR="/addon_configs/\${ADDON_SLUG}"
if [ -d "\${ADDON_CONFIG_DIR}" ]; then
cp -a "\${ADDON_CONFIG_DIR}" "\${BACKUP_DIR}/addon_configs" && echo " ✓ addon_configs/"
fi

# ── Create archive ─────────────────────────────────────────────────────────────
tar -czf "\${BACKUP_ARCHIVE}" -C "\${BACKUP_DIR}" .
echo ""
echo "Backup archive created: \${BACKUP_ARCHIVE}"
ls -lh "\${BACKUP_ARCHIVE}"
REMOTE

success "Backup created on HA host"
|
||||
|
||||
info "Downloading backup to Proxmox (${BACKUP_ARCHIVE})..."
# Remote and local paths are identical by construction, so reuse the variable.
scp -P "${HA_SSH_PORT}" \
    "${HA_SSH_USER}@${HA_HOST}:${BACKUP_ARCHIVE}" \
    "${BACKUP_ARCHIVE}"

ARCHIVE_SIZE="$(du -sh "${BACKUP_ARCHIVE}" | cut -f1)"
success "Backup downloaded → ${BACKUP_ARCHIVE} (${ARCHIVE_SIZE})"

# Quick eyeball check that the expected directories made it into the tarball.
info "Archive contents:"
tar -tzf "${BACKUP_ARCHIVE}" | head -40
|
||||
|
||||
# =============================================================================
# PHASE 2 — CREATE NPM LXC VIA TTECK
# =============================================================================
step "PHASE 2 — Create NPM LXC via tteck script"

# Show the operator the answers to feed the interactive installer.
printf '%b\n' "${BOLD}When the tteck script prompts you, use these settings:${RESET}"
printf '%b\n' ""
printf '%b\n' " ${BOLD}IP Address :${RESET} ${NEW_LXC_IP}/${LXC_NETMASK}"
printf '%b\n' " ${BOLD}Gateway :${RESET} ${LXC_GATEWAY}"
printf '%b\n' " ${BOLD}Bridge :${RESET} ${PROXMOX_BRIDGE}"
printf '%b\n' " ${BOLD}Storage :${RESET} ${LXC_STORAGE}"
printf '%b\n' " ${BOLD}RAM :${RESET} 1024 MB (recommended)"
printf '%b\n' " ${BOLD}Disk :${RESET} 4 GB+"
printf '%b\n' " ${BOLD}Hostname :${RESET} nginx-proxy-manager (or whatever you prefer)"
printf '%b\n' ""
warn "The script is interactive — answer the prompts above when asked."
pause

info "Fetching and running tteck NPM script..."
# NOTE(review): the tteck/Proxmox repo has been archived upstream — if this
# URL stops resolving, look for the equivalent nginx-proxy-manager.sh in the
# community-scripts/ProxmoxVE fork. TODO confirm before relying on this.
bash -c "$(wget -qLO - https://github.com/tteck/Proxmox/raw/main/ct/nginx-proxy-manager.sh)"

success "tteck script finished"
|
||||
|
||||
# =============================================================================
# PHASE 3 — RESTORE BACKUP INTO THE NEW LXC
# =============================================================================
step "PHASE 3 — Restore NPM data into the new LXC"

# tteck picks the next free VMID itself, so we have to ask the operator.
echo -e "${YELLOW}What LXC container ID was created? (shown at end of tteck output, e.g. 101)${RESET}"
read -rp "LXC ID: " LXC_ID

[[ "${LXC_ID}" =~ ^[0-9]+$ ]] || die "Invalid LXC ID — must be a number."

info "Verifying LXC ${LXC_ID} status..."
# Poll up to 15 times, 5 s apart, until the container reports 'running'.
for (( attempt = 1; attempt <= 15; attempt++ )); do
  LXC_STATUS=$(pct status "${LXC_ID}" 2>/dev/null | awk '{print $2}' || echo "unknown")
  if [[ "${LXC_STATUS}" == "running" ]]; then
    success "LXC ${LXC_ID} is running"
    break
  fi
  warn "LXC status: ${LXC_STATUS} — waiting (${attempt}/15)..."
  sleep 5
done
[[ "${LXC_STATUS}" == "running" ]] || die "LXC ${LXC_ID} is not running. Check Proxmox UI."

info "Stopping NPM service in LXC before restore..."
# Best effort — the unit may not exist yet or may already be stopped.
pct exec "${LXC_ID}" -- systemctl stop npm 2>/dev/null || true
sleep 2

info "Pushing backup archive into LXC..."
pct push "${LXC_ID}" "${BACKUP_ARCHIVE}" "/tmp/npm-backup.tar.gz"
|
||||
|
||||
info "Restoring data into /opt/npm/data/..."
# The 'RESTORE' delimiter is QUOTED, so nothing below expands locally — the
# script is delivered verbatim and runs inside the container via pct exec.
# NOTE(review): the /opt/npm/data path, the uid:gid 1000:1000 owner and the
# 'npm' systemd unit are assumed to match what the tteck NPM install lays
# down — confirm against the freshly created container.
pct exec "${LXC_ID}" -- bash << 'RESTORE'
set -euo pipefail

NPM_DATA="/opt/npm/data"

mkdir -p "${NPM_DATA}/letsencrypt" "${NPM_DATA}/nginx" "${NPM_DATA}/custom_ssl"

# Extract archive contents
STAGING="/tmp/npm-restore-staging"
mkdir -p "${STAGING}"
tar -xzf /tmp/npm-backup.tar.gz -C "${STAGING}"

echo "Staged files:"
ls -la "${STAGING}/"

# Restore database
if [ -f "${STAGING}/database.sqlite" ]; then
cp "${STAGING}/database.sqlite" "${NPM_DATA}/"
echo " ✓ database.sqlite restored"
else
echo " ⚠ No database.sqlite found — NPM will start fresh (you can reconfigure manually)"
fi

# Restore nginx configs
if [ -d "${STAGING}/nginx" ]; then
cp -a "${STAGING}/nginx/." "${NPM_DATA}/nginx/"
echo " ✓ nginx/ restored"
fi

# Restore Let's Encrypt certs
if [ -d "${STAGING}/letsencrypt" ]; then
cp -a "${STAGING}/letsencrypt/." "${NPM_DATA}/letsencrypt/"
echo " ✓ letsencrypt/ restored"
fi

# Restore custom SSL if present
if [ -d "${STAGING}/custom_ssl" ]; then
cp -a "${STAGING}/custom_ssl/." "${NPM_DATA}/custom_ssl/"
echo " ✓ custom_ssl/ restored"
fi

# Fix ownership (NPM runs as uid 1000)
chown -R 1000:1000 "${NPM_DATA}/"

# Cleanup
rm -rf "${STAGING}" /tmp/npm-backup.tar.gz

# Start NPM
systemctl start npm
sleep 3

# Confirm it's up
if systemctl is-active --quiet npm; then
echo ""
echo "NPM service is running ✓"
else
echo ""
echo "WARNING: NPM service did not start cleanly — check 'journalctl -u npm -n 50'"
fi
RESTORE

success "Restore complete, NPM started in LXC ${LXC_ID}"
|
||||
|
||||
# =============================================================================
# PHASE 4 — VERIFICATION & NEXT STEPS
# =============================================================================
step "PHASE 4 — Verify & Cutover Checklist"

# Everything from here on is manual: print the cutover checklist.
printf '%b\n' "${BOLD}1. Open the new NPM UI and check everything is there:${RESET}"
printf '%b\n' " ${CYAN}http://${NEW_LXC_IP}:81${RESET}"
printf '%b\n' " (If prompted for first-run login: admin@example.com / changeme)\n"

printf '%b\n' "${BOLD}2. Confirm proxy hosts, SSL certs, and users are restored correctly${RESET}\n"

printf '%b\n' "${BOLD}3. Update OPNsense port forwards (Firewall → NAT → Port Forward):${RESET}"
printf '%b\n' " Port 80 redirect: ${HA_HOST} → ${NEW_LXC_IP}"
printf '%b\n' " Port 443 redirect: ${HA_HOST} → ${NEW_LXC_IP}\n"

printf '%b\n' "${BOLD}4. Add trusted proxy to HA configuration.yaml:${RESET}"
printf '%b\n' "${YELLOW}"
# Unquoted delimiter: ${NEW_LXC_IP} expands into the printed snippet.
cat << YAML
http:
  use_x_forwarded_for: true
  trusted_proxies:
    - ${NEW_LXC_IP}
YAML
printf '%b\n' "${RESET}"

printf '%b\n' "${BOLD}5. Restart Home Assistant core after saving configuration.yaml${RESET}\n"

printf '%b\n' "${BOLD}6. Test external access via your domain(s)${RESET}\n"

printf '%b\n' "${BOLD}7. Once confirmed working:${RESET}"
printf '%b\n' " - Uninstall NPM add-on from HA (Settings → Add-ons → Nginx Proxy Manager)"
printf '%b\n' " - Remove old OPNsense NAT rules pointing to ${HA_HOST}"
printf '%b\n' " - Clean up backup: ${YELLOW}rm ${BACKUP_ARCHIVE}${RESET}\n"

printf '%b\n' "${GREEN}${BOLD}Migration script finished. Go check ${NEW_LXC_IP}:81 ✓${RESET}\n"
|
||||
Reference in New Issue
Block a user