#!/usr/bin/env bash
# =============================================================================
# migrate-npm-to-proxmox-lxc.sh
#
# Migrates NGINX Proxy Manager from HAOS add-on → Proxmox LXC
#
# HA Host : 10.0.0.55 (enp1s0f0, Samba share available)
# New LXC : 10.0.0.54
# Addon : Nginx Proxy Manager 2.1.0 (a0d7b954_nginxproxymanager)
#
# Backup method: HA Supervisor partial backup → pulled via Samba (no SSH needed)
# The backup named "NPM Migration Backup" must already exist in HA backups.
# (Claude already triggered this via HA MCP before this script was run)
#
# Prerequisites:
# - smbclient installed on Proxmox (apt install smbclient -y)
# - pct available (run from Proxmox node)
# - wget available (for tteck script)
# =============================================================================
set -euo pipefail

# ─── COLOURS ──────────────────────────────────────────────────────────────────
# ANSI escape sequences consumed by the logging helpers below via `echo -e`.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
BOLD='\033[1m'
RESET='\033[0m'

# Logging helpers — each prints a coloured tag followed by all arguments.
info() {
  echo -e "${CYAN}[INFO]${RESET} $*"
}
success() {
  echo -e "${GREEN}[ OK ]${RESET} $*"
}
warn() {
  echo -e "${YELLOW}[WARN]${RESET} $*"
}
# Print an error to stderr and abort the whole script with status 1.
die() {
  echo -e "\n${RED}[ERROR]${RESET} $*\n" >&2
  exit 1
}
# Banner announcing each migration phase.
step() {
  local bar="══════════════════════════════════════════════"
  echo -e "\n${BOLD}${CYAN}${bar}${RESET}"
  echo -e "${BOLD}${CYAN} $*${RESET}"
  echo -e "${BOLD}${CYAN}${bar}${RESET}\n"
}
# Block until the operator presses ENTER.
pause() {
  echo -e "\n${YELLOW}${BOLD}>>> Press ENTER when ready to continue...${RESET}"
  read -r
}
# ─── CONFIG ───────────────────────────────────────────────────────────────────
# Source Home Assistant host and Samba NAS add-on credentials.
HA_HOST="10.0.0.55"
HA_SAMBA_USER="homeassistant" # Default HA Samba NAS username
HA_SAMBA_PASS="" # Leave blank to prompt, or set here
# Add-on slug of Nginx Proxy Manager inside HA (used to locate its data
# archive inside the Supervisor backup tar).
ADDON_SLUG="a0d7b954_nginxproxymanager"
# NOTE(review): informational only — Phase 1 downloads the most recently
# modified .tar on the share rather than matching this label by name.
BACKUP_LABEL="NPM Migration Backup"
# Target LXC network settings; shown to the operator during the tteck prompts.
NEW_LXC_IP="10.0.0.54"
LXC_NETMASK="24"
LXC_GATEWAY="10.0.0.254" # OPNsense
PROXMOX_BRIDGE="vmbr0"
LXC_STORAGE="local-lvm" # Change to your Proxmox storage pool if different
# Scratch locations, timestamped so repeated runs never collide.
TIMESTAMP="$(date +%Y%m%d-%H%M%S)"
WORK_DIR="/tmp/npm-migration-${TIMESTAMP}"
SUPERVISOR_BACKUP="/tmp/npm-supervisor-backup-${TIMESTAMP}.tar"
RESTORE_STAGING="${WORK_DIR}/restore-staging"
# ─── SANITY CHECKS ────────────────────────────────────────────────────────────
# Must run as root on the Proxmox node itself (pct is host-side tooling).
[[ $EUID -eq 0 ]] || die "Run as root on the Proxmox node."
command -v pct &>/dev/null || die "'pct' not found — run this on a Proxmox node."
command -v wget &>/dev/null || die "'wget' not found."
if ! command -v smbclient &>/dev/null; then
  info "smbclient not found — installing..."
  # Don't hide apt's output — if the install fails the operator needs the error.
  apt-get install -y smbclient || die "Could not install smbclient. Run: apt install smbclient -y"
  # Re-verify: a zero exit from apt does not guarantee the binary is on PATH.
  command -v smbclient &>/dev/null || die "smbclient still not found after install."
fi
mkdir -p "${WORK_DIR}" "${RESTORE_STAGING}"
# =============================================================================
# PHASE 1 — PULL NPM SUPERVISOR BACKUP FROM HA VIA SAMBA
# =============================================================================
step "PHASE 1 — Pull NPM backup from HA via Samba"
echo -e "${BOLD}HA Samba credentials needed${RESET}"
echo -e " Share: \\\\${HA_HOST}\\backup"
echo -e " User: ${HA_SAMBA_USER} (or whatever you set in the Samba NAS add-on)"
echo ""
# Prompt for the password only when it wasn't hard-coded in CONFIG.
if [[ -z "${HA_SAMBA_PASS}" ]]; then
  read -rsp " Samba password for '${HA_SAMBA_USER}': " HA_SAMBA_PASS
  echo ""
fi
info "Listing HA backup share to find the NPM backup..."
# List the share ONCE and reuse the output. The previous version issued two
# separate smbclient calls (list, then pick-latest), which was slower and could
# race if a new backup appeared between the calls. It also wrapped the host in
# a useless $(echo ...) and piped through a no-op `tr '\n' '\n'`.
SHARE_LISTING=$(smbclient "//${HA_HOST}/backup" \
  -U "${HA_SAMBA_USER}%${HA_SAMBA_PASS}" \
  -c "ls" 2>/dev/null || true)
BACKUP_FILE=$(printf '%s\n' "${SHARE_LISTING}" \
  | awk '{print $1}' \
  | grep '\.tar$' \
  | head -20 || true)
if [[ -z "${BACKUP_FILE}" ]]; then
  die "Could not list the HA backup share at //${HA_HOST}/backup\n\n" \
    " Check:\n" \
    " 1. Samba NAS add-on is running in HA\n" \
    " 2. Username/password are correct (set in Samba add-on config)\n" \
    " 3. The 'backup' folder is enabled in the Samba add-on options\n\n" \
    " Alternatively, go to HA → Settings → System → Backups,\n" \
    " find 'NPM Migration Backup', download it manually to this machine\n" \
    " and re-run this script with SUPERVISOR_BACKUP set to that file path."
fi
echo ""
info "Backup files found on share:"
echo "${BACKUP_FILE}"
echo ""
# Find the right backup — the most recently modified one (the one just created).
# HA backup filenames are the backup slug (8-char hex), e.g. a1b2c3d4.tar
info "Fetching the most recently modified .tar from the share..."
# NOTE(review): sorting smbclient's human-readable date columns (-k3,4) is
# lexical, not chronological — fine for backups made in the same month, but
# fragile across month boundaries; confirm if that matters here.
LATEST_TAR=$(printf '%s\n' "${SHARE_LISTING}" \
  | grep '\.tar' \
  | sort -k3,4 \
  | tail -1 \
  | awk '{print $1}')
[[ -n "${LATEST_TAR}" ]] || die "Could not identify the latest backup tar file."
# Fixed log message: the two values were previously concatenated with no
# separator ("...tar/tmp/npm-supervisor-backup-...").
info "Downloading: ${LATEST_TAR} → ${SUPERVISOR_BACKUP}"
smbclient "//${HA_HOST}/backup" \
  -U "${HA_SAMBA_USER}%${HA_SAMBA_PASS}" \
  -c "get ${LATEST_TAR} ${SUPERVISOR_BACKUP}" 2>/dev/null
# smbclient's exit status is unreliable across versions; verify the file landed.
[[ -f "${SUPERVISOR_BACKUP}" ]] || die "Download failed — file not found at ${SUPERVISOR_BACKUP}"
ARCHIVE_SIZE=$(du -sh "${SUPERVISOR_BACKUP}" | cut -f1)
success "Supervisor backup downloaded → ${SUPERVISOR_BACKUP} (${ARCHIVE_SIZE})"
# ── Extract NPM addon data from the Supervisor backup format ──────────────────
# Supervisor .tar structure:
#   backup.json              ← metadata
#   {addon_slug}/
#     addon.tar.gz           ← addon DATA (database, certs, nginx)
#     addon_config.tar.gz    ← addon CONFIG files
info "Extracting NPM data from Supervisor backup..."
tar -xf "${SUPERVISOR_BACKUP}" -C "${WORK_DIR}" 2>/dev/null || \
  die "Failed to extract supervisor backup. Is it a valid HA backup file?"
# (Removed an unused ADDON_DIR=$(find ...) here: the variable was never read,
# and its `-name "*.tar.gz" -o -type d` expression didn't match the stated
# intent because of find's -o precedence.)
info "Backup contents:"
ls -la "${WORK_DIR}/"
# Find the addon.tar.gz inside any subdirectory — the subfolder may be named
# by a hash rather than the addon slug.
ADDON_DATA_TAR=$(find "${WORK_DIR}" -name "addon.tar.gz" | head -1)
ADDON_CONFIG_TAR=$(find "${WORK_DIR}" -name "addon_config.tar.gz" | head -1)
if [[ -z "${ADDON_DATA_TAR}" ]]; then
  # Some versions put it at root with the slug as filename
  ADDON_DATA_TAR=$(find "${WORK_DIR}" -name "${ADDON_SLUG}.tar.gz" | head -1)
fi
[[ -n "${ADDON_DATA_TAR}" ]] || die "Could not find addon.tar.gz in the supervisor backup.\n" \
  " This may not be the NPM backup — check HA backups and confirm\n" \
  " 'NPM Migration Backup' was created successfully before re-running."
info "Extracting addon data archive: ${ADDON_DATA_TAR}"
# Tolerate partial-extract warnings deliberately: Phase 3 checks for each
# expected item (database, nginx, certs) and reports what is missing.
tar -xzf "${ADDON_DATA_TAR}" -C "${RESTORE_STAGING}" 2>/dev/null || true
if [[ -n "${ADDON_CONFIG_TAR}" ]]; then
  info "Extracting addon config archive: ${ADDON_CONFIG_TAR}"
  mkdir -p "${RESTORE_STAGING}/addon_config"
  tar -xzf "${ADDON_CONFIG_TAR}" -C "${RESTORE_STAGING}/addon_config" 2>/dev/null || true
fi
info "Staged restore contents:"
find "${RESTORE_STAGING}" -maxdepth 3 | head -30
success "NPM data extracted and staged"
# =============================================================================
# PHASE 2 — CREATE NPM LXC VIA TTECK
# =============================================================================
step "PHASE 2 — Create NPM LXC via tteck script"
echo -e "${BOLD}When the tteck script prompts you, use these settings:${RESET}"
echo ""
echo -e " ${BOLD}IP Address :${RESET} ${NEW_LXC_IP}/${LXC_NETMASK}"
echo -e " ${BOLD}Gateway :${RESET} ${LXC_GATEWAY}"
echo -e " ${BOLD}Bridge :${RESET} ${PROXMOX_BRIDGE}"
echo -e " ${BOLD}Storage :${RESET} ${LXC_STORAGE}"
echo -e " ${BOLD}RAM :${RESET} 1024 MB (recommended)"
echo -e " ${BOLD}Disk :${RESET} 4 GB+"
echo -e " ${BOLD}Hostname :${RESET} nginx-proxy-manager (or whatever you prefer)"
echo ""
warn "The script is interactive — answer the prompts above when asked."
pause
info "Fetching and running tteck NPM script..."
# Fetch first, then run. The old `bash -c "$(wget -qLO - URL)"` form silently
# executed an EMPTY string when the download failed (dead URL, no network) and
# then reported success. NOTE(review): the tteck repo has been archived and
# migrated to community-scripts/ProxmoxVE — update this URL if the raw link
# stops resolving.
TTECK_URL="https://github.com/tteck/Proxmox/raw/main/ct/nginx-proxy-manager.sh"
TTECK_SCRIPT=$(wget -qLO - "${TTECK_URL}") || die "Failed to download the tteck script from ${TTECK_URL}"
[[ -n "${TTECK_SCRIPT}" ]] || die "Downloaded tteck script is empty — check ${TTECK_URL}"
bash -c "${TTECK_SCRIPT}"
success "tteck script finished"
# =============================================================================
# PHASE 3 — RESTORE BACKUP INTO THE NEW LXC
# =============================================================================
step "PHASE 3 — Restore NPM data into the new LXC"
echo -e "${YELLOW}What LXC container ID was created? (shown at end of tteck output, e.g. 101)${RESET}"
read -rp "LXC ID: " LXC_ID
[[ "${LXC_ID}" =~ ^[0-9]+$ ]] || die "Invalid LXC ID — must be a number."

# Poll up to 15 times (5 s apart) for the container to reach "running".
info "Verifying LXC ${LXC_ID} status..."
attempt=1
LXC_STATUS="unknown"
while (( attempt <= 15 )); do
  LXC_STATUS=$(pct status "${LXC_ID}" 2>/dev/null | awk '{print $2}' || echo "unknown")
  if [[ "${LXC_STATUS}" == "running" ]]; then
    success "LXC ${LXC_ID} is running"
    break
  fi
  warn "LXC status: ${LXC_STATUS} — waiting (${attempt}/15)..."
  sleep 5
  attempt=$(( attempt + 1 ))
done
[[ "${LXC_STATUS}" == "running" ]] || die "LXC ${LXC_ID} is not running. Check Proxmox UI."

# Quiesce the service so we don't overwrite files NPM has open.
info "Stopping NPM service in LXC before restore..."
pct exec "${LXC_ID}" -- systemctl stop npm 2>/dev/null || true
sleep 2

# Bundle the staged data and push it into the container's /tmp.
info "Creating restore tarball from staged data..."
RESTORE_ARCHIVE="${WORK_DIR}/npm-restore.tar.gz"
tar -czf "${RESTORE_ARCHIVE}" -C "${RESTORE_STAGING}" .
info "Pushing restore archive into LXC..."
pct push "${LXC_ID}" "${RESTORE_ARCHIVE}" "/tmp/npm-restore.tar.gz"
info "Restoring data into /opt/npm/data/..."
# The heredoc below runs INSIDE the LXC via pct exec. The 'RESTORE' delimiter
# is quoted, so nothing is expanded on the Proxmox host — every variable is
# evaluated by the container's own bash.
pct exec "${LXC_ID}" -- bash << 'RESTORE'
set -euo pipefail
# NOTE(review): assumes the tteck NPM install keeps its state in /opt/npm/data —
# confirm against the actual installed layout before relying on this path.
NPM_DATA="/opt/npm/data"
mkdir -p "${NPM_DATA}/letsencrypt" "${NPM_DATA}/nginx" "${NPM_DATA}/custom_ssl"
STAGING="/tmp/npm-restore-staging"
mkdir -p "${STAGING}"
# Extraction errors are deliberately tolerated: the per-item checks below
# report exactly which pieces were (or were not) found.
tar -xzf /tmp/npm-restore.tar.gz -C "${STAGING}" 2>/dev/null || true
echo "Staged contents:"
find "${STAGING}" -maxdepth 3 | head -30
echo ""
# ── Restore database ──────────────────────────────────────────────────────────
# NPM's whole configuration (proxy hosts, users, cert metadata) lives in this
# single SQLite file; without it NPM starts with a fresh first-run setup.
DB=$(find "${STAGING}" -name "database.sqlite" | head -1)
if [[ -n "${DB}" ]]; then
cp "${DB}" "${NPM_DATA}/database.sqlite"
echo " ✓ database.sqlite"
else
echo " ⚠ database.sqlite not found — NPM will start fresh"
fi
# ── Restore nginx configs ─────────────────────────────────────────────────────
# cp -a "${DIR}/." copies the directory CONTENTS (preserving perms/ownership)
# into the target without nesting an extra directory level.
NGINX_DIR=$(find "${STAGING}" -type d -name "nginx" | head -1)
if [[ -n "${NGINX_DIR}" ]]; then
cp -a "${NGINX_DIR}/." "${NPM_DATA}/nginx/"
echo " ✓ nginx/"
fi
# ── Restore Let's Encrypt certs ───────────────────────────────────────────────
LE_DIR=$(find "${STAGING}" -type d -name "letsencrypt" | head -1)
if [[ -n "${LE_DIR}" ]]; then
cp -a "${LE_DIR}/." "${NPM_DATA}/letsencrypt/"
echo " ✓ letsencrypt/"
fi
# ── Restore custom SSL ────────────────────────────────────────────────────────
SSL_DIR=$(find "${STAGING}" -type d -name "custom_ssl" | head -1)
if [[ -n "${SSL_DIR}" ]]; then
cp -a "${SSL_DIR}/." "${NPM_DATA}/custom_ssl/"
echo " ✓ custom_ssl/"
fi
# Fix ownership. NOTE(review): assumes NPM runs as uid/gid 1000 in this LXC —
# the tteck install may run NPM as root instead; verify before relying on this.
chown -R 1000:1000 "${NPM_DATA}/"
# Cleanup the pushed archive and staging area inside the container.
rm -rf "${STAGING}" /tmp/npm-restore.tar.gz
# Start NPM and give it a few seconds to come up before checking.
systemctl start npm
sleep 4
if systemctl is-active --quiet npm; then
echo ""
echo "NPM service is running ✓"
else
echo ""
echo "WARNING: NPM did not start cleanly."
echo "Check logs with: journalctl -u npm -n 50"
fi
RESTORE
success "Restore complete, NPM started in LXC ${LXC_ID}"
# =============================================================================
# PHASE 4 — VERIFICATION & NEXT STEPS
# =============================================================================
step "PHASE 4 — Verify & Cutover Checklist"
echo -e "${BOLD}1. Open the new NPM UI and check everything is there:${RESET}"
echo -e " ${CYAN}http://${NEW_LXC_IP}:81${RESET}"
echo -e " (If prompted for first-run login: admin@example.com / changeme)\n"
echo -e "${BOLD}2. Confirm proxy hosts, SSL certs, and users are restored correctly${RESET}\n"
echo -e "${BOLD}3. Update OPNsense port forwards (Firewall → NAT → Port Forward):${RESET}"
# Fixed: old and new IPs were previously concatenated with no separator
# ("10.0.0.5510.0.0.54"), making the instruction unreadable.
echo -e " Port 80 redirect: ${HA_HOST} → ${NEW_LXC_IP}"
echo -e " Port 443 redirect: ${HA_HOST} → ${NEW_LXC_IP}\n"
echo -e "${BOLD}4. Add trusted proxy to HA configuration.yaml:${RESET}"
echo -e "${YELLOW}"
cat << YAML
http:
  use_x_forwarded_for: true
  trusted_proxies:
    - ${NEW_LXC_IP}
YAML
echo -e "${RESET}"
echo -e "${BOLD}5. Restart Home Assistant core after saving configuration.yaml${RESET}\n"
echo -e "${BOLD}6. Test external access via your domain(s)${RESET}\n"
echo -e "${BOLD}7. Once confirmed working:${RESET}"
echo -e " - Uninstall NPM add-on from HA (Settings → Add-ons → Nginx Proxy Manager)"
echo -e " - Remove old OPNsense NAT rules pointing to ${HA_HOST}"
# Fixed: previously referenced undefined ${BACKUP_ARCHIVE}, which under
# `set -u` aborted the whole script with "unbound variable" right here.
echo -e " - Clean up backup: ${YELLOW}rm ${SUPERVISOR_BACKUP}${RESET}\n"
echo -e "${GREEN}${BOLD}Migration script finished. Go check ${NEW_LXC_IP}:81 ✓${RESET}\n"