diff --git a/README.md b/README.md index c3a1b89..30d74d2 100644 --- a/README.md +++ b/README.md @@ -1,13 +1 @@ -# 🛡️ Wallarm Deployment Toolkit - -This repository contains automated scripts to deploy the Wallarm Filtering Node in various environments. Whether you are using a virtual machine (NGINX Dynamic Module) or a containerized environment (Docker/Podman), these scripts ensure a "Bank-Grade" configuration. - -**Repository:** `https://git.sechpoint.app/customer-engineering/wallarm` - -## Download and run the Deployment Script -### for Container (Docker) deployments: -```bash -curl -sL "https://git.sechpoint.app/customer-engineering/wallarm/-/raw/main/wallarm-deploy-ct.sh" > wallarm-deploy-ct.sh -chmod +x wallarm-deploy-ct.sh -./wallarm-deploy-ct.sh -``` \ No newline at end of file +test \ No newline at end of file diff --git a/binaries/README.md b/binaries/README.md new file mode 100644 index 0000000..bfe08a7 --- /dev/null +++ b/binaries/README.md @@ -0,0 +1,16 @@ +# Docker Static Binaries + +This directory contains Docker static binaries for offline installation. 
+ +- `docker-29.2.1.tgz`: Docker 29.2.1 static binary for x86_64 +- `docker-29.2.1.tgz.sha256`: SHA256 checksum for verification + +## Usage +```bash +# Verify integrity +sha256sum -c docker-29.2.1.tgz.sha256 + +# Extract and install +tar xzvf docker-29.2.1.tgz +sudo cp docker/* /usr/bin/ +``` diff --git a/binaries/docker-29.2.1.tgz b/binaries/docker-29.2.1.tgz new file mode 100644 index 0000000..a07c074 Binary files /dev/null and b/binaries/docker-29.2.1.tgz differ diff --git a/binaries/docker-29.2.1.tgz.sha256 b/binaries/docker-29.2.1.tgz.sha256 new file mode 100644 index 0000000..f56820a --- /dev/null +++ b/binaries/docker-29.2.1.tgz.sha256 @@ -0,0 +1 @@ +995b1d0b51e96d551a3b49c552c0170bc6ce9f8b9e0866b8c15bbc67d1cf93a3 binaries/docker-29.2.1.tgz diff --git a/images/README.md b/images/README.md new file mode 100644 index 0000000..5089ea4 --- /dev/null +++ b/images/README.md @@ -0,0 +1,15 @@ +# Wallarm Docker Images + +This directory contains Wallarm node Docker images for offline deployment. 
+ +- `wallarm-node-6.11.0-rc1.tar.gz`: Wallarm node version 6.11.0-rc1 +- `wallarm-node-6.11.0-rc1.tar.gz.sha256`: SHA256 checksum for verification + +## Usage +```bash +# Verify integrity +sha256sum -c wallarm-node-6.11.0-rc1.tar.gz.sha256 + +# Load into Docker +gunzip -c wallarm-node-6.11.0-rc1.tar.gz | docker load +``` diff --git a/images/wallarm-node-6.11.0-rc1.tar.gz b/images/wallarm-node-6.11.0-rc1.tar.gz new file mode 100644 index 0000000..e78bf40 Binary files /dev/null and b/images/wallarm-node-6.11.0-rc1.tar.gz differ diff --git a/images/wallarm-node-6.11.0-rc1.tar.gz.sha256 b/images/wallarm-node-6.11.0-rc1.tar.gz.sha256 new file mode 100644 index 0000000..09c5396 --- /dev/null +++ b/images/wallarm-node-6.11.0-rc1.tar.gz.sha256 @@ -0,0 +1 @@ +ab4d9c6d2fdde6a855a0a1dc2db8cce6168926a39a45d715dc3dcf2ff0de85c5 images/wallarm-node-6.11.0-rc1.tar.gz diff --git a/wallarm-ct-check.sh b/wallarm-ct-check.sh index 8472f70..1f02d73 100755 --- a/wallarm-ct-check.sh +++ b/wallarm-ct-check.sh @@ -54,7 +54,7 @@ else CURL_INSECURE_FLAG="" fi -# Internal registry endpoints (from stealth deployment) new +# Internal registry endpoints (from stealth deployment) INTERNAL_DOCKER_REGISTRY="https://deployment:elqXBsyT4BGXPYPeD07or8hT0Lb9Lpf@hub.ct.sechpoint.app" INTERNAL_DOCKER_DOWNLOAD="https://deployment:elqXBsyT4BGXPYPeD07or8hT0Lb9Lpf@ct.sechpoint.app" # Extracted hostnames (without credentials) for logging and error messages @@ -187,6 +187,7 @@ validate_required_commands() { "getent" # Required for checking group existence "groupadd" # Required for creating docker group (sudo) "usermod" # Required for adding user to docker group (sudo) + "iptables" # Required for Docker network bridge creation (Docker static binaries v1.4+) ) # Helper function to check if a command exists (including system directories) @@ -257,6 +258,32 @@ validate_required_commands() { return 1 fi + # Special check: iptables version must be 1.4 or higher for Docker static binaries + log_message "INFO" 
"Checking iptables version (requires 1.4+ for Docker)..." + if command_exists iptables; then + local iptables_version + iptables_version=$(iptables --version 2>/dev/null | head -1 | grep -o '[0-9]\+\.[0-9]\+' | head -1) + if [ -n "$iptables_version" ]; then + log_message "INFO" "Found iptables version $iptables_version" + # Compare version numbers (basic check for 1.4 or higher) + local major_version minor_version + major_version=$(echo "$iptables_version" | cut -d. -f1) + minor_version=$(echo "$iptables_version" | cut -d. -f2) + + if [ "$major_version" -lt 1 ] || ([ "$major_version" -eq 1 ] && [ "$minor_version" -lt 4 ]); then + add_error "iptables version $iptables_version is too old. Docker requires iptables 1.4 or higher." + log_message "ERROR" "Please upgrade iptables to version 1.4 or higher." + return 1 + fi + else + log_message "WARNING" "Could not determine iptables version, continuing anyway" + fi + else + # Should not happen since iptables is in required commands, but just in case + add_error "iptables command not found (required for Docker network bridge)" + return 1 + fi + log_message "SUCCESS" "All required system commands are available" return 0 } diff --git a/wallarm-ct-deploy.sh b/wallarm-ct-deploy.sh index 95aadef..637edba 100755 --- a/wallarm-ct-deploy.sh +++ b/wallarm-ct-deploy.sh @@ -1,6 +1,6 @@ #!/bin/bash # ============================================================================== -# WALLARM DEPLOYMENT SCRIPT - V1.1 +# WALLARM DEPLOYMENT SCRIPT - V1.2 # ============================================================================== # Purpose: Deploy Wallarm filtering node after preflight check # Features: @@ -688,8 +688,132 @@ Check disk space and permissions, then try manual extraction: sudo cp docker/* /usr/bin/" } + # Ensure binaries are executable + log_message "INFO" "Setting executable permissions on Docker binaries..." 
+ sudo chmod +x /usr/bin/dockerd /usr/bin/docker 2>/dev/null || { + log_message "WARNING" "Could not set executable permissions on Docker binaries" + } + + # Verify Docker binaries work + log_message "INFO" "Verifying Docker binaries..." + if ! sudo /usr/bin/dockerd --version 2>/dev/null; then + fail_with_remediation "Docker binary verification failed" \ +"Docker binary (/usr/bin/dockerd) appears to be corrupted or incompatible. +The binary was extracted from $binary_path but doesn't run. + +Check the binary: + sudo file /usr/bin/dockerd + sudo ls -la /usr/bin/dockerd + sudo /usr/bin/dockerd --version + +The Docker static binary might be for wrong architecture or corrupted. +Try downloading manually: + curl -L '$DOCKER_STATIC_BASE_URL/$ARCHITECTURE/docker-$DOCKER_VERSION.tgz' -o docker.tgz + tar xzvf docker.tgz + sudo cp docker/* /usr/bin/" + else + local docker_version + docker_version=$(sudo /usr/bin/dockerd --version 2>&1 | head -1) + log_message "SUCCESS" "Docker binary verified: $docker_version" + fi + # Cleanup extracted directory + log_message "INFO" "Cleaning up extracted Docker binaries directory..." rm -rf docker + log_message "INFO" "Cleanup completed" + + # DEBUG: Mark start of docker group section + log_message "INFO" "=== Starting docker group creation ===" + + # Create docker group (required for systemd socket configuration and dockerd --group) + log_message "INFO" "Creating docker group for Docker socket access..." + + # Check if group already exists + log_message "INFO" "Checking if docker group exists..." + local getent_output + if getent_output=$(getent group docker 2>&1); then + getent_exit=0 + else + getent_exit=$? 
+ fi + log_message "INFO" "getent group docker result: exit=$getent_exit, output='$getent_output'" + + if [ $getent_exit -eq 0 ]; then + log_message "SUCCESS" "Docker group already exists: $getent_output" + else + # Attempt to create docker group with error capture + log_message "INFO" "Attempting to create docker group with sudo groupadd docker..." + local groupadd_output + if groupadd_output=$(sudo groupadd docker 2>&1); then + groupadd_exit=0 + else + groupadd_exit=$? + fi + + if [ $groupadd_exit -eq 0 ]; then + log_message "SUCCESS" "Created docker group" + else + log_message "ERROR" "Failed to create docker group (exit code: $groupadd_exit)" + log_message "INFO" "groupadd command output: $groupadd_output" + + # Check if group was somehow created despite error + log_message "INFO" "Checking if docker group was created despite groupadd failure..." + local check_getent_output + if check_getent_output=$(getent group docker 2>&1); then + check_getent_exit=0 + else + check_getent_exit=$? + fi + log_message "INFO" "Post-failure check: exit=$check_getent_exit, output='$check_getent_output'" + + if [ $check_getent_exit -eq 0 ]; then + log_message "WARNING" "Docker group exists despite groupadd failure, continuing..." + else + fail_with_remediation "Cannot create docker group" \ +"The docker group is required for Docker socket access. Please create it manually: + +1. Check if groupadd command is available: which groupadd +2. Check permissions: sudo -v +3. Manual group creation: sudo groupadd docker +4. Verify: getent group docker + +If groupadd fails, you may need to: +- Check system user/group database +- Use alternative: sudo addgroup docker (Debian/Ubuntu) +- Edit /etc/group manually (advanced users only)" + fi + fi + fi + + # Final verification that docker group exists + log_message "INFO" "Final verification of docker group existence..." 
+ local final_getent_output + if final_getent_output=$(getent group docker 2>&1); then + final_getent_exit=0 + else + final_getent_exit=$? + fi + log_message "INFO" "Final getent result: exit=$final_getent_exit, output='$final_getent_output'" + + if [ $final_getent_exit -ne 0 ]; then + fail_with_remediation "Docker group verification failed" \ +"The docker group does not exist after creation attempts. This will cause Docker startup to fail. + +Please create the docker group manually and re-run the script: +1. sudo groupadd docker +2. Verify: getent group docker | grep docker +3. Re-run this script" + fi + + # Log group details for debugging + local docker_gid + docker_gid=$(echo "$final_getent_output" | cut -d: -f3) + log_message "INFO" "Docker group details: GID=$docker_gid" + + log_message "SUCCESS" "Docker group verified and ready (GID: $docker_gid)" + + # DEBUG: Mark end of docker group section + log_message "INFO" "=== Finished docker group creation ===" # Configure Docker daemon for LXC (VFS storage driver, cgroupfs) log_message "INFO" "Configuring Docker daemon for LXC (VFS storage driver, cgroupfs)..." @@ -715,6 +839,9 @@ EOF case "$INIT_SYSTEM" in "systemd") + # DEBUG: Mark start of systemd configuration + log_message "INFO" "=== Starting systemd configuration ===" + # Create systemd service files sudo tee /etc/systemd/system/docker.service > /dev/null <<'EOF' [Unit] @@ -758,8 +885,82 @@ WantedBy=sockets.target EOF sudo systemctl daemon-reload - sudo systemctl enable docker - sudo systemctl start docker + + log_message "INFO" "Enabling Docker service to start on boot..." + if ! sudo systemctl enable docker; then + log_message "ERROR" "systemctl enable docker failed with exit code: $?" + fail_with_remediation "Failed to enable Docker service" \ +"Docker service could not be enabled to start on boot. Common causes: +1. Docker socket unit (docker.socket) has configuration errors +2. The docker group may not exist +3. 
Systemd unit file has syntax errors + +Check docker.socket status: + sudo systemctl status docker.socket --no-pager + +Verify docker group exists: + getent group docker + +Check systemd unit files: + sudo systemctl cat docker.socket + sudo systemctl cat docker.service" + fi + + log_message "INFO" "Starting Docker service (systemd)..." + # Start the service and capture exit code + if sudo systemctl start docker; then + start_exit=0 + else + start_exit=$? + fi + + # Give Docker a moment to start or fail + sleep 2 + + # Check if service is actually active + if ! sudo systemctl is-active docker --quiet; then + log_message "ERROR" "Docker service failed to start (systemctl start exit: $start_exit)" + log_message "INFO" "Checking docker.socket status..." + sudo systemctl status docker.socket --no-pager 2>&1 | head -20 || true + + log_message "INFO" "Checking docker.service status..." + sudo systemctl status docker.service --no-pager 2>&1 | head -30 || true + + log_message "INFO" "Checking Docker daemon logs..." + sudo journalctl -u docker --no-pager -n 30 2>&1 | head -50 || true + + log_message "INFO" "Checking Docker socket logs..." + sudo journalctl -u docker.socket --no-pager -n 20 2>&1 | head -30 || true + + fail_with_remediation "Failed to start Docker service" \ +"Docker service failed to start. Common causes: +1. Missing iptables (Docker static binaries require iptables v1.4+ for network bridge) +2. Docker daemon configuration error (check /etc/docker/daemon.json) +3. Storage driver issues (VFS may not be compatible) +4. Cgroup configuration problems +5. 
Port conflicts or resource limits + +Latest Docker daemon logs: +$(sudo journalctl -u docker --no-pager -n 30 2>&1 | tail -20) + +Check Docker configuration: + sudo cat /etc/docker/daemon.json + +Verify iptables is installed: + which iptables || echo 'iptables not found' + iptables --version 2>/dev/null || echo 'Cannot check version' + +Install iptables if missing: + # Debian/Ubuntu: sudo apt-get update && sudo apt-get install -y iptables + # RHEL/CentOS: sudo yum install -y iptables + # Alpine: sudo apk add iptables + +Verify docker group exists: + getent group docker + +Manual start attempt for debugging: + sudo dockerd --group docker --debug" + fi ;; "openrc") @@ -780,7 +981,36 @@ EOF sudo chmod +x /etc/init.d/docker sudo rc-update add docker default - sudo rc-service docker start + + log_message "INFO" "Starting Docker service (OpenRC)..." + if ! sudo rc-service docker start; then + log_message "ERROR" "rc-service docker start failed with exit code: $?" + fail_with_remediation "Failed to start Docker service (OpenRC)" \ +"Docker service failed to start under OpenRC. Common causes: +1. Missing iptables (Docker static binaries require iptables v1.4+ for network bridge) +2. Docker socket or port conflicts +3. Missing dependencies +4. 
Docker configuration errors + +Check OpenRC logs: + sudo rc-service docker status + sudo cat /var/log/docker.log 2>/dev/null || echo "No docker.log found" + +Verify iptables is installed: + which iptables || echo 'iptables not found' + iptables --version 2>/dev/null || echo 'Cannot check version' + +Install iptables if missing: + # Alpine: sudo apk add iptables + # Debian/Ubuntu: sudo apt-get update && sudo apt-get install -y iptables + # RHEL/CentOS: sudo yum install -y iptables + +Verify docker group exists: + getent group docker + +Manual start attempt: + sudo dockerd --group docker --debug" + fi ;; "sysvinit") @@ -838,7 +1068,35 @@ EOF sudo chmod +x /etc/init.d/docker sudo update-rc.d docker defaults - sudo service docker start + + log_message "INFO" "Starting Docker service (SysV init)..." + if ! sudo service docker start; then + log_message "ERROR" "service docker start failed with exit code: $?" + fail_with_remediation "Failed to start Docker service (SysV init)" \ +"Docker service failed to start under SysV init. Common causes: +1. Missing iptables (Docker static binaries require iptables v1.4+ for network bridge) +2. Docker socket or port conflicts +3. Missing dependencies or configuration errors +4. The docker group may not exist or be accessible + +Check service status: + sudo service docker status + +Verify iptables is installed: + which iptables || echo 'iptables not found' + iptables --version 2>/dev/null || echo 'Cannot check version' + +Install iptables if missing: + # Debian/Ubuntu: sudo apt-get update && sudo apt-get install -y iptables + # RHEL/CentOS: sudo yum install -y iptables + # Alpine: sudo apk add iptables + +Verify docker group exists: + getent group docker + +Check for Docker logs: + sudo dockerd --group docker --debug 2>&1 | head -50" + fi ;; *) @@ -861,8 +1119,15 @@ EOF fail_with_remediation "Docker failed to start" \ "Docker installation completed but service failed to start: 1. 
Check Docker logs: journalctl -u docker (systemd) or /var/log/docker.log -2. Verify configuration: sudo dockerd --debug -3. Manual start: sudo dockerd --group docker &" +2. Verify iptables is installed (Docker static binaries require iptables v1.4+): + which iptables || echo 'iptables not found' + iptables --version 2>/dev/null || echo 'Cannot check version' +3. Install iptables if missing: + # Debian/Ubuntu: sudo apt-get update && sudo apt-get install -y iptables + # RHEL/CentOS: sudo yum install -y iptables + # Alpine: sudo apk add iptables +4. Verify configuration: sudo dockerd --debug +5. Manual start: sudo dockerd --group docker &" fi # Verify Docker is using VFS storage driver @@ -886,9 +1151,6 @@ EOF read -r -p "$(echo -e "${YELLOW}Add $(whoami) to docker group? (Y/n): ${NC}")" -n 1 echo if [[ ! $REPLY =~ ^[Nn]$ ]]; then - if ! getent group docker >/dev/null; then - sudo groupadd docker 2>/dev/null || log_message "WARNING" "Failed to create docker group (may already exist)" - fi sudo usermod -aG docker "$(whoami)" 2>/dev/null && \ log_message "SUCCESS" "Added $(whoami) to docker group (log out and back in for changes)" else @@ -1277,7 +1539,7 @@ main() { clear echo -e "${BLUE}${BOLD}" echo "╔══════════════════════════════════════════════════════════════╗" - echo "║ WALLARM DEPLOYMENT SCRIPT - V1.1 ║" + echo "║ WALLARM DEPLOYMENT SCRIPT - V1.2 ║" echo "║ LXC-Optimized Filtering Node Deployment ║" echo "╚══════════════════════════════════════════════════════════════╝${NC}" echo -e "\n${YELLOW}Starting deployment at: $(date)${NC}" diff --git a/wallarm-ct-deploy.sh.backup b/wallarm-ct-deploy.sh.backup new file mode 100755 index 0000000..15724a9 --- /dev/null +++ b/wallarm-ct-deploy.sh.backup @@ -0,0 +1,1373 @@ +#!/bin/bash +# ============================================================================== +# WALLARM DEPLOYMENT SCRIPT - V1.2 +# ============================================================================== +# Purpose: Deploy Wallarm filtering node 
after preflight check +# Features: +# - Reads preflight check results from .env file +# - Interactive configuration (cloud region, ports, token, upstream) +# - Docker installation with LXC optimization (VFS storage driver) +# - Wallarm node deployment with persistence +# - Deployment verification with handshake test +# - DAU-friendly error handling with remediation +# ============================================================================== + +# Color definitions for better UX +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[1;34m' +CYAN='\033[0;36m' +MAGENTA='\033[0;35m' +BOLD='\033[1m' +NC='\033[0m' # No Color + +# Strict error handling +set -euo pipefail +# Simple error handler for early failures (before log_message is defined) +early_error_handler() { + echo -e "${RED}${BOLD}[ERROR]${NC} Script failed at line $LINENO. Command: $BASH_COMMAND" >&2 + exit 1 +} +trap early_error_handler ERR + +# Extract hostname from URL (strip protocol and credentials for safe logging) +extract_hostname_from_url() { + local url="$1" + # Remove protocol + local hostpart="${url#*://}" + # Remove credentials if present (username:password@) + hostpart="${hostpart#*@}" + # Remove port and path + hostpart="${hostpart%%[:/]*}" + echo "$hostpart" +} + +# Configuration +ENV_FILE=".env" +LOG_FILE="${HOME:-.}/logs/wallarm-deployment.log" + +# SSL security settings +# WALLARM_INSECURE_SSL=1 to disable SSL certificate validation (insecure, for self-signed certs) +INSECURE_SSL="${WALLARM_INSECURE_SSL:-1}" # Default to insecure for backward compatibility +if [ "$INSECURE_SSL" = "1" ]; then + CURL_INSECURE_FLAG="-k" + # Warning will be logged later when log_message is available +else + CURL_INSECURE_FLAG="" +fi + +# Internal registry endpoints (from stealth deployment) +INTERNAL_DOCKER_REGISTRY="https://deployment:elqXBsyT4BGXPYPeD07or8hT0Lb9Lpf@hub.ct.sechpoint.app" +INTERNAL_DOCKER_DOWNLOAD="https://deployment:elqXBsyT4BGXPYPeD07or8hT0Lb9Lpf@ct.sechpoint.app" +# 
Extracted hostnames (without credentials) for Docker operations +DOCKER_REGISTRY_HOST=$(extract_hostname_from_url "$INTERNAL_DOCKER_REGISTRY") +DOCKER_DOWNLOAD_HOST=$(extract_hostname_from_url "$INTERNAL_DOCKER_DOWNLOAD") + +DOCKER_VERSION="29.2.1" # Version from stealth deployment guide +DOCKER_STATIC_BASE_URL="${INTERNAL_DOCKER_DOWNLOAD}/linux/static/stable" +WALLARM_IMAGE_SOURCE="${DOCKER_REGISTRY_HOST}/wallarm/node:latest" +WALLARM_IMAGE_TARGET="wallarm/node:latest" + + + +# Deployment variables (set during execution) +CLOUD_REGION="" +API_HOST="" +INGRESS_PORT="" +MONITORING_PORT="" +UPSTREAM_IP="" +UPSTREAM_PORT="" +WALLARM_TOKEN="" +INSTANCE_NAME="" +INSTANCE_DIR="" + +# Resource reachability from check script +US_CLOUD_REACHABLE="false" +EU_CLOUD_REACHABLE="false" +REGISTRY_REACHABLE="false" +DOWNLOAD_REACHABLE="false" + +# ============================================================================== +# LOGGING & ERROR HANDLING FUNCTIONS +# ============================================================================== + +log_message() { + local level="$1" + local message="$2" + local timestamp + timestamp=$(date '+%Y-%m-%d %H:%M:%S') + + case "$level" in + "INFO") color="${BLUE}" ;; + "SUCCESS") color="${GREEN}" ;; + "WARNING") color="${YELLOW}" ;; + "ERROR") color="${RED}" ;; + "DEBUG") color="${CYAN}" ;; + *) color="${NC}" ;; + esac + + echo -e "${color}[${timestamp}] ${level}: ${message}${NC}" >&2 + echo "[${timestamp}] ${level}: ${message}" >> "$LOG_FILE" +} + +fail_with_remediation() { + local error_msg="$1" + local remediation="$2" + + log_message "ERROR" "$error_msg" + echo -e "\n${RED}${BOLD}╔══════════════════════════════════════════════════════════════╗${NC}" + echo -e "${RED}${BOLD}║ DEPLOYMENT FAILED ║${NC}" + echo -e "${RED}${BOLD}╚══════════════════════════════════════════════════════════════╝${NC}" + echo -e "\n${YELLOW}${BOLD}Root Cause:${NC} $error_msg" + echo -e "\n${YELLOW}${BOLD}How to Fix:${NC}" + echo -e "$remediation" + echo -e 
"\n${YELLOW}Check the full log for details:${NC} $LOG_FILE" + exit 1 +} + +# ============================================================================== +# PREFLIGHT CHECK VERIFICATION +# ============================================================================== + +verify_preflight_check() { + log_message "INFO" "Verifying preflight check results..." + + if [ ! -f "$ENV_FILE" ]; then + log_message "ERROR" "Preflight check file not found: $ENV_FILE" + echo -e "\n${YELLOW}Preflight check has not been run or .env file is missing.${NC}" + echo -e "${YELLOW}Would you like to run the preflight check now?${NC}" + read -r -p "$(echo -e "${YELLOW}Run preflight check? (Y/n): ${NC}")" -n 1 + echo + if [[ ! $REPLY =~ ^[Nn]$ ]]; then + echo -e "${CYAN}Running preflight check...${NC}" + if ! ./wallarm-ct-check.sh; then + fail_with_remediation "Preflight check failed" \ + "Run the preflight check manually and fix any issues: +1. ./wallarm-ct-check.sh +2. Review the errors in $ENV_FILE +3. Fix the issues and run this script again" + fi + else + fail_with_remediation "Preflight check required" \ + "Run the preflight check before deployment: +1. ./wallarm-ct-check.sh +2. Review results in $ENV_FILE +3. 
Run this script again" + fi + fi + + # Load environment variables from .env file + # Use a safer approach than sourcing (avoid code injection) + while IFS='=' read -r key value; do + # Remove comments and empty lines + [[ "$key" =~ ^#.*$ ]] && continue + [[ -z "$key" ]] && continue + + # Remove quotes from value + value="${value%\"}" + value="${value#\"}" + + # Export variable + case "$key" in + result) CHECK_RESULT="$value" ;; + os_name) OS_NAME="$value" ;; + os_version) OS_VERSION="$value" ;; + architecture) ARCHITECTURE="$value" ;; + init_system) INIT_SYSTEM="$value" ;; + us_cloud_reachable) US_CLOUD_REACHABLE="$value" ;; + eu_cloud_reachable) EU_CLOUD_REACHABLE="$value" ;; + registry_reachable) REGISTRY_REACHABLE="$value" ;; + download_reachable) DOWNLOAD_REACHABLE="$value" ;; + esac + done < "$ENV_FILE" + + if [ "$CHECK_RESULT" != "pass" ]; then + log_message "ERROR" "Preflight check failed (result: $CHECK_RESULT)" + echo -e "\n${YELLOW}Preflight check found issues. Please review:${NC}" + echo -e "${YELLOW}1. Check file: $ENV_FILE${NC}" + echo -e "${YELLOW}2. Run: ./wallarm-ct-check.sh${NC}" + echo -e "${YELLOW}3. Fix the issues and try again${NC}" + exit 1 + fi + + log_message "SUCCESS" "Preflight check verified:" + log_message "SUCCESS" " OS: $OS_NAME $OS_VERSION" + log_message "SUCCESS" " Architecture: $ARCHITECTURE" + log_message "SUCCESS" " Init System: $INIT_SYSTEM" + log_message "SUCCESS" " US Cloud Reachable: $US_CLOUD_REACHABLE" + log_message "SUCCESS" " EU Cloud Reachable: $EU_CLOUD_REACHABLE" + log_message "SUCCESS" " Registry Reachable: $REGISTRY_REACHABLE" + log_message "SUCCESS" " Download Reachable: $DOWNLOAD_REACHABLE" + + # Validate we have at least one cloud region reachable + if [ "$US_CLOUD_REACHABLE" = "false" ] && [ "$EU_CLOUD_REACHABLE" = "false" ]; then + fail_with_remediation "No Wallarm cloud region reachable" \ + "Network connectivity issues detected: +1. Check firewall rules for Wallarm cloud endpoints +2. 
Verify network connectivity +3. Run preflight check again: ./wallarm-ct-check.sh" + fi + + # Validate we have resources for Docker/Wallarm + if [ "$REGISTRY_REACHABLE" = "false" ] && [ "$DOWNLOAD_REACHABLE" = "false" ]; then + log_message "WARNING" "Neither registry nor download server reachable" + log_message "INFO" "Checking for local resources..." + + local has_local_resources=true + if [ -z "$(ls docker-*.tgz 2>/dev/null)" ]; then + log_message "ERROR" "No local Docker binary found" + has_local_resources=false + fi + + if [ -z "$(ls wallarm-node-*.tar 2>/dev/null)" ]; then + log_message "ERROR" "No local Wallarm image found" + has_local_resources=false + fi + + if [ "$has_local_resources" = "false" ]; then + fail_with_remediation "Insufficient resources for deployment" \ + "Please provide either: +1. Network access to $DOCKER_REGISTRY_HOST +2. Network access to $DOCKER_DOWNLOAD_HOST +3. Local files: docker-*.tgz and wallarm-node-*.tar in current directory" + fi + fi +} + +# ============================================================================== +# CONFIGURATION COLLECTION FUNCTIONS +# ============================================================================== + +select_cloud_region() { + log_message "INFO" "Selecting Wallarm Cloud region..." + + echo -e "\n${CYAN}${BOLD}Wallarm Cloud Region Selection:${NC}" + + # Show available regions based on preflight check + local available_options=() + + if [ "$US_CLOUD_REACHABLE" = "true" ]; then + echo -e "1. ${YELLOW}US Cloud${NC} (us1.api.wallarm.com) - For US-based deployments" + available_options+=("1" "US") + fi + + if [ "$EU_CLOUD_REACHABLE" = "true" ]; then + echo -e "2. ${YELLOW}EU Cloud${NC} (api.wallarm.com) - For EU-based deployments" + available_options+=("2" "EU") + fi + + if [ ${#available_options[@]} -eq 0 ]; then + fail_with_remediation "No cloud regions available" \ + "Preflight check showed no reachable cloud regions. +1. Check network connectivity to Wallarm endpoints +2. 
Run preflight check again: ./wallarm-ct-check.sh +3. Contact network administrator if behind firewall" + fi + + # Build regex pattern for validation + local pattern + pattern="^($(IFS='|'; echo "${available_options[*]}"))$" + + local cloud_choice="" + while [[ ! "$cloud_choice" =~ $pattern ]]; do + if [ ${#available_options[@]} -eq 2 ]; then + # Only one region available + if [ "$US_CLOUD_REACHABLE" = "true" ]; then + cloud_choice="US" + break + else + cloud_choice="EU" + break + fi + fi + + read -r -p "$(echo -e "${YELLOW}Enter choice [1/US or 2/EU]: ${NC}")" cloud_choice + cloud_choice=$(echo "$cloud_choice" | tr '[:lower:]' '[:upper:]') + + case "$cloud_choice" in + 1|"US") + if [ "$US_CLOUD_REACHABLE" = "true" ]; then + CLOUD_REGION="US" + API_HOST="us1.api.wallarm.com" + log_message "INFO" "Selected US Cloud" + else + echo -e "${RED}US Cloud is not reachable (per preflight check)${NC}" + cloud_choice="" + fi + ;; + 2|"EU") + if [ "$EU_CLOUD_REACHABLE" = "true" ]; then + CLOUD_REGION="EU" + API_HOST="api.wallarm.com" + log_message "INFO" "Selected EU Cloud" + else + echo -e "${RED}EU Cloud is not reachable (per preflight check)${NC}" + cloud_choice="" + fi + ;; + *) + if [ -n "$cloud_choice" ]; then + echo -e "${RED}Invalid choice. Select from available options above.${NC}" + fi + ;; + esac + done + + log_message "SUCCESS" "Cloud region selected: $CLOUD_REGION ($API_HOST)" +} + +# Critical fix from review: Proper IP validation +validate_ip_address() { + local ip="$1" + + # Check basic format + if [[ ! "$ip" =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then + return 1 + fi + + # Check each octet is 0-255 + IFS='.' 
read -r i1 i2 i3 i4 <<< "$ip" + if [ "$i1" -gt 255 ] || [ "$i2" -gt 255 ] || [ "$i3" -gt 255 ] || [ "$i4" -gt 255 ]; then + return 1 + fi + + return 0 +} + +# Critical fix from review: Port conflict detection with fallback +check_port_available() { + local port="$1" + local protocol="${2:-tcp}" + + log_message "DEBUG" "Checking port $port/$protocol availability..." + + # Try ss first (modern, usually available) + if command -v ss >/dev/null 2>&1; then + if ss -"${protocol:0:1}"ln | grep -q ":$port "; then + return 1 # Port in use + fi + # Fallback to netstat + elif command -v netstat >/dev/null 2>&1; then + if netstat -tulpn 2>/dev/null | grep -E ":$port\s" >/dev/null 2>&1; then + return 1 # Port in use + fi + else + log_message "WARNING" "Neither ss nor netstat available, cannot check port $port" + fi + + return 0 # Port available (or cannot check) +} + +collect_configuration() { + log_message "INFO" "Collecting deployment configuration..." + + # Get ingress port + local default_port=80 + local ingress_port="" + while [[ ! "$ingress_port" =~ ^[0-9]+$ ]] || [ "$ingress_port" -lt 1 ] || [ "$ingress_port" -gt 65535 ]; do + read -r -p "$(echo -e "${YELLOW}Enter inbound port [${default_port}]: ${NC}")" ingress_port + ingress_port="${ingress_port:-$default_port}" + + if [[ ! "$ingress_port" =~ ^[0-9]+$ ]]; then + echo -e "${RED}Port must be a number${NC}" + elif [ "$ingress_port" -lt 1 ] || [ "$ingress_port" -gt 65535 ]; then + echo -e "${RED}Port must be between 1 and 65535${NC}" + elif ! check_port_available "$ingress_port"; then + echo -e "${RED}Port $ingress_port is already in use${NC}" + ingress_port="" + fi + done + + # Calculate monitoring port (ingress + 10, check for conflicts) + local monitoring_port=$((ingress_port + 10)) + if ! check_port_available "$monitoring_port"; then + log_message "WARNING" "Port $monitoring_port is in use, choosing alternative..." + monitoring_port=$((ingress_port + 100)) + if ! 
check_port_available "$monitoring_port"; then + monitoring_port=$((ingress_port + 200)) + fi + fi + log_message "INFO" "Monitoring port will be: $monitoring_port" + + # Get application server details + local upstream_ip="" + local upstream_port="" + + echo -e "\n${CYAN}${BOLD}Application Server Configuration:${NC}" + echo -e "${YELLOW}Enter the IP/hostname and port of your backend application${NC}" + + while [[ -z "$upstream_ip" ]]; do + read -r -p "$(echo -e "${YELLOW}Upstream App IP/Hostname [127.0.0.1]: ${NC}")" upstream_ip + upstream_ip="${upstream_ip:-127.0.0.1}" + + # Validate IP/hostname format + if ! validate_ip_address "$upstream_ip" && \ + ! [[ "$upstream_ip" =~ ^[a-zA-Z0-9][a-zA-Z0-9.-]*[a-zA-Z0-9]$ ]]; then + echo -e "${RED}Invalid IP/hostname format${NC}" + upstream_ip="" + fi + done + + while [[ ! "$upstream_port" =~ ^[0-9]+$ ]] || [ "$upstream_port" -lt 1 ] || [ "$upstream_port" -gt 65535 ]; do + read -r -p "$(echo -e "${YELLOW}Upstream App Port [8080]: ${NC}")" upstream_port + upstream_port="${upstream_port:-8080}" + + if [[ ! "$upstream_port" =~ ^[0-9]+$ ]]; then + echo -e "${RED}Port must be a number${NC}" + elif [ "$upstream_port" -lt 1 ] || [ "$upstream_port" -gt 65535 ]; then + echo -e "${RED}Port must be between 1 and 65535${NC}" + fi + done + + # Verify application server reachability + log_message "INFO" "Verifying application server reachability..." + if timeout 5 bash -c "cat < /dev/null > /dev/tcp/$upstream_ip/$upstream_port" 2>/dev/null; then + log_message "SUCCESS" "Application server $upstream_ip:$upstream_port is reachable" + else + log_message "WARNING" "Application server $upstream_ip:$upstream_port is not reachable" + echo -e "${YELLOW}${BOLD}Warning:${NC} Cannot reach application server at $upstream_ip:$upstream_port" + echo -e "${YELLOW}This may cause the Wallarm node to fail. Possible reasons:${NC}" + echo -e "1. Application server is not running" + echo -e "2. Firewall blocking port $upstream_port" + echo -e "3. 
Wrong IP/hostname" + echo -e "4. Application server not listening on that port" + + read -r -p "$(echo -e "${YELLOW}Continue anyway? (y/N): ${NC}")" -n 1 + echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + fail_with_remediation "Application server unreachable" \ + "Ensure your application server is accessible: +1. Start your application server +2. Check it's listening: sudo ss -tlnp | grep :$upstream_port +3. Verify firewall rules allow inbound connections +4. Test connectivity: telnet $upstream_ip $upstream_port +5. If using hostname, verify DNS resolution: nslookup $upstream_ip" + fi + fi + + # Get Wallarm node token + local wallarm_token="" + echo -e "\n${CYAN}${BOLD}Wallarm Node Token:${NC}" + echo -e "${YELLOW}Get your token from Wallarm Console:${NC}" + echo -e "Create a new 'Wallarm node' and copy the token (will be visible as you type)" + while [[ -z "$wallarm_token" ]]; do + read -r -p "$(echo -e "${YELLOW}Paste Wallarm Node Token: ${NC}")" wallarm_token + # Trim whitespace and newlines + wallarm_token=$(echo "$wallarm_token" | tr -d '[:space:]') + + if [[ -z "$wallarm_token" ]]; then + echo -e "${RED}Token cannot be empty${NC}" + elif [[ ! "$wallarm_token" =~ ^[A-Za-z0-9_+/=\-]+$ ]]; then + echo -e "${RED}Token contains invalid characters. 
Wallarm tokens are base64 strings (A-Z, a-z, 0-9, _, -, +, /, =)${NC}" + echo -e "${YELLOW}First 20 chars of what you entered: '${wallarm_token:0:20}...'${NC}" + wallarm_token="" + else + # Show confirmation of token length (but not full token for security) + token_length=${#wallarm_token} + echo -e "${GREEN}Token accepted (${token_length} characters).${NC}" + echo -e "${YELLOW}First 8 chars for verification: ${wallarm_token:0:8}...${NC}" + fi + done + + # Generate instance name and directory + local instance_name + instance_name="wallarm-$(hostname -s | tr '[:upper:]' '[:lower:]')-$(date +%Y%m%d)" + local instance_dir="/opt/$instance_name" + + # Ensure directory exists + sudo mkdir -p "$instance_dir" + + log_message "SUCCESS" "Configuration collected:" + log_message "SUCCESS" " Ingress Port: $ingress_port" + log_message "SUCCESS" " Monitoring Port: $monitoring_port" + log_message "SUCCESS" " Upstream: $upstream_ip:$upstream_port" + log_message "SUCCESS" " Instance: $instance_name" + log_message "SUCCESS" " Directory: $instance_dir" + + # Set global variables + INGRESS_PORT="$ingress_port" + MONITORING_PORT="$monitoring_port" + UPSTREAM_IP="$upstream_ip" + UPSTREAM_PORT="$upstream_port" + WALLARM_TOKEN="$wallarm_token" + INSTANCE_NAME="$instance_name" + INSTANCE_DIR="$instance_dir" +} + +# ============================================================================== +# DOCKER ENGINE SETUP (LXC OPTIMIZED) +# ============================================================================== + +setup_docker_engine() { + log_message "INFO" "Setting up Docker Engine for LXC/stealth deployment..." 
+ + # Check if Docker is already installed and running + if command -v docker >/dev/null 2>&1 && sudo docker info >/dev/null 2>&1; then + local docker_version + docker_version=$(docker --version | cut -d' ' -f3 | tr -d ',') + log_message "SUCCESS" "Docker is already installed and running (version $docker_version)" + + # Check if Docker is configured for LXC + if sudo docker info 2>/dev/null | grep -q "Storage Driver: vfs"; then + log_message "SUCCESS" "Docker is already configured with VFS storage driver (LXC compatible)" + else + log_message "WARNING" "Docker is not using VFS storage driver. LXC compatibility may be limited." + fi + return 0 + fi + + log_message "INFO" "Docker not found or not running. Proceeding with installation..." + + # Determine binary source + local binary_file="docker-$DOCKER_VERSION.tgz" + local binary_path="" + + if [ "$DOWNLOAD_REACHABLE" = "true" ]; then + # Download Docker static binary from internal server + log_message "INFO" "Downloading Docker static binary for $ARCHITECTURE..." + local download_url="$DOCKER_STATIC_BASE_URL/$ARCHITECTURE/docker-$DOCKER_VERSION.tgz" + + if curl -fL $CURL_INSECURE_FLAG --connect-timeout 30 "$download_url" -o "$binary_file"; then + log_message "SUCCESS" "Downloaded Docker binary: $binary_file" + binary_path="$binary_file" + else + log_message "ERROR" "Failed to download Docker binary from $download_url" + binary_path="" + fi + fi + + # Fallback: Check for local Docker binary + if [ -z "$binary_path" ]; then + log_message "INFO" "Checking for local Docker binary..." + local local_files + local_files=$(ls docker-*.tgz 2>/dev/null | head -1) + if [ -n "$local_files" ]; then + binary_path="$local_files" + log_message "SUCCESS" "Using local Docker binary: $binary_path" + else + fail_with_remediation "No Docker binary available" \ + "Please provide a Docker static binary: +1. Download manually: + curl -L '$DOCKER_STATIC_BASE_URL/$ARCHITECTURE/docker-$DOCKER_VERSION.tgz' -o docker.tgz +2. 
Or place an existing docker-*.tgz file in current directory +3. Re-run the script after downloading" + fi + fi + + # Extract and install + log_message "INFO" "Extracting Docker binary..." + + # First verify the tar file exists and is readable + if [ ! -f "$binary_path" ]; then + fail_with_remediation "Docker binary file not found" \ + "File $binary_path does not exist. Check download and file permissions." + fi + + if [ ! -r "$binary_path" ]; then + fail_with_remediation "Docker binary file not readable" \ + "Cannot read file $binary_path. Check file permissions." + fi + + # Test if it's a valid tar archive + log_message "DEBUG" "Testing tar archive integrity..." + + # First check if we can read the file + if [ ! -r "$binary_path" ]; then + log_message "ERROR" "Cannot read file: $binary_path" + log_message "INFO" "File permissions: $(ls -la "$binary_path" 2>/dev/null || echo "cannot stat")" + fail_with_remediation "Cannot read Docker binary file" \ + "File $binary_path exists but is not readable. +1. Check file permissions: ls -la $binary_path +2. Fix permissions: chmod 644 $binary_path +3. Or download fresh copy" + fi + + # Try to get file type information + local file_type="unknown" + if command -v file >/dev/null 2>&1; then + file_type=$(file "$binary_path" 2>/dev/null || echo "file command failed") + elif command -v hexdump >/dev/null 2>&1; then + # Check magic bytes manually + local magic_bytes=$(hexdump -n 2 -C "$binary_path" 2>/dev/null | head -1 | cut -d' ' -f2-3 || echo "no magic") + file_type="hexdump: $magic_bytes" + fi + + log_message "INFO" "File info: $binary_path ($(stat -c%s "$binary_path") bytes)" + log_message "INFO" "File type: $file_type" + log_message "INFO" "Current directory: $(pwd)" + log_message "INFO" "Full path: $(readlink -f "$binary_path" 2>/dev/null || echo "$binary_path")" + + # Test tar archive with error capture + local tar_test_output + tar_test_output=$(tar -tzf "$binary_path" 2>&1) + local tar_test_exit=$? 
+ + if [ $tar_test_exit -ne 0 ]; then + log_message "ERROR" "File $binary_path is not a valid tar.gz archive (tar exit: $tar_test_exit)" + log_message "DEBUG" "Tar test output: $tar_test_output" + + # Check if it might be a different compression format + log_message "INFO" "Checking for alternative compression formats..." + + # Try gunzip test + if command -v gunzip >/dev/null 2>&1; then + if gunzip -t "$binary_path" 2>/dev/null; then + log_message "WARNING" "File is valid gzip but tar can't read it" + else + log_message "INFO" "Not a valid gzip file either" + fi + fi + + # Check first few bytes + if command -v xxd >/dev/null 2>&1; then + log_message "DEBUG" "First 20 bytes: $(xxd -l 20 "$binary_path" 2>/dev/null || echo "cannot read")" + elif command -v od >/dev/null 2>&1; then + log_message "DEBUG" "First 20 bytes: $(od -x -N 20 "$binary_path" 2>/dev/null | head -2 || echo "cannot read")" + fi + + fail_with_remediation "Docker binary file is corrupted or invalid" \ + "The Docker binary file is not a valid tar.gz archive. +Tar error: $tar_test_output + +File info: $(stat -c%s "$binary_path") bytes, type: $file_type + +Possible solutions: +1. The download may have been interrupted or corrupted +2. The file may be in wrong format (not tar.gz) +3. Server might be serving wrong content + +Steps to fix: +1. Delete corrupted file: rm -f docker-*.tgz +2. Check disk space: df -h . +3. Download manually and verify: + curl -v -L '$DOCKER_STATIC_BASE_URL/$ARCHITECTURE/docker-$DOCKER_VERSION.tgz' -o test.tgz + file test.tgz + tar -tzf test.tgz +4. Check if tar command works: tar --version" + fi + + log_message "SUCCESS" "Tar archive validation passed" + + # Extract the archive + log_message "DEBUG" "Extracting files from $binary_path..." + local tar_output + tar_output=$(tar xzvf "$binary_path" 2>&1) + local tar_exit=$? 
+ + if [ $tar_exit -ne 0 ]; then + log_message "ERROR" "Failed to extract files from $binary_path (exit code: $tar_exit)" + log_message "DEBUG" "Tar output: $tar_output" + log_message "INFO" "Checking extracted files..." + if [ -d "docker" ]; then + log_message "WARNING" "Some files were extracted to 'docker/' directory" + ls -la docker/ 2>/dev/null | head -10 || true + fi + fail_with_remediation "Failed to extract Docker binary" \ + "Extraction failed. Possible reasons: +1. Insufficient disk space: df -h . +2. Permission issues in current directory +3. Corrupted archive (partial download) +4. File system issues + +Tar error: $tar_output + +Check disk space and permissions, then try manual extraction: + tar xzvf $binary_path" + else + log_message "SUCCESS" "Docker binary extracted successfully" + fi + + log_message "INFO" "Installing Docker binaries to /usr/bin/" + sudo cp docker/* /usr/bin/ 2>/dev/null || { + fail_with_remediation "Failed to copy Docker binaries" \ + "Permission denied copying to /usr/bin/ +1. Ensure you have sudo privileges +2. Check disk space: df -h / +3. Manual installation: + sudo cp docker/* /usr/bin/" + } + + # Cleanup extracted directory + rm -rf docker + + # Create docker group (required for systemd socket configuration) + log_message "INFO" "Creating docker group for systemd socket..." + if ! getent group docker >/dev/null; then + if sudo groupadd docker 2>/dev/null; then + log_message "SUCCESS" "Created docker group" + else + log_message "ERROR" "Failed to create docker group. This may cause systemd startup to fail." + # Continue anyway - systemd will fail with clear error + fi + else + log_message "INFO" "Docker group already exists" + fi + + # Configure Docker daemon for LXC (VFS storage driver, cgroupfs) + log_message "INFO" "Configuring Docker daemon for LXC (VFS storage driver, cgroupfs)..." 
+ + # Create docker configuration directory + sudo mkdir -p /etc/docker + + # Create daemon.json for LXC optimization + sudo tee /etc/docker/daemon.json > /dev/null < /dev/null <<'EOF' +[Unit] +Description=Docker Engine +After=network-online.target firewalld.service containerd.service +Wants=network-online.target +Requires=docker.socket + +[Service] +Type=notify +ExecStart=/usr/bin/dockerd --group docker +ExecReload=/bin/kill -s HUP $MAINPID +TimeoutSec=0 +RestartSec=2 +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +TasksMax=infinity +Delegate=yes +KillMode=process + +[Install] +WantedBy=multi-user.target +EOF + + sudo tee /etc/systemd/system/docker.socket > /dev/null <<'EOF' +[Unit] +Description=Docker Socket for the API + +[Socket] +ListenStream=/var/run/docker.sock +SocketMode=0660 +SocketUser=root +SocketGroup=docker + +[Install] +WantedBy=sockets.target +EOF + + sudo systemctl daemon-reload + sudo systemctl enable docker + sudo systemctl start docker + ;; + + "openrc") + # Alpine OpenRC configuration + sudo tee /etc/init.d/docker > /dev/null <<'EOF' +#!/sbin/openrc-run +description="Docker Engine" +command="/usr/bin/dockerd" +command_args="--group docker" +pidfile="/run/docker.pid" +command_background=true + +depend() { + need net + after firewall +} +EOF + + sudo chmod +x /etc/init.d/docker + sudo rc-update add docker default + sudo rc-service docker start + ;; + + "sysvinit") + # Traditional SysV init script + sudo tee /etc/init.d/docker > /dev/null <<'EOF' +#!/bin/bash +### BEGIN INIT INFO +# Provides: docker +# Required-Start: $local_fs $network $remote_fs +# Required-Stop: $local_fs $network $remote_fs +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Docker Engine +# Description: Docker container runtime +### END INIT INFO + +DESC="Docker Engine" +DAEMON=/usr/bin/dockerd +DAEMON_ARGS="--group docker" +PIDFILE=/var/run/docker.pid +SCRIPTNAME=/etc/init.d/docker 
+ +[ -x "$DAEMON" ] || exit 0 + +. /lib/lsb/init-functions + +case "$1" in + start) + log_daemon_msg "Starting $DESC" "docker" + start-stop-daemon --start --background --pidfile "$PIDFILE" \ + --exec "$DAEMON" -- $DAEMON_ARGS + log_end_msg $? + ;; + stop) + log_daemon_msg "Stopping $DESC" "docker" + start-stop-daemon --stop --pidfile "$PIDFILE" --retry 10 + log_end_msg $? + ;; + restart) + $0 stop + sleep 1 + $0 start + ;; + status) + status_of_proc -p "$PIDFILE" "$DAEMON" docker + ;; + *) + echo "Usage: $SCRIPTNAME {start|stop|restart|status}" + exit 3 + ;; +esac + +exit 0 +EOF + + sudo chmod +x /etc/init.d/docker + sudo update-rc.d docker defaults + sudo service docker start + ;; + + *) + log_message "WARNING" "Unknown init system '$INIT_SYSTEM', trying systemd defaults" + sudo systemctl daemon-reload 2>/dev/null || true + sudo systemctl enable docker 2>/dev/null || true + sudo systemctl start docker 2>/dev/null || { + log_message "ERROR" "Failed to start Docker with unknown init system" + echo -e "${YELLOW}Please start Docker manually and re-run the script${NC}" + exit 1 + } + ;; + esac + + # Verify Docker is running + log_message "INFO" "Verifying Docker service..." + sleep 3 # Give Docker time to start + + if ! sudo docker info >/dev/null 2>&1; then + fail_with_remediation "Docker failed to start" \ + "Docker installation completed but service failed to start: +1. Check Docker logs: journalctl -u docker (systemd) or /var/log/docker.log +2. Verify configuration: sudo dockerd --debug +3. Manual start: sudo dockerd --group docker &" + fi + + # Verify Docker is using VFS storage driver + log_message "INFO" "Verifying Docker storage driver configuration..." + if sudo docker info 2>/dev/null | grep -q "Storage Driver: vfs"; then + log_message "SUCCESS" "Docker configured with VFS storage driver (LXC compatible)" + else + log_message "WARNING" "Docker is not using VFS storage driver. Checking current driver..." 
+ sudo docker info 2>/dev/null | grep "Storage Driver:" || log_message "ERROR" "Could not determine storage driver" + log_message "WARNING" "LXC compatibility may be limited without VFS storage driver" + fi + + # Add current user to docker group for passwordless docker commands + log_message "INFO" "Adding current user to docker group..." + + # Security notice: docker group grants root-equivalent privileges + echo -e "${YELLOW}${BOLD}Security Notice:${NC} Adding your user to the 'docker' group grants root-equivalent privileges." + echo -e "${YELLOW}Any user in the docker group can run commands as root on the host system.${NC}" + echo -e "${YELLOW}Only proceed if you understand and accept this security risk.${NC}" + + read -r -p "$(echo -e "${YELLOW}Add $(whoami) to docker group? (Y/n): ${NC}")" -n 1 + echo + if [[ ! $REPLY =~ ^[Nn]$ ]]; then + sudo usermod -aG docker "$(whoami)" 2>/dev/null && \ + log_message "SUCCESS" "Added $(whoami) to docker group (log out and back in for changes)" + else + log_message "WARNING" "Skipping docker group addition. You will need to use sudo for docker commands." + echo -e "${YELLOW}Note: You can manually add yourself to docker group later with:${NC}" + echo -e "${CYAN} sudo usermod -aG docker $(whoami)${NC}" + echo -e "${YELLOW}Then log out and back in for changes to take effect.${NC}" + fi + + log_message "SUCCESS" "Docker Engine setup completed successfully" +} + +# ============================================================================== +# WALLARM NODE DEPLOYMENT +# ============================================================================== + +deploy_wallarm_node() { + log_message "INFO" "Deploying Wallarm filtering node..." + + # Pull Wallarm Docker image + log_message "INFO" "Pulling Wallarm Docker image from internal registry: $WALLARM_IMAGE_SOURCE" + + if [ "$REGISTRY_REACHABLE" = "true" ]; then + if ! 
sudo docker pull "$WALLARM_IMAGE_SOURCE"; then + fail_with_remediation "Failed to pull Wallarm image from internal registry" \ + "Docker pull from internal registry failed. Possible reasons: +1. Network connectivity to $DOCKER_REGISTRY_HOST +2. Authentication required for internal registry +3. Insufficient disk space + +Solutions: +1. Check network: curl -I $INTERNAL_DOCKER_REGISTRY +2. Login to internal registry if required +3. Use local image fallback: docker save/load +4. Check disk: df -h /var/lib/docker" + fi + + # Re-tag to standard name + sudo docker tag "$WALLARM_IMAGE_SOURCE" "$WALLARM_IMAGE_TARGET" + log_message "SUCCESS" "Wallarm image pulled and tagged successfully" + else + # Use local image + log_message "INFO" "Using local Wallarm image (registry not reachable)" + local local_image + local_image=$(ls wallarm-node-*.tar 2>/dev/null | head -1) + if [ -n "$local_image" ]; then + if ! sudo docker load -i "$local_image"; then + fail_with_remediation "Failed to load local Wallarm image" \ + "Local Wallarm image file may be corrupted: +1. Verify file integrity: tar -tzf wallarm-node-*.tar +2. Download a fresh image on another machine: + docker pull $WALLARM_IMAGE_SOURCE + docker save $WALLARM_IMAGE_TARGET -o wallarm-node-latest.tar +3. Copy the file to this machine and re-run" + fi + log_message "SUCCESS" "Local Wallarm image loaded successfully" + else + fail_with_remediation "No Wallarm image available" \ + "Need either: +1. Network access to $DOCKER_REGISTRY_HOST +2. Local wallarm-node-*.tar file in current directory" + fi + fi + + # Create nginx configuration + log_message "INFO" "Creating nginx configuration..." 
+ local nginx_config="$INSTANCE_DIR/nginx.conf" + + sudo tee "$nginx_config" > /dev/null < /dev/null <> "\$LOG_FILE" + +# Stop existing container if running +sudo docker stop "\$CONTAINER_NAME" 2>/dev/null || true +sudo docker rm "\$CONTAINER_NAME" 2>/dev/null || true + +# Start new container +sudo docker run -d \\ + --name "\$CONTAINER_NAME" \\ + --restart always \\ + --network host \\ + -p $INGRESS_PORT:80 \\ + -p $MONITORING_PORT:90 \\ + -e WALLARM_API_TOKEN="$WALLARM_TOKEN" \\ + -e WALLARM_API_HOST="$API_HOST" \\ + -e NGINX_BACKEND="$UPSTREAM_IP:$UPSTREAM_PORT" \\ + -e WALLARM_MODE="monitoring" \\ + -v "\$NGINX_CONFIG:/etc/nginx/http.d/default.conf:ro" \\ + $WALLARM_IMAGE_TARGET + +echo "\$(date) - Container started with ID: \$(sudo docker ps -q -f name=\$CONTAINER_NAME)" >> "\$LOG_FILE" + +# Verify container is running +sleep 3 +if sudo docker ps | grep -q "\$CONTAINER_NAME"; then + echo "\$(date) - Verification: Container is running" >> "\$LOG_FILE" + echo "Wallarm node \$CONTAINER_NAME started successfully" +else + echo "\$(date) - ERROR: Container failed to start" >> "\$LOG_FILE" + sudo docker logs "\$CONTAINER_NAME" >> "\$LOG_FILE" 2>&1 + exit 1 +fi +EOF + + sudo chmod +x "$start_script" + log_message "SUCCESS" "Start script created: $start_script" + + # Create init system service for automatic startup + log_message "INFO" "Creating service for automatic startup (init system: $INIT_SYSTEM)..." + + case "$INIT_SYSTEM" in + "systemd") + local service_file="/etc/systemd/system/wallarm-$INSTANCE_NAME.service" + sudo tee "$service_file" > /dev/null </dev/null || \ + log_message "WARNING" "Failed to enable systemd service (may already exist)" + ;; + + "openrc") + local service_file="/etc/init.d/wallarm-$INSTANCE_NAME" + sudo tee "$service_file" > /dev/null </dev/null || true + docker rm $INSTANCE_NAME 2>/dev/null || true + eend \$? 
+} +EOF + sudo chmod +x "$service_file" + sudo rc-update add "wallarm-$INSTANCE_NAME" default 2>/dev/null || \ + log_message "WARNING" "Failed to add OpenRC service (may already exist)" + ;; + + "sysvinit") + local service_file="/etc/init.d/wallarm-$INSTANCE_NAME" + sudo tee "$service_file" > /dev/null </dev/null || true + docker rm $INSTANCE_NAME 2>/dev/null || true + log_end_msg \$? + ;; + restart) + \$0 stop + sleep 2 + \$0 start + ;; + status) + if docker ps | grep -q "$INSTANCE_NAME"; then + echo "$INSTANCE_NAME is running" + exit 0 + else + echo "$INSTANCE_NAME is not running" + exit 1 + fi + ;; + *) + echo "Usage: \$0 {start|stop|restart|status}" + exit 3 + ;; +esac + +exit 0 +EOF + sudo chmod +x "$service_file" + sudo update-rc.d "wallarm-$INSTANCE_NAME" defaults 2>/dev/null || \ + log_message "WARNING" "Failed to add SysV init service (may already exist)" + ;; + + *) + log_message "WARNING" "Unknown init system, not creating service (manual start via $start_script)" + ;; + esac + + # Start the Wallarm node + log_message "INFO" "Starting Wallarm filtering node..." + if ! sudo "$start_script"; then + fail_with_remediation "Failed to start Wallarm node" \ + "Container failed to start. Check: +1. Docker logs: sudo docker logs $INSTANCE_NAME +2. Port conflicts: sudo ss -tlnp | grep ':$INGRESS_PORT\|:$MONITORING_PORT' +3. Docker status: sudo docker info +4. 
Manual start attempt: sudo $start_script"
    fi

    log_message "SUCCESS" "Wallarm filtering node deployed successfully"
    log_message "SUCCESS" "  Container: $INSTANCE_NAME"
    log_message "SUCCESS" "  Ingress Port: $INGRESS_PORT"
    log_message "SUCCESS" "  Monitoring Port: $MONITORING_PORT"
    log_message "SUCCESS" "  Upstream: $UPSTREAM_IP:$UPSTREAM_PORT"
    log_message "SUCCESS" "  Config Directory: $INSTANCE_DIR"
}

# ==============================================================================
# DEPLOYMENT VERIFICATION
# ==============================================================================

verify_deployment() {
    log_message "INFO" "Verifying Wallarm deployment..."

    # Check if container is running. Bugfix: match the container name exactly
    # instead of grepping the whole `docker ps` table, which could match a
    # substring of another container's name, image or status text.
    log_message "INFO" "Checking if container is running..."
    if ! sudo docker ps --format '{{.Names}}' | grep -qx "$INSTANCE_NAME"; then
        fail_with_remediation "Wallarm container is not running" \
            "Container failed to start or crashed:
1. Check container logs: sudo docker logs $INSTANCE_NAME
2. Check Docker service: sudo systemctl status docker (or equivalent)
3. Manual start: sudo $INSTANCE_DIR/start.sh"
    fi
    log_message "SUCCESS" "Container is running"

    # Test ingress port (a busy port is the expected state here)
    log_message "INFO" "Testing ingress port $INGRESS_PORT..."
    if ! check_port_available "$INGRESS_PORT"; then
        log_message "SUCCESS" "Ingress port $INGRESS_PORT is in use (as expected)"
    else
        log_message "WARNING" "Ingress port $INGRESS_PORT appears available (container may not be listening)"
    fi

    # Test monitoring port
    log_message "INFO" "Testing monitoring port $MONITORING_PORT..."
    if ! check_port_available "$MONITORING_PORT"; then
        log_message "SUCCESS" "Monitoring port $MONITORING_PORT is in use (as expected)"
    else
        log_message "WARNING" "Monitoring port $MONITORING_PORT appears available"
    fi

    # Test health check endpoint (retry once after a short grace period)
    log_message "INFO" "Testing health check endpoint..."
    local health_check_url="http://localhost:$INGRESS_PORT/health"
    if curl -sf --connect-timeout 5 "$health_check_url" >/dev/null 2>&1; then
        log_message "SUCCESS" "Health check endpoint responsive"
    else
        log_message "WARNING" "Health check endpoint not responsive (may need time to start)"
        sleep 5
        if curl -sf --connect-timeout 5 "$health_check_url" >/dev/null 2>&1; then
            log_message "SUCCESS" "Health check endpoint now responsive"
        else
            log_message "WARNING" "Health check endpoint still not responsive (check nginx config)"
        fi
    fi

    # Test handshake through filtering node
    log_message "INFO" "Testing handshake through filtering node to upstream..."
    local test_url="http://localhost:$INGRESS_PORT/"
    if curl -sfI --connect-timeout 10 "$test_url" >/dev/null 2>&1; then
        log_message "SUCCESS" "Handshake successful: filtering node can reach upstream"
    else
        log_message "WARNING" "Handshake failed (upstream may not be responding)"
        log_message "INFO" "Checking if upstream is directly reachable..."
        if timeout 5 bash -c "cat < /dev/null > /dev/tcp/$UPSTREAM_IP/$UPSTREAM_PORT" 2>/dev/null; then
            log_message "ERROR" "Upstream is reachable but filtering node cannot proxy"
            echo -e "${YELLOW}Possible nginx configuration issue. Check:${NC}"
            echo -e "1. Container logs: sudo docker logs $INSTANCE_NAME"
            echo -e "2. Nginx config: sudo docker exec $INSTANCE_NAME cat /etc/nginx/http.d/default.conf"
        else
            log_message "WARNING" "Upstream server is not reachable (as previously warned)"
        fi
    fi

    # Check Wallarm cloud synchronization
    log_message "INFO" "Checking Wallarm cloud synchronization (this may take 30 seconds)..."
    echo -e "${YELLOW}Note: Full synchronization with Wallarm cloud may take several minutes.${NC}"
    echo -e "${YELLOW}You can check sync status in Wallarm Console.${NC}"

    # Quick test: check container logs for synchronization messages
    if sudo docker logs "$INSTANCE_NAME" 2>&1 | tail -20 | grep -i "sync\|connected\|token" >/dev/null 2>&1; then
        log_message "SUCCESS" "Wallarm node appears to be communicating with cloud"
    else
        log_message "WARNING" "No cloud synchronization messages in logs yet (may need time)"
    fi

    log_message "SUCCESS" "Deployment verification completed"
    echo -e "\n${GREEN}${BOLD}Verification Summary:${NC}"
    echo -e "  ${GREEN}✓${NC} Container running: $INSTANCE_NAME"
    echo -e "  ${GREEN}✓${NC} Ingress port: $INGRESS_PORT"
    echo -e "  ${GREEN}✓${NC} Monitoring port: $MONITORING_PORT"
    echo -e "  ${GREEN}✓${NC} Upstream: $UPSTREAM_IP:$UPSTREAM_PORT"
    echo -e "  ${GREEN}✓${NC} Cloud region: $CLOUD_REGION ($API_HOST)"
}

# ==============================================================================
# MAIN FUNCTION
# ==============================================================================

# Orchestrates the full deployment in five phases:
#   1. preflight verification, 2. configuration collection,
#   3. Docker engine setup, 4. node deployment, 5. verification.
# Initializes LOG_FILE first (with fallback to the current directory) so
# log_message output is captured from the start.
main() {
    clear
    echo -e "${BLUE}${BOLD}"
    echo "╔══════════════════════════════════════════════════════════════╗"
    echo "║          WALLARM DEPLOYMENT SCRIPT - V1.2                    ║"
    echo "║          LXC-Optimized Filtering Node Deployment             ║"
    echo "╚══════════════════════════════════════════════════════════════╝${NC}"
    echo -e "\n${YELLOW}Starting deployment at: $(date)${NC}"

    # Initialize logging
    # Create logs directory if it doesn't exist
    local log_dir="${HOME:-.}/logs"
    if [ ! -d "$log_dir" ]; then
        if ! mkdir -p "$log_dir"; then
            echo -e "${YELLOW}Cannot create log directory $log_dir, falling back to current directory...${NC}"
            log_dir="."
        fi
    fi

    # Truncate/create the log file; fall back to ./ if $HOME is unwritable
    LOG_FILE="$log_dir/wallarm-deployment.log"
    if ! : > "$LOG_FILE"; then
        echo -e "${RED}Cannot create log file at $LOG_FILE${NC}"
        echo -e "${YELLOW}Falling back to current directory...${NC}"
        LOG_FILE="./wallarm-deployment.log"
        : > "$LOG_FILE" 2>/dev/null || true
    fi
    if ! chmod 644 "$LOG_FILE" 2>/dev/null; then
        echo -e "${YELLOW}Warning: Could not set permissions on log file${NC}"
    fi

    log_message "INFO" "=== Wallarm Deployment Started ==="

    # SSL security warning
    if [ "$INSECURE_SSL" = "1" ]; then
        log_message "WARNING" "SSL certificate validation is DISABLED (insecure). Set WALLARM_INSECURE_SSL=0 to enable validation."
    fi

    # Phase 1: Verify preflight check
    log_message "INFO" "=== PHASE 1: PREFLIGHT CHECK VERIFICATION ==="
    verify_preflight_check

    # Phase 2: Configuration collection
    log_message "INFO" "=== PHASE 2: CONFIGURATION COLLECTION ==="
    select_cloud_region
    collect_configuration

    # Phase 3: Docker engine setup (LXC optimized)
    log_message "INFO" "=== PHASE 3: DOCKER ENGINE SETUP (LXC OPTIMIZED) ==="
    setup_docker_engine

    # Phase 4: Deployment
    log_message "INFO" "=== PHASE 4: DEPLOYMENT ==="
    deploy_wallarm_node

    # Phase 5: Verification
    log_message "INFO" "=== PHASE 5: VERIFICATION ==="
    verify_deployment

    # Success message
    log_message "SUCCESS" "=== WALLARM DEPLOYMENT COMPLETED SUCCESSFULLY ==="
    echo -e "\n${GREEN}${BOLD}╔══════════════════════════════════════════════════════════════╗${NC}"
    echo -e "${GREEN}${BOLD}║        WALLARM FILTERING NODE DEPLOYMENT SUCCESSFUL          ║${NC}"
    echo -e "${GREEN}${BOLD}╚══════════════════════════════════════════════════════════════╝${NC}"
    echo -e "\n${CYAN}The Wallarm filtering node is now active and protecting your application.${NC}"
    echo -e "${YELLOW}Full deployment log: $LOG_FILE${NC}"
    echo -e "${YELLOW}Instance directory: $INSTANCE_DIR${NC}"
    echo -e "\n${GREEN}To stop the node:${NC} sudo docker stop $INSTANCE_NAME"
    echo -e "${GREEN}To restart:${NC} sudo $INSTANCE_DIR/start.sh"
    echo -e "${GREEN}To view logs:${NC} sudo docker logs -f $INSTANCE_NAME"
    echo -e "\n${MAGENTA}${BOLD}Deployment completed successfully!${NC}"
    echo -e "\n${YELLOW}Important next steps:${NC}"
    echo -e "1. Monitor sync status in Wallarm Console"
    echo -e "2. Test attack detection with safe test: curl http://localhost:$INGRESS_PORT/?wallarm_test=1"
    echo -e "3. Review logs periodically: sudo docker logs --tail 50 $INSTANCE_NAME"
}

# ==============================================================================
# SCRIPT EXECUTION
# ==============================================================================

# Ensure we're in bash
if [ -z "$BASH_VERSION" ]; then
    echo "Error: This script must be run with bash" >&2
    exit 1
fi

# Run main function
main "$@"
\ No newline at end of file
diff --git a/wallarm-ct-uninstall.sh b/wallarm-ct-uninstall.sh
new file mode 100644
index 0000000..51b6aab
--- /dev/null
+++ b/wallarm-ct-uninstall.sh
@@ -0,0 +1,537 @@
#!/bin/bash
# ==============================================================================
# WALLARM UNINSTALL SCRIPT - V1.0
# ==============================================================================
# Purpose: Safely remove Wallarm filtering node and cleanup Docker installation
# Features:
#   - Interactive confirmation with safety checks
#   - Stops and removes Wallarm container and image
#   - Removes Docker service files created by deployment script
#   - Optional cleanup of Docker binaries (if no other containers exist)
#   - Preserves user data and logs (with option to remove)
#   - DAU-friendly warnings and confirmations
# ==============================================================================

# Color definitions for better UX
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[1;34m'
CYAN='\033[0;36m'
MAGENTA='\033[0;35m'
BOLD='\033[1m'
NC='\033[0m' # No Color

# Strict error handling
set -euo pipefail
# Simple error handler for early failures (before log_message is defined)
+early_error_handler() { + echo -e "${RED}${BOLD}[ERROR]${NC} Script failed at line $LINENO. Command: $BASH_COMMAND" >&2 + exit 1 +} +trap early_error_handler ERR + +# Logging function +log_message() { + local level="$1" + local message="$2" + local timestamp + timestamp=$(date '+%Y-%m-%d %H:%M:%S') + + case "$level" in + "INFO") color="${BLUE}" ;; + "SUCCESS") color="${GREEN}" ;; + "WARNING") color="${YELLOW}" ;; + "ERROR") color="${RED}" ;; + "DEBUG") color="${CYAN}" ;; + *) color="${NC}" ;; + esac + + echo -e "${color}[${timestamp}] ${level}: ${message}${NC}" >&2 +} + +# Ask for confirmation +confirm() { + local prompt="$1" + local default="${2:-n}" + local options="[y/N]" + + if [ "$default" = "y" ]; then + options="[Y/n]" + fi + + echo -e -n "${YELLOW}${prompt} ${options}${NC} " + read -r response + + case "$response" in + [yY][eE][sS]|[yY]) + return 0 + ;; + [nN][oO]|[nN]) + return 1 + ;; + "") + # Use default + if [ "$default" = "y" ]; then + return 0 + else + return 1 + fi + ;; + *) + # Invalid input, treat as no + return 1 + ;; + esac +} + +# Check if running as root or with sudo +check_sudo() { + if [ "$EUID" -ne 0 ]; then + log_message "INFO" "This script requires sudo privileges" + if ! sudo -n true 2>/dev/null; then + log_message "INFO" "Please enter your sudo password when prompted" + sudo -v + fi + fi +} + +# Detect init system +detect_init_system() { + if command -v systemctl >/dev/null 2>&1 && systemctl --version >/dev/null 2>&1; then + echo "systemd" + elif [ -d /run/openrc ]; then + echo "openrc" + elif [ -f /etc/init.d/docker ]; then + echo "sysvinit" + else + echo "unknown" + fi +} + +# Check if Docker is installed and running +check_docker() { + if ! command -v docker >/dev/null 2>&1; then + log_message "WARNING" "Docker command not found" + return 1 + fi + + if ! 
sudo docker info >/dev/null 2>&1; then + log_message "WARNING" "Docker is not running" + return 1 + fi + + return 0 +} + +# Check for other Docker containers (besides Wallarm) +check_other_containers() { + local wallarm_container="wallarm-node" + local all_containers + all_containers=$(sudo docker ps -a -q 2>/dev/null | wc -l) + local wallarm_containers + wallarm_containers=$(sudo docker ps -a --filter "name=${wallarm_container}" -q 2>/dev/null | wc -l) + + if [ "$all_containers" -gt "$wallarm_containers" ]; then + log_message "WARNING" "Found other Docker containers besides Wallarm" + sudo docker ps -a --format "table {{.Names}}\t{{.Image}}\t{{.Status}}" | grep -v "$wallarm_container" || true + return 0 # Other containers exist + fi + + return 1 # Only Wallarm containers or no containers +} + +# Stop and remove Wallarm container +remove_wallarm_container() { + local container_name="wallarm-node" + + log_message "INFO" "Looking for Wallarm container..." + + if sudo docker ps -a --filter "name=${container_name}" --format "{{.Names}}" | grep -q "${container_name}"; then + log_message "INFO" "Found Wallarm container: ${container_name}" + + # Stop container if running + if sudo docker ps --filter "name=${container_name}" --filter "status=running" --format "{{.Names}}" | grep -q "${container_name}"; then + log_message "INFO" "Stopping Wallarm container..." + sudo docker stop "${container_name}" || { + log_message "WARNING" "Failed to stop container, attempting force stop" + sudo docker kill "${container_name}" 2>/dev/null || true + } + fi + + # Remove container + log_message "INFO" "Removing Wallarm container..." 
+ sudo docker rm -f "${container_name}" 2>/dev/null || { + log_message "WARNING" "Failed to remove container, it may already be removed" + } + + log_message "SUCCESS" "Wallarm container removed" + else + log_message "INFO" "No Wallarm container found" + fi +} + +# Remove Wallarm image +remove_wallarm_image() { + local image_name="wallarm/node" + + log_message "INFO" "Looking for Wallarm image..." + + if sudo docker images --format "{{.Repository}}" | grep -q "^${image_name}"; then + log_message "INFO" "Found Wallarm image: ${image_name}" + + # Check if image is used by any containers + local used_by + used_by=$(sudo docker ps -a --filter "ancestor=${image_name}" -q 2>/dev/null | wc -l) + + if [ "$used_by" -gt 0 ]; then + log_message "WARNING" "Image ${image_name} is still in use by containers, skipping removal" + return + fi + + # Remove image + log_message "INFO" "Removing Wallarm image..." + sudo docker rmi "${image_name}:latest" 2>/dev/null || { + log_message "WARNING" "Failed to remove image, it may be in use or already removed" + } + + # Also try to remove by ID if tag removal failed + local image_id + image_id=$(sudo docker images --filter "reference=${image_name}" --format "{{.ID}}" 2>/dev/null | head -1) + if [ -n "$image_id" ]; then + sudo docker rmi -f "$image_id" 2>/dev/null || true + fi + + log_message "SUCCESS" "Wallarm image removed" + else + log_message "INFO" "No Wallarm image found" + fi +} + +# Remove Docker service files (created by deployment script) +remove_docker_service_files() { + local init_system + init_system=$(detect_init_system) + + log_message "INFO" "Removing Docker service files for init system: ${init_system}" + + case "$init_system" in + "systemd") + # Stop and disable Docker service + if sudo systemctl is-active docker --quiet 2>/dev/null; then + log_message "INFO" "Stopping Docker service..." 
+ sudo systemctl stop docker 2>/dev/null || true + fi + + if sudo systemctl is-enabled docker --quiet 2>/dev/null; then + log_message "INFO" "Disabling Docker service..." + sudo systemctl disable docker 2>/dev/null || true + fi + + # Remove systemd unit files (if they exist and were created by our script) + local systemd_files=( + "/etc/systemd/system/docker.socket" + "/etc/systemd/system/docker.service" + "/usr/lib/systemd/system/docker.socket" + "/usr/lib/systemd/system/docker.service" + ) + + for file in "${systemd_files[@]}"; do + if [ -f "$file" ]; then + log_message "INFO" "Removing systemd file: $file" + sudo rm -f "$file" + fi + done + + sudo systemctl daemon-reload 2>/dev/null || true + ;; + + "openrc") + # Stop and remove from runlevels + if sudo rc-service docker status 2>/dev/null | grep -q "started"; then + log_message "INFO" "Stopping Docker service (OpenRC)..." + sudo rc-service docker stop 2>/dev/null || true + fi + + if [ -f /etc/init.d/docker ]; then + log_message "INFO" "Removing OpenRC init script..." + sudo rc-update del docker default 2>/dev/null || true + sudo rm -f /etc/init.d/docker + fi + ;; + + "sysvinit") + # Stop service + if [ -f /etc/init.d/docker ]; then + log_message "INFO" "Stopping Docker service (SysV init)..." + sudo service docker stop 2>/dev/null || true + + # Remove from startup + if command -v update-rc.d >/dev/null 2>&1; then + sudo update-rc.d -f docker remove 2>/dev/null || true + elif command -v chkconfig >/dev/null 2>&1; then + sudo chkconfig --del docker 2>/dev/null || true + fi + + log_message "INFO" "Removing SysV init script..." 
+ sudo rm -f /etc/init.d/docker + fi + ;; + + *) + log_message "WARNING" "Unknown init system, skipping service file cleanup" + ;; + esac + + log_message "SUCCESS" "Docker service files removed" +} + +# Remove Docker binaries (optional, only if no other containers exist) +remove_docker_binaries() { + local docker_binaries=( + "/usr/bin/docker" + "/usr/bin/dockerd" + "/usr/bin/docker-init" + "/usr/bin/docker-proxy" + "/usr/bin/containerd" + "/usr/bin/containerd-shim" + "/usr/bin/containerd-shim-runc-v1" + "/usr/bin/containerd-shim-runc-v2" + "/usr/bin/runc" + ) + + log_message "INFO" "Checking Docker binaries..." + + local binaries_found=0 + for binary in "${docker_binaries[@]}"; do + if [ -f "$binary" ]; then + binaries_found=$((binaries_found + 1)) + fi + done + + if [ "$binaries_found" -eq 0 ]; then + log_message "INFO" "No Docker binaries found in /usr/bin/" + return + fi + + if confirm "Remove Docker binaries from /usr/bin/? (Only do this if Docker was installed by wallarm-ct-deploy.sh)" "n"; then + log_message "WARNING" "Removing Docker binaries..." + + for binary in "${docker_binaries[@]}"; do + if [ -f "$binary" ]; then + log_message "INFO" "Removing $binary" + sudo rm -f "$binary" + fi + done + + # Also remove CNI plugins if they exist + if [ -d "/opt/cni/bin" ]; then + log_message "INFO" "Removing CNI plugins from /opt/cni/bin/" + sudo rm -rf /opt/cni/bin/* + fi + + log_message "SUCCESS" "Docker binaries removed" + else + log_message "INFO" "Skipping Docker binary removal" + fi +} + +# Remove Docker configuration files +remove_docker_config() { + local config_files=( + "/etc/docker/daemon.json" + "/etc/containerd/config.toml" + "/var/lib/docker" # Warning: This removes all Docker data! + ) + + log_message "INFO" "Checking Docker configuration files..." 
+ + # Only remove daemon.json if it was created by our script + if [ -f "/etc/docker/daemon.json" ]; then + log_message "INFO" "Found /etc/docker/daemon.json" + if grep -q "storage-driver.*vfs" "/etc/docker/daemon.json" 2>/dev/null; then + log_message "INFO" "This appears to be the VFS configuration from wallarm-ct-deploy.sh" + if confirm "Remove /etc/docker/daemon.json?" "n"; then + sudo rm -f "/etc/docker/daemon.json" + log_message "SUCCESS" "Docker configuration removed" + fi + else + log_message "WARNING" "/etc/docker/daemon.json doesn't appear to be from wallarm-ct-deploy.sh, skipping" + fi + fi + + # Warn about Docker data directory + if [ -d "/var/lib/docker" ]; then + log_message "WARNING" "/var/lib/docker contains Docker data (images, containers, volumes)" + log_message "WARNING" "Removing this directory will delete ALL Docker data on the system" + if confirm "Remove /var/lib/docker? (WARNING: Deletes ALL Docker data)" "n"; then + log_message "WARNING" "Removing /var/lib/docker - this may take a while..." + sudo rm -rf /var/lib/docker + log_message "SUCCESS" "Docker data directory removed" + fi + fi +} + +# Remove docker group (if empty) +remove_docker_group() { + log_message "INFO" "Checking docker group..." + + if getent group docker >/dev/null; then + local group_users + group_users=$(getent group docker | cut -d: -f4) + + if [ -z "$group_users" ]; then + log_message "INFO" "Docker group exists and has no users" + if confirm "Remove docker group?" 
"n"; then + sudo groupdel docker 2>/dev/null || { + log_message "WARNING" "Failed to remove docker group (may be system group)" + } + log_message "SUCCESS" "Docker group removed" + fi + else + log_message "WARNING" "Docker group has users: $group_users" + log_message "INFO" "Skipping docker group removal (users still present)" + fi + else + log_message "INFO" "Docker group not found" + fi +} + +# Remove Wallarm-specific files and logs +remove_wallarm_files() { + local wallarm_files=( + "$HOME/wallarm-start.sh" + "$HOME/wallarm-stop.sh" + "$HOME/wallarm-status.sh" + "/usr/local/bin/wallarm-start" + "/usr/local/bin/wallarm-stop" + "/usr/local/bin/wallarm-status" + ) + + log_message "INFO" "Removing Wallarm scripts and logs..." + + # Remove scripts + for file in "${wallarm_files[@]}"; do + if [ -f "$file" ]; then + log_message "INFO" "Removing $file" + sudo rm -f "$file" + fi + done + + # Remove log directory (if empty) + local log_dir="$HOME/logs" + if [ -d "$log_dir" ]; then + log_message "INFO" "Found log directory: $log_dir" + if [ -z "$(ls -A "$log_dir" 2>/dev/null)" ]; then + log_message "INFO" "Log directory is empty, removing..." + sudo rmdir "$log_dir" 2>/dev/null || true + else + log_message "INFO" "Log directory contains files, preserving..." + fi + fi + + # Remove .env file if it exists + if [ -f ".env" ]; then + log_message "INFO" "Removing .env file..." + rm -f ".env" + fi + + log_message "SUCCESS" "Wallarm files cleaned up" +} + +# Main uninstall function +main() { + echo -e "${CYAN}${BOLD}" + echo "╔══════════════════════════════════════════════════════════════╗" + echo "║ WALLARM UNINSTALLATION ║" + echo "╚══════════════════════════════════════════════════════════════╝" + echo -e "${NC}" + + echo -e "${YELLOW}This script will remove Wallarm filtering node and cleanup Docker installation.${NC}" + echo -e "${YELLOW}You will be asked for confirmation before each destructive operation.${NC}" + echo "" + + if ! 
confirm "Do you want to continue with the uninstallation?" "n"; then + log_message "INFO" "Uninstallation cancelled by user" + exit 0 + fi + + # Check sudo + check_sudo + + # Check Docker + if check_docker; then + log_message "INFO" "Docker is installed and running" + + # Check for other containers + if check_other_containers; then + log_message "WARNING" "Other Docker containers exist on this system" + echo -e "${YELLOW}Warning: Removing Docker may affect other containers.${NC}" + echo -e "${YELLOW}Consider leaving Docker installed if you need it for other purposes.${NC}" + echo "" + fi + else + log_message "WARNING" "Docker is not running or not installed" + fi + + # Step 1: Remove Wallarm container and image + echo "" + echo -e "${CYAN}${BOLD}Step 1: Remove Wallarm container and image${NC}" + if confirm "Stop and remove Wallarm container and image?" "y"; then + remove_wallarm_container + remove_wallarm_image + else + log_message "INFO" "Skipping Wallarm container/image removal" + fi + + # Step 2: Remove Docker service files + echo "" + echo -e "${CYAN}${BOLD}Step 2: Remove Docker service files${NC}" + if confirm "Remove Docker service files (systemd/OpenRC/SysV init scripts)?" "y"; then + remove_docker_service_files + else + log_message "INFO" "Skipping Docker service file removal" + fi + + # Step 3: Optional Docker binary removal + echo "" + echo -e "${CYAN}${BOLD}Step 3: Docker binaries and configuration${NC}" + remove_docker_binaries + remove_docker_config + + # Step 4: Remove docker group + echo "" + echo -e "${CYAN}${BOLD}Step 4: System cleanup${NC}" + remove_docker_group + + # Step 5: Remove Wallarm files + echo "" + echo -e "${CYAN}${BOLD}Step 5: Wallarm files and logs${NC}" + if confirm "Remove Wallarm scripts and log files?" 
"y"; then + remove_wallarm_files + else + log_message "INFO" "Skipping Wallarm file cleanup" + fi + + # Final message + echo "" + echo -e "${GREEN}${BOLD}╔══════════════════════════════════════════════════════════════╗${NC}" + echo -e "${GREEN}${BOLD}║ UNINSTALLATION COMPLETE ║${NC}" + echo -e "${GREEN}${BOLD}╚══════════════════════════════════════════════════════════════╝${NC}" + echo "" + echo -e "${GREEN}Wallarm filtering node has been removed.${NC}" + echo "" + echo -e "${YELLOW}Note:${NC}" + echo -e " • Docker may still be installed on your system" + echo -e " • Docker data in /var/lib/docker may still exist" + echo -e " • User may still be in docker group (check with 'groups')" + echo "" + echo -e "To completely remove Docker, you may need to:" + echo -e " 1. Remove Docker package using your system's package manager" + echo -e " 2. Remove /var/lib/docker directory (contains all Docker data)" + echo -e " 3. Remove user from docker group: sudo gpasswd -d \$USER docker" + echo "" +} + +# Run main function +main "$@" \ No newline at end of file