1363 lines
No EOL
51 KiB
Bash
Executable file
1363 lines
No EOL
51 KiB
Bash
Executable file
#!/bin/bash
|
|
# ==============================================================================
|
|
# WALLARM DEPLOYMENT SCRIPT - V1.1
|
|
# ==============================================================================
|
|
# Purpose: Deploy Wallarm filtering node after preflight check
|
|
# Features:
|
|
# - Reads preflight check results from .env file
|
|
# - Interactive configuration (cloud region, ports, token, upstream)
|
|
# - Docker installation with LXC optimization (VFS storage driver)
|
|
# - Wallarm node deployment with persistence
|
|
# - Deployment verification with handshake test
|
|
# - DAU-friendly error handling with remediation
|
|
# ==============================================================================
|
|
|
|
# Color definitions for better UX.
# Declared readonly: these are constants and are never reassigned below.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[1;34m'
readonly CYAN='\033[0;36m'
readonly MAGENTA='\033[0;35m'
readonly BOLD='\033[1m'
readonly NC='\033[0m' # No Color

# Strict error handling: abort on command failure (-e), on use of
# unset variables (-u), and on failures anywhere in a pipeline.
set -euo pipefail
|
|
# Simple error handler for early failures (before log_message is defined).
# Arguments: $1 - line number of the failing command.
# The ERR trap passes $LINENO explicitly: expanded at trap time it is the
# failing line, whereas $LINENO evaluated *inside* this function would
# always report a line within the function body.
early_error_handler() {
    local failed_line="${1:-$LINENO}"
    echo -e "${RED}${BOLD}[ERROR]${NC} Script failed at line $failed_line. Command: $BASH_COMMAND" >&2
    exit 1
}
trap 'early_error_handler $LINENO' ERR
|
|
|
|
# Extract the bare hostname from a URL, stripping the scheme, any
# embedded credentials, the port, and the path. Safe for logging.
# Arguments: $1 - URL (e.g. https://user:pass@host.example:443/path)
# Outputs:   hostname on stdout (input returned as-is if no URL parts found)
extract_hostname_from_url() {
    local raw="$1"
    local host
    host="${raw#*://}"     # drop scheme (no-op when absent)
    host="${host#*@}"      # drop user:password@ credentials (no-op when absent)
    host="${host%%[:/]*}"  # drop :port and /path
    printf '%s\n' "$host"
}
|
|
|
|
# Configuration
ENV_FILE=".env"
LOG_FILE="${HOME:-.}/logs/wallarm-deployment.log"
# Create the log directory up front: log_message appends to $LOG_FILE,
# and a missing directory would abort the script under 'set -e' on the
# very first log call.
mkdir -p "$(dirname "$LOG_FILE")"

# SSL security settings
# WALLARM_INSECURE_SSL=1 disables SSL certificate validation (insecure, for self-signed certs)
INSECURE_SSL="${WALLARM_INSECURE_SSL:-1}" # Default to insecure for backward compatibility
if [ "$INSECURE_SSL" = "1" ]; then
    CURL_INSECURE_FLAG="-k"
    # Warning will be logged later when log_message is available
else
    CURL_INSECURE_FLAG=""
fi
|
|
|
|
# Internal registry endpoints (from stealth deployment).
# SECURITY NOTE(review): deployment credentials are embedded in the
# defaults below. Prefer overriding via the WALLARM_DOCKER_REGISTRY /
# WALLARM_DOCKER_DOWNLOAD environment variables, and rotate the
# embedded password; anyone with read access to this script has it.
INTERNAL_DOCKER_REGISTRY="${WALLARM_DOCKER_REGISTRY:-https://deployment:elqXBsyT4BGXPYPeD07or8hT0Lb9Lpf@hub.ct.sechpoint.app}"
INTERNAL_DOCKER_DOWNLOAD="${WALLARM_DOCKER_DOWNLOAD:-https://deployment:elqXBsyT4BGXPYPeD07or8hT0Lb9Lpf@ct.sechpoint.app}"
# Extracted hostnames (without credentials) for Docker operations and safe logging
DOCKER_REGISTRY_HOST=$(extract_hostname_from_url "$INTERNAL_DOCKER_REGISTRY")
DOCKER_DOWNLOAD_HOST=$(extract_hostname_from_url "$INTERNAL_DOCKER_DOWNLOAD")

DOCKER_VERSION="29.2.1" # Version from stealth deployment guide
DOCKER_STATIC_BASE_URL="${INTERNAL_DOCKER_DOWNLOAD}/linux/static/stable"
WALLARM_IMAGE_SOURCE="${DOCKER_REGISTRY_HOST}/wallarm/node:6.11.0-rc1"
WALLARM_IMAGE_TARGET="wallarm/node:6.11.0-rc1"
|
|
|
|
|
|
|
|
# Deployment variables (set during execution)
CLOUD_REGION=""        # "US" or "EU" — set by select_cloud_region
API_HOST=""            # Wallarm API hostname matching CLOUD_REGION
INGRESS_PORT=""        # public port the filtering node listens on (collect_configuration)
MONITORING_PORT=""     # node status port, derived from INGRESS_PORT (+10/+100/+200)
UPSTREAM_IP=""         # backend application IP or hostname
UPSTREAM_PORT=""       # backend application port
WALLARM_TOKEN=""       # node registration token pasted by the operator
INSTANCE_NAME=""       # generated "wallarm-<host>-<date>" identifier
INSTANCE_DIR=""        # persistence directory under /opt/$INSTANCE_NAME

# Resource reachability from check script (overwritten from .env by
# verify_preflight_check; defaults assume nothing is reachable)
US_CLOUD_REACHABLE="false"
EU_CLOUD_REACHABLE="false"
REGISTRY_REACHABLE="false"
DOWNLOAD_REACHABLE="false"
|
|
|
|
# ==============================================================================
|
|
# LOGGING & ERROR HANDLING FUNCTIONS
|
|
# ==============================================================================
|
|
|
|
# Print a colorized, timestamped message to stderr and append a plain
# copy to $LOG_FILE.
# Arguments: $1 - level (INFO|SUCCESS|WARNING|ERROR|DEBUG), $2 - message
log_message() {
    local level="$1"
    local message="$2"
    local timestamp
    local color   # declared local: previously leaked into global scope
    timestamp=$(date '+%Y-%m-%d %H:%M:%S')

    case "$level" in
        "INFO") color="${BLUE}" ;;
        "SUCCESS") color="${GREEN}" ;;
        "WARNING") color="${YELLOW}" ;;
        "ERROR") color="${RED}" ;;
        "DEBUG") color="${CYAN}" ;;
        *) color="${NC}" ;;
    esac

    # Console copy keeps colors; file copy stays plain for grep-ability.
    echo -e "${color}[${timestamp}] ${level}: ${message}${NC}" >&2
    # Create the log directory on demand so the append cannot abort the
    # script under 'set -e' when the directory does not exist yet.
    mkdir -p "$(dirname "$LOG_FILE")" 2>/dev/null || true
    echo "[${timestamp}] ${level}: ${message}" >> "$LOG_FILE"
}
|
|
|
|
# Print a failure banner with the root cause and remediation steps,
# point the operator at the log file, then terminate the script.
# Arguments:
#   $1 - short root-cause description
#   $2 - multi-line remediation instructions
# Exits: always, with status 1.
fail_with_remediation() {
    local cause="$1"
    local fix="$2"

    log_message "ERROR" "$cause"
    printf '%b\n' "\n${RED}${BOLD}╔══════════════════════════════════════════════════════════════╗${NC}"
    printf '%b\n' "${RED}${BOLD}║ DEPLOYMENT FAILED ║${NC}"
    printf '%b\n' "${RED}${BOLD}╚══════════════════════════════════════════════════════════════╝${NC}"
    printf '%b\n' "\n${YELLOW}${BOLD}Root Cause:${NC} $cause"
    printf '%b\n' "\n${YELLOW}${BOLD}How to Fix:${NC}"
    printf '%b\n' "$fix"
    printf '%b\n' "\n${YELLOW}Check the full log for details:${NC} $LOG_FILE"
    exit 1
}
|
|
|
|
# ==============================================================================
|
|
# PREFLIGHT CHECK VERIFICATION
|
|
# ==============================================================================
|
|
|
|
# Verify that the preflight check (wallarm-ct-check.sh) has been run and
# that its results permit deployment. Parses key=value pairs from
# $ENV_FILE into globals (CHECK_RESULT, OS_NAME, OS_VERSION,
# ARCHITECTURE, INIT_SYSTEM, *_REACHABLE). Offers to run the check
# interactively when $ENV_FILE is missing, and aborts via
# fail_with_remediation on unrecoverable states.
verify_preflight_check() {
    log_message "INFO" "Verifying preflight check results..."

    if [ ! -f "$ENV_FILE" ]; then
        log_message "ERROR" "Preflight check file not found: $ENV_FILE"
        echo -e "\n${YELLOW}Preflight check has not been run or .env file is missing.${NC}"
        echo -e "${YELLOW}Would you like to run the preflight check now?${NC}"
        read -r -p "$(echo -e "${YELLOW}Run preflight check? (Y/n): ${NC}")" -n 1
        echo
        if [[ ! $REPLY =~ ^[Nn]$ ]]; then
            echo -e "${CYAN}Running preflight check...${NC}"
            if ! ./wallarm-ct-check.sh; then
                fail_with_remediation "Preflight check failed" \
"Run the preflight check manually and fix any issues:
1. ./wallarm-ct-check.sh
2. Review the errors in $ENV_FILE
3. Fix the issues and run this script again"
            fi
        else
            fail_with_remediation "Preflight check required" \
"Run the preflight check before deployment:
1. ./wallarm-ct-check.sh
2. Review results in $ENV_FILE
3. Run this script again"
        fi
    fi

    # Initialize every expected key first: a sparse .env would otherwise
    # leave these unset and trip 'set -u' when they are read below.
    CHECK_RESULT=""
    OS_NAME=""
    OS_VERSION=""
    ARCHITECTURE=""
    INIT_SYSTEM=""

    # Load environment variables from .env file.
    # Parsed key-by-key rather than sourced to avoid code injection.
    while IFS='=' read -r key value; do
        # Skip comments and empty lines
        [[ "$key" =~ ^#.*$ ]] && continue
        [[ -z "$key" ]] && continue

        # Remove surrounding double quotes from value
        value="${value%\"}"
        value="${value#\"}"

        # Map known keys to globals; unknown keys are ignored
        case "$key" in
            result) CHECK_RESULT="$value" ;;
            os_name) OS_NAME="$value" ;;
            os_version) OS_VERSION="$value" ;;
            architecture) ARCHITECTURE="$value" ;;
            init_system) INIT_SYSTEM="$value" ;;
            us_cloud_reachable) US_CLOUD_REACHABLE="$value" ;;
            eu_cloud_reachable) EU_CLOUD_REACHABLE="$value" ;;
            registry_reachable) REGISTRY_REACHABLE="$value" ;;
            download_reachable) DOWNLOAD_REACHABLE="$value" ;;
        esac
    done < "$ENV_FILE"

    if [ "$CHECK_RESULT" != "pass" ]; then
        log_message "ERROR" "Preflight check failed (result: $CHECK_RESULT)"
        echo -e "\n${YELLOW}Preflight check found issues. Please review:${NC}"
        echo -e "${YELLOW}1. Check file: $ENV_FILE${NC}"
        echo -e "${YELLOW}2. Run: ./wallarm-ct-check.sh${NC}"
        echo -e "${YELLOW}3. Fix the issues and try again${NC}"
        exit 1
    fi

    log_message "SUCCESS" "Preflight check verified:"
    log_message "SUCCESS" " OS: $OS_NAME $OS_VERSION"
    log_message "SUCCESS" " Architecture: $ARCHITECTURE"
    log_message "SUCCESS" " Init System: $INIT_SYSTEM"
    log_message "SUCCESS" " US Cloud Reachable: $US_CLOUD_REACHABLE"
    log_message "SUCCESS" " EU Cloud Reachable: $EU_CLOUD_REACHABLE"
    log_message "SUCCESS" " Registry Reachable: $REGISTRY_REACHABLE"
    log_message "SUCCESS" " Download Reachable: $DOWNLOAD_REACHABLE"

    # Validate we have at least one cloud region reachable
    if [ "$US_CLOUD_REACHABLE" = "false" ] && [ "$EU_CLOUD_REACHABLE" = "false" ]; then
        fail_with_remediation "No Wallarm cloud region reachable" \
"Network connectivity issues detected:
1. Check firewall rules for Wallarm cloud endpoints
2. Verify network connectivity
3. Run preflight check again: ./wallarm-ct-check.sh"
    fi

    # Validate we have resources for Docker/Wallarm
    if [ "$REGISTRY_REACHABLE" = "false" ] && [ "$DOWNLOAD_REACHABLE" = "false" ]; then
        log_message "WARNING" "Neither registry nor download server reachable"
        log_message "INFO" "Checking for local resources..."

        local has_local_resources=true
        # compgen -G tests whether a glob matches without parsing 'ls' output
        if ! compgen -G "docker-*.tgz" >/dev/null; then
            log_message "ERROR" "No local Docker binary found"
            has_local_resources=false
        fi

        if ! compgen -G "wallarm-node-*.tar" >/dev/null; then
            log_message "ERROR" "No local Wallarm image found"
            has_local_resources=false
        fi

        if [ "$has_local_resources" = "false" ]; then
            fail_with_remediation "Insufficient resources for deployment" \
"Please provide either:
1. Network access to $DOCKER_REGISTRY_HOST
2. Network access to $DOCKER_DOWNLOAD_HOST
3. Local files: docker-*.tgz and wallarm-node-*.tar in current directory"
        fi
    fi
}
|
|
|
|
# ==============================================================================
|
|
# CONFIGURATION COLLECTION FUNCTIONS
|
|
# ==============================================================================
|
|
|
|
# Prompt the operator to choose the Wallarm Cloud region, limited to the
# regions the preflight check marked reachable. Sets the CLOUD_REGION and
# API_HOST globals. Auto-selects when exactly one region is reachable.
select_cloud_region() {
    log_message "INFO" "Selecting Wallarm Cloud region..."

    echo -e "\n${CYAN}${BOLD}Wallarm Cloud Region Selection:${NC}"

    # Show available regions based on preflight check.
    # Each reachable region contributes TWO accepted answers (its number
    # and its name), so a length of 2 means exactly one region.
    local available_options=()

    if [ "$US_CLOUD_REACHABLE" = "true" ]; then
        echo -e "1. ${YELLOW}US Cloud${NC} (us1.api.wallarm.com) - For US-based deployments"
        available_options+=("1" "US")
    fi

    if [ "$EU_CLOUD_REACHABLE" = "true" ]; then
        echo -e "2. ${YELLOW}EU Cloud${NC} (api.wallarm.com) - For EU-based deployments"
        available_options+=("2" "EU")
    fi

    if [ ${#available_options[@]} -eq 0 ]; then
        fail_with_remediation "No cloud regions available" \
"Preflight check showed no reachable cloud regions.
1. Check network connectivity to Wallarm endpoints
2. Run preflight check again: ./wallarm-ct-check.sh
3. Contact network administrator if behind firewall"
    fi

    # Build regex pattern accepting any of the available answers
    local pattern
    pattern="^($(IFS='|'; echo "${available_options[*]}"))$"

    local cloud_choice=""
    while [[ ! "$cloud_choice" =~ $pattern ]]; do
        if [ ${#available_options[@]} -eq 2 ]; then
            # Only one region available: select it automatically.
            # FIX: CLOUD_REGION/API_HOST must be assigned here as well —
            # breaking out before the case below previously left them empty.
            if [ "$US_CLOUD_REACHABLE" = "true" ]; then
                CLOUD_REGION="US"
                API_HOST="us1.api.wallarm.com"
                log_message "INFO" "Selected US Cloud"
            else
                CLOUD_REGION="EU"
                API_HOST="api.wallarm.com"
                log_message "INFO" "Selected EU Cloud"
            fi
            break
        fi

        read -r -p "$(echo -e "${YELLOW}Enter choice [1/US or 2/EU]: ${NC}")" cloud_choice
        cloud_choice=$(echo "$cloud_choice" | tr '[:lower:]' '[:upper:]')

        case "$cloud_choice" in
            1|"US")
                if [ "$US_CLOUD_REACHABLE" = "true" ]; then
                    CLOUD_REGION="US"
                    API_HOST="us1.api.wallarm.com"
                    log_message "INFO" "Selected US Cloud"
                else
                    echo -e "${RED}US Cloud is not reachable (per preflight check)${NC}"
                    cloud_choice=""
                fi
                ;;
            2|"EU")
                if [ "$EU_CLOUD_REACHABLE" = "true" ]; then
                    CLOUD_REGION="EU"
                    API_HOST="api.wallarm.com"
                    log_message "INFO" "Selected EU Cloud"
                else
                    echo -e "${RED}EU Cloud is not reachable (per preflight check)${NC}"
                    cloud_choice=""
                fi
                ;;
            *)
                if [ -n "$cloud_choice" ]; then
                    echo -e "${RED}Invalid choice. Select from available options above.${NC}"
                fi
                ;;
        esac
    done

    log_message "SUCCESS" "Cloud region selected: $CLOUD_REGION ($API_HOST)"
}
|
|
|
|
# Critical fix from review: Proper IP validation.
# Return 0 when $1 is a dotted-quad IPv4 address with every octet in
# 0-255, 1 otherwise. Leading zeros are accepted (treated as decimal),
# matching the previous behavior.
validate_ip_address() {
    local candidate="$1"
    local octet

    # Anchor the whole string and capture each octet (1-3 digits)
    if [[ ! "$candidate" =~ ^([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$ ]]; then
        return 1
    fi

    # Range-check each captured octet; 10# forces decimal so values
    # like "08" are not parsed as invalid octal.
    for octet in "${BASH_REMATCH[@]:1}"; do
        if (( 10#$octet > 255 )); then
            return 1
        fi
    done

    return 0
}
|
|
|
|
# Critical fix from review: Port conflict detection with fallback.
# Return 0 when no listener is found on the given port, 1 when the port
# is in use. Prefers ss, falls back to netstat; when neither tool is
# present the port is assumed available (a warning is logged).
# Arguments: $1 - port number, $2 - protocol (default "tcp")
check_port_available() {
    local port="$1"
    local protocol="${2:-tcp}"

    log_message "DEBUG" "Checking port $port/$protocol availability..."

    if command -v ss >/dev/null 2>&1; then
        # Build -t/-u from the protocol's first letter; -l = listening only
        ss -"${protocol:0:1}"ln | grep -q ":$port " && return 1
    elif command -v netstat >/dev/null 2>&1; then
        netstat -tulpn 2>/dev/null | grep -E ":$port\s" >/dev/null 2>&1 && return 1
    else
        log_message "WARNING" "Neither ss nor netstat available, cannot check port $port"
    fi

    return 0 # Port available (or could not be checked)
}
|
|
|
|
# Interactively collect the deployment configuration: ingress port,
# derived monitoring port, upstream application address/port, Wallarm
# node token, and the instance name/directory. Populates the globals
# INGRESS_PORT, MONITORING_PORT, UPSTREAM_IP, UPSTREAM_PORT,
# WALLARM_TOKEN, INSTANCE_NAME, INSTANCE_DIR.
collect_configuration() {
    log_message "INFO" "Collecting deployment configuration..."

    # Get ingress port (1-65535 and not already in use)
    local default_port=80
    local ingress_port=""
    while [[ ! "$ingress_port" =~ ^[0-9]+$ ]] || [ "$ingress_port" -lt 1 ] || [ "$ingress_port" -gt 65535 ]; do
        read -r -p "$(echo -e "${YELLOW}Enter inbound port [${default_port}]: ${NC}")" ingress_port
        ingress_port="${ingress_port:-$default_port}"

        if [[ ! "$ingress_port" =~ ^[0-9]+$ ]]; then
            echo -e "${RED}Port must be a number${NC}"
        elif [ "$ingress_port" -lt 1 ] || [ "$ingress_port" -gt 65535 ]; then
            echo -e "${RED}Port must be between 1 and 65535${NC}"
        elif ! check_port_available "$ingress_port"; then
            echo -e "${RED}Port $ingress_port is already in use${NC}"
            ingress_port=""
        fi
    done

    # Calculate monitoring port (ingress + 10, check for conflicts;
    # fall back to +100, then +200)
    local monitoring_port=$((ingress_port + 10))
    if ! check_port_available "$monitoring_port"; then
        log_message "WARNING" "Port $monitoring_port is in use, choosing alternative..."
        monitoring_port=$((ingress_port + 100))
        if ! check_port_available "$monitoring_port"; then
            monitoring_port=$((ingress_port + 200))
        fi
    fi
    log_message "INFO" "Monitoring port will be: $monitoring_port"

    # Get application server details
    local upstream_ip=""
    local upstream_port=""

    echo -e "\n${CYAN}${BOLD}Application Server Configuration:${NC}"
    echo -e "${YELLOW}Enter the IP/hostname and port of your backend application${NC}"

    while [[ -z "$upstream_ip" ]]; do
        read -r -p "$(echo -e "${YELLOW}Upstream App IP/Hostname [127.0.0.1]: ${NC}")" upstream_ip
        upstream_ip="${upstream_ip:-127.0.0.1}"

        # Accept either a valid IPv4 address or a plausible hostname
        if ! validate_ip_address "$upstream_ip" && \
           ! [[ "$upstream_ip" =~ ^[a-zA-Z0-9][a-zA-Z0-9.-]*[a-zA-Z0-9]$ ]]; then
            echo -e "${RED}Invalid IP/hostname format${NC}"
            upstream_ip=""
        fi
    done

    while [[ ! "$upstream_port" =~ ^[0-9]+$ ]] || [ "$upstream_port" -lt 1 ] || [ "$upstream_port" -gt 65535 ]; do
        read -r -p "$(echo -e "${YELLOW}Upstream App Port [8080]: ${NC}")" upstream_port
        upstream_port="${upstream_port:-8080}"

        if [[ ! "$upstream_port" =~ ^[0-9]+$ ]]; then
            echo -e "${RED}Port must be a number${NC}"
        elif [ "$upstream_port" -lt 1 ] || [ "$upstream_port" -gt 65535 ]; then
            echo -e "${RED}Port must be between 1 and 65535${NC}"
        fi
    done

    # Verify application server reachability (TCP connect via /dev/tcp)
    log_message "INFO" "Verifying application server reachability..."
    if timeout 5 bash -c "cat < /dev/null > /dev/tcp/$upstream_ip/$upstream_port" 2>/dev/null; then
        log_message "SUCCESS" "Application server $upstream_ip:$upstream_port is reachable"
    else
        log_message "WARNING" "Application server $upstream_ip:$upstream_port is not reachable"
        echo -e "${YELLOW}${BOLD}Warning:${NC} Cannot reach application server at $upstream_ip:$upstream_port"
        echo -e "${YELLOW}This may cause the Wallarm node to fail. Possible reasons:${NC}"
        echo -e "1. Application server is not running"
        echo -e "2. Firewall blocking port $upstream_port"
        echo -e "3. Wrong IP/hostname"
        echo -e "4. Application server not listening on that port"

        read -r -p "$(echo -e "${YELLOW}Continue anyway? (y/N): ${NC}")" -n 1
        echo
        if [[ ! $REPLY =~ ^[Yy]$ ]]; then
            fail_with_remediation "Application server unreachable" \
"Ensure your application server is accessible:
1. Start your application server
2. Check it's listening: sudo ss -tlnp | grep :$upstream_port
3. Verify firewall rules allow inbound connections
4. Test connectivity: telnet $upstream_ip $upstream_port
5. If using hostname, verify DNS resolution: nslookup $upstream_ip"
        fi
    fi

    # Get Wallarm node token
    local wallarm_token=""
    local token_length=0   # declared local: previously leaked into global scope
    echo -e "\n${CYAN}${BOLD}Wallarm Node Token:${NC}"
    echo -e "${YELLOW}Get your token from Wallarm Console:${NC}"
    echo -e "Create a new 'Wallarm node' and copy the token (will be visible as you type)"
    while [[ -z "$wallarm_token" ]]; do
        read -r -p "$(echo -e "${YELLOW}Paste Wallarm Node Token: ${NC}")" wallarm_token
        # Trim whitespace and newlines
        wallarm_token=$(echo "$wallarm_token" | tr -d '[:space:]')

        if [[ -z "$wallarm_token" ]]; then
            echo -e "${RED}Token cannot be empty${NC}"
        elif [[ ! "$wallarm_token" =~ ^[A-Za-z0-9_+/=\-]+$ ]]; then
            echo -e "${RED}Token contains invalid characters. Wallarm tokens are base64 strings (A-Z, a-z, 0-9, _, -, +, /, =)${NC}"
            echo -e "${YELLOW}First 20 chars of what you entered: '${wallarm_token:0:20}...'${NC}"
            wallarm_token=""
        else
            # Show confirmation of token length (but not full token for security)
            token_length=${#wallarm_token}
            echo -e "${GREEN}Token accepted (${token_length} characters).${NC}"
            echo -e "${YELLOW}First 8 chars for verification: ${wallarm_token:0:8}...${NC}"
        fi
    done

    # Generate instance name and directory
    local instance_name
    instance_name="wallarm-$(hostname -s | tr '[:upper:]' '[:lower:]')-$(date +%Y%m%d)"
    local instance_dir="/opt/$instance_name"

    # Ensure directory exists
    sudo mkdir -p "$instance_dir"

    log_message "SUCCESS" "Configuration collected:"
    log_message "SUCCESS" " Ingress Port: $ingress_port"
    log_message "SUCCESS" " Monitoring Port: $monitoring_port"
    log_message "SUCCESS" " Upstream: $upstream_ip:$upstream_port"
    log_message "SUCCESS" " Instance: $instance_name"
    log_message "SUCCESS" " Directory: $instance_dir"

    # Set global variables
    INGRESS_PORT="$ingress_port"
    MONITORING_PORT="$monitoring_port"
    UPSTREAM_IP="$upstream_ip"
    UPSTREAM_PORT="$upstream_port"
    WALLARM_TOKEN="$wallarm_token"
    INSTANCE_NAME="$instance_name"
    INSTANCE_DIR="$instance_dir"
}
|
|
|
|
# ==============================================================================
|
|
# DOCKER ENGINE SETUP (LXC OPTIMIZED)
|
|
# ==============================================================================
|
|
|
|
# Install and configure Docker Engine for LXC containers: download a
# static binary (or use a local docker-*.tgz), install to /usr/bin,
# write an LXC-friendly daemon.json (VFS storage driver, cgroupfs),
# register a service for the detected init system, and optionally add
# the current user to the docker group. No-op when Docker already runs.
setup_docker_engine() {
    log_message "INFO" "Setting up Docker Engine for LXC/stealth deployment..."

    # Check if Docker is already installed and running
    if command -v docker >/dev/null 2>&1 && sudo docker info >/dev/null 2>&1; then
        local docker_version
        docker_version=$(docker --version | cut -d' ' -f3 | tr -d ',')
        log_message "SUCCESS" "Docker is already installed and running (version $docker_version)"

        # Check if Docker is configured for LXC
        if sudo docker info 2>/dev/null | grep -q "Storage Driver: vfs"; then
            log_message "SUCCESS" "Docker is already configured with VFS storage driver (LXC compatible)"
        else
            log_message "WARNING" "Docker is not using VFS storage driver. LXC compatibility may be limited."
        fi
        return 0
    fi

    log_message "INFO" "Docker not found or not running. Proceeding with installation..."

    # Determine binary source
    local binary_file="docker-$DOCKER_VERSION.tgz"
    local binary_path=""

    if [ "$DOWNLOAD_REACHABLE" = "true" ]; then
        # Download Docker static binary from internal server
        log_message "INFO" "Downloading Docker static binary for $ARCHITECTURE..."
        local download_url="$DOCKER_STATIC_BASE_URL/$ARCHITECTURE/docker-$DOCKER_VERSION.tgz"

        # $CURL_INSECURE_FLAG is intentionally unquoted: empty expands to no flag
        if curl -fL $CURL_INSECURE_FLAG --connect-timeout 30 "$download_url" -o "$binary_file"; then
            log_message "SUCCESS" "Downloaded Docker binary: $binary_file"
            binary_path="$binary_file"
        else
            log_message "ERROR" "Failed to download Docker binary from $download_url"
            binary_path=""
        fi
    fi

    # Fallback: Check for local Docker binary
    if [ -z "$binary_path" ]; then
        log_message "INFO" "Checking for local Docker binary..."
        # Use a glob instead of parsing 'ls' output: 'ls ... | head -1'
        # aborted the whole script under 'set -o pipefail' when no file
        # matched, before the remediation message could be shown.
        local local_files=""
        local candidate
        for candidate in docker-*.tgz; do
            if [ -e "$candidate" ]; then
                local_files="$candidate"
                break
            fi
        done
        if [ -n "$local_files" ]; then
            binary_path="$local_files"
            log_message "SUCCESS" "Using local Docker binary: $binary_path"
        else
            fail_with_remediation "No Docker binary available" \
"Please provide a Docker static binary:
1. Download manually:
   curl -L '$DOCKER_STATIC_BASE_URL/$ARCHITECTURE/docker-$DOCKER_VERSION.tgz' -o docker.tgz
2. Or place an existing docker-*.tgz file in current directory
3. Re-run the script after downloading"
        fi
    fi

    # Extract and install
    log_message "INFO" "Extracting Docker binary..."

    # First verify the tar file exists and is readable
    if [ ! -f "$binary_path" ]; then
        fail_with_remediation "Docker binary file not found" \
            "File $binary_path does not exist. Check download and file permissions."
    fi

    if [ ! -r "$binary_path" ]; then
        fail_with_remediation "Docker binary file not readable" \
            "Cannot read file $binary_path. Check file permissions."
    fi

    # Test if it's a valid tar archive
    log_message "DEBUG" "Testing tar archive integrity..."

    # Re-check readability with detailed diagnostics
    if [ ! -r "$binary_path" ]; then
        log_message "ERROR" "Cannot read file: $binary_path"
        log_message "INFO" "File permissions: $(ls -la "$binary_path" 2>/dev/null || echo "cannot stat")"
        fail_with_remediation "Cannot read Docker binary file" \
"File $binary_path exists but is not readable.
1. Check file permissions: ls -la $binary_path
2. Fix permissions: chmod 644 $binary_path
3. Or download fresh copy"
    fi

    # Try to get file type information
    local file_type="unknown"
    if command -v file >/dev/null 2>&1; then
        file_type=$(file "$binary_path" 2>/dev/null || echo "file command failed")
    elif command -v hexdump >/dev/null 2>&1; then
        # Check magic bytes manually (declaration split from assignment
        # so 'local' cannot mask a failing command substitution)
        local magic_bytes
        magic_bytes=$(hexdump -n 2 -C "$binary_path" 2>/dev/null | head -1 | cut -d' ' -f2-3 || echo "no magic")
        file_type="hexdump: $magic_bytes"
    fi

    log_message "INFO" "File info: $binary_path ($(stat -c%s "$binary_path") bytes)"
    log_message "INFO" "File type: $file_type"
    log_message "INFO" "Current directory: $(pwd)"
    log_message "INFO" "Full path: $(readlink -f "$binary_path" 2>/dev/null || echo "$binary_path")"

    # Test tar archive with error capture.
    # FIX: the '|| tar_test_exit=$?' form is required — a bare assignment
    # of a failing $(tar ...) aborted the script under 'set -e' before any
    # of the diagnostics below could run, and 'local x=$?' masked the code.
    local tar_test_output
    local tar_test_exit=0
    tar_test_output=$(tar -tzf "$binary_path" 2>&1) || tar_test_exit=$?

    if [ $tar_test_exit -ne 0 ]; then
        log_message "ERROR" "File $binary_path is not a valid tar.gz archive (tar exit: $tar_test_exit)"
        log_message "DEBUG" "Tar test output: $tar_test_output"

        # Check if it might be a different compression format
        log_message "INFO" "Checking for alternative compression formats..."

        # Try gunzip test
        if command -v gunzip >/dev/null 2>&1; then
            if gunzip -t "$binary_path" 2>/dev/null; then
                log_message "WARNING" "File is valid gzip but tar can't read it"
            else
                log_message "INFO" "Not a valid gzip file either"
            fi
        fi

        # Check first few bytes
        if command -v xxd >/dev/null 2>&1; then
            log_message "DEBUG" "First 20 bytes: $(xxd -l 20 "$binary_path" 2>/dev/null || echo "cannot read")"
        elif command -v od >/dev/null 2>&1; then
            log_message "DEBUG" "First 20 bytes: $(od -x -N 20 "$binary_path" 2>/dev/null | head -2 || echo "cannot read")"
        fi

        fail_with_remediation "Docker binary file is corrupted or invalid" \
"The Docker binary file is not a valid tar.gz archive.
Tar error: $tar_test_output

File info: $(stat -c%s "$binary_path") bytes, type: $file_type

Possible solutions:
1. The download may have been interrupted or corrupted
2. The file may be in wrong format (not tar.gz)
3. Server might be serving wrong content

Steps to fix:
1. Delete corrupted file: rm -f docker-*.tgz
2. Check disk space: df -h .
3. Download manually and verify:
   curl -v -L '$DOCKER_STATIC_BASE_URL/$ARCHITECTURE/docker-$DOCKER_VERSION.tgz' -o test.tgz
   file test.tgz
   tar -tzf test.tgz
4. Check if tar command works: tar --version"
    fi

    log_message "SUCCESS" "Tar archive validation passed"

    # Extract the archive (same 'set -e'-safe exit-code capture as above)
    log_message "DEBUG" "Extracting files from $binary_path..."
    local tar_output
    local tar_exit=0
    tar_output=$(tar xzvf "$binary_path" 2>&1) || tar_exit=$?

    if [ $tar_exit -ne 0 ]; then
        log_message "ERROR" "Failed to extract files from $binary_path (exit code: $tar_exit)"
        log_message "DEBUG" "Tar output: $tar_output"
        log_message "INFO" "Checking extracted files..."
        if [ -d "docker" ]; then
            log_message "WARNING" "Some files were extracted to 'docker/' directory"
            ls -la docker/ 2>/dev/null | head -10 || true
        fi
        fail_with_remediation "Failed to extract Docker binary" \
"Extraction failed. Possible reasons:
1. Insufficient disk space: df -h .
2. Permission issues in current directory
3. Corrupted archive (partial download)
4. File system issues

Tar error: $tar_output

Check disk space and permissions, then try manual extraction:
tar xzvf $binary_path"
    else
        log_message "SUCCESS" "Docker binary extracted successfully"
    fi

    log_message "INFO" "Installing Docker binaries to /usr/bin/"
    sudo cp docker/* /usr/bin/ 2>/dev/null || {
        fail_with_remediation "Failed to copy Docker binaries" \
"Permission denied copying to /usr/bin/
1. Ensure you have sudo privileges
2. Check disk space: df -h /
3. Manual installation:
   sudo cp docker/* /usr/bin/"
    }

    # Cleanup extracted directory
    rm -rf docker

    # Configure Docker daemon for LXC (VFS storage driver, cgroupfs)
    log_message "INFO" "Configuring Docker daemon for LXC (VFS storage driver, cgroupfs)..."

    # Create docker configuration directory
    sudo mkdir -p /etc/docker

    # Create daemon.json for LXC optimization
    sudo tee /etc/docker/daemon.json > /dev/null <<EOF
{
    "storage-driver": "vfs",
    "exec-opts": ["native.cgroupdriver=cgroupfs"],
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "100m",
        "max-file": "3"
    }
}
EOF

    # Configure based on init system
    log_message "INFO" "Configuring Docker service for init system: $INIT_SYSTEM"

    case "$INIT_SYSTEM" in
        "systemd")
            # Create systemd service files (quoted heredoc: $MAINPID is
            # expanded by systemd, not by this script)
            sudo tee /etc/systemd/system/docker.service > /dev/null <<'EOF'
[Unit]
Description=Docker Engine
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket

[Service]
Type=notify
ExecStart=/usr/bin/dockerd --group docker
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

            sudo tee /etc/systemd/system/docker.socket > /dev/null <<'EOF'
[Unit]
Description=Docker Socket for the API

[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target
EOF

            sudo systemctl daemon-reload
            sudo systemctl enable docker
            sudo systemctl start docker
            ;;

        "openrc")
            # Alpine OpenRC configuration
            sudo tee /etc/init.d/docker > /dev/null <<'EOF'
#!/sbin/openrc-run
description="Docker Engine"
command="/usr/bin/dockerd"
command_args="--group docker"
pidfile="/run/docker.pid"
command_background=true

depend() {
    need net
    after firewall
}
EOF

            sudo chmod +x /etc/init.d/docker
            sudo rc-update add docker default
            sudo rc-service docker start
            ;;

        "sysvinit")
            # Traditional SysV init script (quoted heredoc: $1, $DAEMON
            # etc. belong to the generated script, not to this one)
            sudo tee /etc/init.d/docker > /dev/null <<'EOF'
#!/bin/bash
### BEGIN INIT INFO
# Provides: docker
# Required-Start: $local_fs $network $remote_fs
# Required-Stop: $local_fs $network $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Docker Engine
# Description: Docker container runtime
### END INIT INFO

DESC="Docker Engine"
DAEMON=/usr/bin/dockerd
DAEMON_ARGS="--group docker"
PIDFILE=/var/run/docker.pid
SCRIPTNAME=/etc/init.d/docker

[ -x "$DAEMON" ] || exit 0

. /lib/lsb/init-functions

case "$1" in
    start)
        log_daemon_msg "Starting $DESC" "docker"
        start-stop-daemon --start --background --pidfile "$PIDFILE" \
            --exec "$DAEMON" -- $DAEMON_ARGS
        log_end_msg $?
        ;;
    stop)
        log_daemon_msg "Stopping $DESC" "docker"
        start-stop-daemon --stop --pidfile "$PIDFILE" --retry 10
        log_end_msg $?
        ;;
    restart)
        $0 stop
        sleep 1
        $0 start
        ;;
    status)
        status_of_proc -p "$PIDFILE" "$DAEMON" docker
        ;;
    *)
        echo "Usage: $SCRIPTNAME {start|stop|restart|status}"
        exit 3
        ;;
esac

exit 0
EOF

            sudo chmod +x /etc/init.d/docker
            sudo update-rc.d docker defaults
            sudo service docker start
            ;;

        *)
            log_message "WARNING" "Unknown init system '$INIT_SYSTEM', trying systemd defaults"
            sudo systemctl daemon-reload 2>/dev/null || true
            sudo systemctl enable docker 2>/dev/null || true
            sudo systemctl start docker 2>/dev/null || {
                log_message "ERROR" "Failed to start Docker with unknown init system"
                echo -e "${YELLOW}Please start Docker manually and re-run the script${NC}"
                exit 1
            }
            ;;
    esac

    # Verify Docker is running
    log_message "INFO" "Verifying Docker service..."
    sleep 3 # Give Docker time to start

    if ! sudo docker info >/dev/null 2>&1; then
        fail_with_remediation "Docker failed to start" \
"Docker installation completed but service failed to start:
1. Check Docker logs: journalctl -u docker (systemd) or /var/log/docker.log
2. Verify configuration: sudo dockerd --debug
3. Manual start: sudo dockerd --group docker &"
    fi

    # Verify Docker is using VFS storage driver
    log_message "INFO" "Verifying Docker storage driver configuration..."
    if sudo docker info 2>/dev/null | grep -q "Storage Driver: vfs"; then
        log_message "SUCCESS" "Docker configured with VFS storage driver (LXC compatible)"
    else
        log_message "WARNING" "Docker is not using VFS storage driver. Checking current driver..."
        sudo docker info 2>/dev/null | grep "Storage Driver:" || log_message "ERROR" "Could not determine storage driver"
        log_message "WARNING" "LXC compatibility may be limited without VFS storage driver"
    fi

    # Add current user to docker group for passwordless docker commands
    log_message "INFO" "Adding current user to docker group..."

    # Security notice: docker group grants root-equivalent privileges
    echo -e "${YELLOW}${BOLD}Security Notice:${NC} Adding your user to the 'docker' group grants root-equivalent privileges."
    echo -e "${YELLOW}Any user in the docker group can run commands as root on the host system.${NC}"
    echo -e "${YELLOW}Only proceed if you understand and accept this security risk.${NC}"

    read -r -p "$(echo -e "${YELLOW}Add $(whoami) to docker group? (Y/n): ${NC}")" -n 1
    echo
    if [[ ! $REPLY =~ ^[Nn]$ ]]; then
        if ! getent group docker >/dev/null; then
            sudo groupadd docker 2>/dev/null || log_message "WARNING" "Failed to create docker group (may already exist)"
        fi
        # FIX: explicit if/else — the previous bare 'usermod && log' AND-list
        # aborted the whole script under 'set -e' when usermod failed.
        if sudo usermod -aG docker "$(whoami)" 2>/dev/null; then
            log_message "SUCCESS" "Added $(whoami) to docker group (log out and back in for changes)"
        else
            log_message "WARNING" "Failed to add $(whoami) to docker group"
        fi
    else
        log_message "WARNING" "Skipping docker group addition. You will need to use sudo for docker commands."
        echo -e "${YELLOW}Note: You can manually add yourself to docker group later with:${NC}"
        echo -e "${CYAN} sudo usermod -aG docker $(whoami)${NC}"
        echo -e "${YELLOW}Then log out and back in for changes to take effect.${NC}"
    fi

    log_message "SUCCESS" "Docker Engine setup completed successfully"
}
|
|
|
|
# ==============================================================================
# WALLARM NODE DEPLOYMENT
# ==============================================================================
|
|
|
|
# ------------------------------------------------------------------------------
# deploy_wallarm_node
# Deploys the Wallarm filtering node:
#   1. Pulls the image from the internal registry, or loads a local tarball.
#   2. Generates the nginx proxy configuration.
#   3. Generates a persistent start script plus an init-system service.
#   4. Starts the container and logs a deployment summary.
# Globals (read): REGISTRY_REACHABLE, WALLARM_IMAGE_SOURCE, WALLARM_IMAGE_TARGET,
#   DOCKER_REGISTRY_HOST, INTERNAL_DOCKER_REGISTRY, INSTANCE_DIR, INSTANCE_NAME,
#   INGRESS_PORT, MONITORING_PORT, WALLARM_TOKEN, API_HOST, UPSTREAM_IP,
#   UPSTREAM_PORT, INIT_SYSTEM
# Side effects: files under $INSTANCE_DIR, a service unit, a running container.
# Exits via fail_with_remediation on any unrecoverable error.
# ------------------------------------------------------------------------------
deploy_wallarm_node() {
    log_message "INFO" "Deploying Wallarm filtering node..."

    # --- Image acquisition ----------------------------------------------------
    log_message "INFO" "Pulling Wallarm Docker image from internal registry: $WALLARM_IMAGE_SOURCE"

    if [ "$REGISTRY_REACHABLE" = "true" ]; then
        if ! sudo docker pull "$WALLARM_IMAGE_SOURCE"; then
            fail_with_remediation "Failed to pull Wallarm image from internal registry" \
                "Docker pull from internal registry failed. Possible reasons:
1. Network connectivity to $DOCKER_REGISTRY_HOST
2. Authentication required for internal registry
3. Insufficient disk space

Solutions:
1. Check network: curl -I $INTERNAL_DOCKER_REGISTRY
2. Login to internal registry if required
3. Use local image fallback: docker save/load
4. Check disk: df -h /var/lib/docker"
        fi

        # Re-tag to the standard name the rest of the tooling expects.
        sudo docker tag "$WALLARM_IMAGE_SOURCE" "$WALLARM_IMAGE_TARGET"
        log_message "SUCCESS" "Wallarm image pulled and tagged successfully"
    else
        # Registry unreachable: fall back to a local image tarball.
        log_message "INFO" "Using local Wallarm image (registry not reachable)"
        local local_image
        # Expand the glob via compgen instead of parsing 'ls' output (safe with
        # unusual filenames); '|| true' keeps set -e / pipefail happy when
        # nothing matches.
        local_image=$(compgen -G 'wallarm-node-*.tar' | head -1 || true)
        if [ -n "$local_image" ]; then
            if ! sudo docker load -i "$local_image"; then
                fail_with_remediation "Failed to load local Wallarm image" \
                    "Local Wallarm image file may be corrupted:
1. Verify file integrity: tar -tf wallarm-node-*.tar
2. Download a fresh image on another machine:
   docker pull $WALLARM_IMAGE_SOURCE
   docker save $WALLARM_IMAGE_TARGET -o wallarm-node-latest.tar
3. Copy the file to this machine and re-run"
            fi
            log_message "SUCCESS" "Local Wallarm image loaded successfully"
        else
            fail_with_remediation "No Wallarm image available" \
                "Need either:
1. Network access to $DOCKER_REGISTRY_HOST
2. Local wallarm-node-*.tar file in current directory"
        fi
    fi

    # --- nginx configuration ----------------------------------------------------
    log_message "INFO" "Creating nginx configuration..."
    local nginx_config="$INSTANCE_DIR/nginx.conf"

    # NOTE(review): 'wallarm_instance' is deprecated/removed in recent node
    # releases — confirm the directive set against the deployed image version.
    sudo tee "$nginx_config" > /dev/null <<EOF
server {
    listen 80;
    server_name _;

    location / {
        proxy_pass http://$UPSTREAM_IP:$UPSTREAM_PORT;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;

        # Wallarm-specific directives
        wallarm_mode monitoring;
        wallarm_instance 1;
    }

    # Health check endpoint
    location /health {
        access_log off;
        return 200 "healthy\n";
        add_header Content-Type text/plain;
    }
}
EOF

    log_message "SUCCESS" "Nginx configuration created: $nginx_config"

    # --- Start script (persistence) ---------------------------------------------
    log_message "INFO" "Creating start script for persistence..."
    local start_script="$INSTANCE_DIR/start.sh"

    # The generated script embeds the API token in plain text, so it is
    # created root-only (chmod 700) below instead of a plain +x.
    sudo tee "$start_script" > /dev/null <<EOF
#!/bin/bash
# Start script for Wallarm filtering node: $INSTANCE_NAME
# Generated: $(date)

CONTAINER_NAME="$INSTANCE_NAME"
NGINX_CONFIG="$nginx_config"
LOG_FILE="$INSTANCE_DIR/container.log"

echo "\$(date) - Starting Wallarm node \$CONTAINER_NAME" >> "\$LOG_FILE"

# Stop existing container if running
sudo docker stop "\$CONTAINER_NAME" 2>/dev/null || true
sudo docker rm "\$CONTAINER_NAME" 2>/dev/null || true

# Start new container.
# NB: default (bridge) networking is required for the -p port mappings;
# '--network host' would silently ignore them and expose the container's
# internal ports (80/90) directly on the host instead.
sudo docker run -d \\
    --name "\$CONTAINER_NAME" \\
    --restart always \\
    -p $INGRESS_PORT:80 \\
    -p $MONITORING_PORT:90 \\
    -e WALLARM_API_TOKEN="$WALLARM_TOKEN" \\
    -e WALLARM_API_HOST="$API_HOST" \\
    -e NGINX_BACKEND="$UPSTREAM_IP:$UPSTREAM_PORT" \\
    -e WALLARM_MODE="monitoring" \\
    -v "\$NGINX_CONFIG:/etc/nginx/http.d/default.conf:ro" \\
    "$WALLARM_IMAGE_TARGET"

echo "\$(date) - Container started with ID: \$(sudo docker ps -q -f name=\$CONTAINER_NAME)" >> "\$LOG_FILE"

# Verify container is running (exact name match, not a substring of the
# whole 'docker ps' table)
sleep 3
if sudo docker ps --format '{{.Names}}' | grep -Fxq "\$CONTAINER_NAME"; then
    echo "\$(date) - Verification: Container is running" >> "\$LOG_FILE"
    echo "Wallarm node \$CONTAINER_NAME started successfully"
else
    echo "\$(date) - ERROR: Container failed to start" >> "\$LOG_FILE"
    sudo docker logs "\$CONTAINER_NAME" >> "\$LOG_FILE" 2>&1
    exit 1
fi
EOF

    # 700 instead of +x: the script contains the Wallarm API token.
    sudo chmod 700 "$start_script"
    log_message "SUCCESS" "Start script created: $start_script"

    # --- Init-system service for automatic startup -------------------------------
    log_message "INFO" "Creating service for automatic startup (init system: $INIT_SYSTEM)..."

    case "$INIT_SYSTEM" in
        "systemd")
            local service_file="/etc/systemd/system/wallarm-$INSTANCE_NAME.service"
            sudo tee "$service_file" > /dev/null <<EOF
[Unit]
Description=Wallarm Filtering Node: $INSTANCE_NAME
After=docker.service
Requires=docker.service

[Service]
Type=oneshot
RemainAfterExit=yes
ExecStart=$start_script
ExecStop=/usr/bin/docker stop $INSTANCE_NAME
ExecStopPost=/usr/bin/docker rm $INSTANCE_NAME
WorkingDirectory=$INSTANCE_DIR
User=root
Group=root

[Install]
WantedBy=multi-user.target
EOF
            sudo systemctl daemon-reload
            sudo systemctl enable "wallarm-$INSTANCE_NAME.service" 2>/dev/null || \
                log_message "WARNING" "Failed to enable systemd service (may already exist)"
            ;;

        "openrc")
            local service_file="/etc/init.d/wallarm-$INSTANCE_NAME"
            sudo tee "$service_file" > /dev/null <<EOF
#!/sbin/openrc-run
description="Wallarm Filtering Node: $INSTANCE_NAME"
command="$start_script"
command_args=""
pidfile="/run/wallarm-\${RC_SVCNAME}.pid"
command_background=true

depend() {
    need docker
    after docker
}

start() {
    ebegin "Starting Wallarm node: $INSTANCE_NAME"
    \$command
    eend \$?
}

stop() {
    ebegin "Stopping Wallarm node: $INSTANCE_NAME"
    docker stop $INSTANCE_NAME 2>/dev/null || true
    docker rm $INSTANCE_NAME 2>/dev/null || true
    eend \$?
}
EOF
            sudo chmod +x "$service_file"
            sudo rc-update add "wallarm-$INSTANCE_NAME" default 2>/dev/null || \
                log_message "WARNING" "Failed to add OpenRC service (may already exist)"
            ;;

        "sysvinit")
            local service_file="/etc/init.d/wallarm-$INSTANCE_NAME"
            sudo tee "$service_file" > /dev/null <<EOF
#!/bin/bash
### BEGIN INIT INFO
# Provides:          wallarm-$INSTANCE_NAME
# Required-Start:    \$local_fs \$network docker
# Required-Stop:     \$local_fs \$network
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Wallarm Filtering Node: $INSTANCE_NAME
# Description:       Wallarm security filtering node
### END INIT INFO

DESC="Wallarm Filtering Node: $INSTANCE_NAME"
SCRIPT="$start_script"
PIDFILE="/var/run/wallarm-$INSTANCE_NAME.pid"

. /lib/lsb/init-functions

case "\$1" in
    start)
        log_daemon_msg "Starting \$DESC" "$INSTANCE_NAME"
        \$SCRIPT
        log_end_msg \$?
        ;;
    stop)
        log_daemon_msg "Stopping \$DESC" "$INSTANCE_NAME"
        docker stop $INSTANCE_NAME 2>/dev/null || true
        docker rm $INSTANCE_NAME 2>/dev/null || true
        log_end_msg \$?
        ;;
    restart)
        \$0 stop
        sleep 2
        \$0 start
        ;;
    status)
        if docker ps --format '{{.Names}}' | grep -Fxq "$INSTANCE_NAME"; then
            echo "$INSTANCE_NAME is running"
            exit 0
        else
            echo "$INSTANCE_NAME is not running"
            exit 1
        fi
        ;;
    *)
        echo "Usage: \$0 {start|stop|restart|status}"
        exit 3
        ;;
esac

exit 0
EOF
            sudo chmod +x "$service_file"
            sudo update-rc.d "wallarm-$INSTANCE_NAME" defaults 2>/dev/null || \
                log_message "WARNING" "Failed to add SysV init service (may already exist)"
            ;;

        *)
            log_message "WARNING" "Unknown init system, not creating service (manual start via $start_script)"
            ;;
    esac

    # --- Launch -------------------------------------------------------------------
    log_message "INFO" "Starting Wallarm filtering node..."
    if ! sudo "$start_script"; then
        fail_with_remediation "Failed to start Wallarm node" \
            "Container failed to start. Check:
1. Docker logs: sudo docker logs $INSTANCE_NAME
2. Port conflicts: sudo ss -tlnp | grep ':$INGRESS_PORT\|:$MONITORING_PORT'
3. Docker status: sudo docker info
4. Manual start attempt: sudo $start_script"
    fi

    log_message "SUCCESS" "Wallarm filtering node deployed successfully"
    log_message "SUCCESS" "  Container: $INSTANCE_NAME"
    log_message "SUCCESS" "  Ingress Port: $INGRESS_PORT"
    log_message "SUCCESS" "  Monitoring Port: $MONITORING_PORT"
    log_message "SUCCESS" "  Upstream: $UPSTREAM_IP:$UPSTREAM_PORT"
    log_message "SUCCESS" "  Config Directory: $INSTANCE_DIR"
}
|
|
|
|
# ==============================================================================
# DEPLOYMENT VERIFICATION
# ==============================================================================
|
|
|
|
# ------------------------------------------------------------------------------
# verify_deployment
# Post-deployment smoke tests: container running, ports bound, health endpoint
# responsive, proxy handshake to the upstream, and a quick cloud-sync log scan.
# Globals (read): INSTANCE_NAME, INSTANCE_DIR, INGRESS_PORT, MONITORING_PORT,
#   UPSTREAM_IP, UPSTREAM_PORT, CLOUD_REGION, API_HOST
# Only the "container running" check is fatal; all other checks warn so a
# slow-starting node does not abort an otherwise successful deployment.
# ------------------------------------------------------------------------------
verify_deployment() {
    log_message "INFO" "Verifying Wallarm deployment..."

    # Check that the container is running. Match the exact container name:
    # a plain 'docker ps | grep' would also match image names or containers
    # whose name merely contains ours.
    log_message "INFO" "Checking if container is running..."
    if ! sudo docker ps --format '{{.Names}}' | grep -Fxq "$INSTANCE_NAME"; then
        fail_with_remediation "Wallarm container is not running" \
            "Container failed to start or crashed:
1. Check container logs: sudo docker logs $INSTANCE_NAME
2. Check Docker service: sudo systemctl status docker (or equivalent)
3. Manual start: sudo $INSTANCE_DIR/start.sh"
    fi
    log_message "SUCCESS" "Container is running"

    # Ingress port should now be OCCUPIED, so check_port_available failing
    # is the expected (good) outcome.
    log_message "INFO" "Testing ingress port $INGRESS_PORT..."
    if ! check_port_available "$INGRESS_PORT"; then
        log_message "SUCCESS" "Ingress port $INGRESS_PORT is in use (as expected)"
    else
        log_message "WARNING" "Ingress port $INGRESS_PORT appears available (container may not be listening)"
    fi

    # Same logic for the monitoring port.
    log_message "INFO" "Testing monitoring port $MONITORING_PORT..."
    if ! check_port_available "$MONITORING_PORT"; then
        log_message "SUCCESS" "Monitoring port $MONITORING_PORT is in use (as expected)"
    else
        log_message "WARNING" "Monitoring port $MONITORING_PORT appears available"
    fi

    # Health check endpoint served directly by the generated nginx config;
    # retry once after a short grace period for slow container startup.
    log_message "INFO" "Testing health check endpoint..."
    local health_check_url="http://localhost:$INGRESS_PORT/health"
    if curl -sf --connect-timeout 5 "$health_check_url" >/dev/null 2>&1; then
        log_message "SUCCESS" "Health check endpoint responsive"
    else
        log_message "WARNING" "Health check endpoint not responsive (may need time to start)"
        sleep 5
        if curl -sf --connect-timeout 5 "$health_check_url" >/dev/null 2>&1; then
            log_message "SUCCESS" "Health check endpoint now responsive"
        else
            log_message "WARNING" "Health check endpoint still not responsive (check nginx config)"
        fi
    fi

    # End-to-end handshake: request through the filtering node to the upstream.
    log_message "INFO" "Testing handshake through filtering node to upstream..."
    local test_url="http://localhost:$INGRESS_PORT/"
    if curl -sfI --connect-timeout 10 "$test_url" >/dev/null 2>&1; then
        log_message "SUCCESS" "Handshake successful: filtering node can reach upstream"
    else
        log_message "WARNING" "Handshake failed (upstream may not be responding)"
        log_message "INFO" "Checking if upstream is directly reachable..."
        # Bash /dev/tcp probe distinguishes "upstream down" from "proxy broken".
        if timeout 5 bash -c "cat < /dev/null > /dev/tcp/$UPSTREAM_IP/$UPSTREAM_PORT" 2>/dev/null; then
            log_message "ERROR" "Upstream is reachable but filtering node cannot proxy"
            echo -e "${YELLOW}Possible nginx configuration issue. Check:${NC}"
            echo -e "1. Container logs: sudo docker logs $INSTANCE_NAME"
            echo -e "2. Nginx config: sudo docker exec $INSTANCE_NAME cat /etc/nginx/http.d/default.conf"
        else
            log_message "WARNING" "Upstream server is not reachable (as previously warned)"
        fi
    fi

    # Cloud synchronization: full sync takes minutes, so only do a quick
    # heuristic scan of the recent container logs.
    log_message "INFO" "Checking Wallarm cloud synchronization (this may take 30 seconds)..."
    echo -e "${YELLOW}Note: Full synchronization with Wallarm cloud may take several minutes.${NC}"
    echo -e "${YELLOW}You can check sync status in Wallarm Console.${NC}"

    if sudo docker logs "$INSTANCE_NAME" 2>&1 | tail -20 | grep -iE "sync|connected|token" >/dev/null 2>&1; then
        log_message "SUCCESS" "Wallarm node appears to be communicating with cloud"
    else
        log_message "WARNING" "No cloud synchronization messages in logs yet (may need time)"
    fi

    log_message "SUCCESS" "Deployment verification completed"
    echo -e "\n${GREEN}${BOLD}Verification Summary:${NC}"
    echo -e "  ${GREEN}✓${NC} Container running: $INSTANCE_NAME"
    echo -e "  ${GREEN}✓${NC} Ingress port: $INGRESS_PORT"
    echo -e "  ${GREEN}✓${NC} Monitoring port: $MONITORING_PORT"
    echo -e "  ${GREEN}✓${NC} Upstream: $UPSTREAM_IP:$UPSTREAM_PORT"
    echo -e "  ${GREEN}✓${NC} Cloud region: $CLOUD_REGION ($API_HOST)"
}
|
|
|
|
# ==============================================================================
# MAIN FUNCTION
# ==============================================================================
|
|
|
|
# ------------------------------------------------------------------------------
# main
# Orchestrates the five deployment phases: preflight verification, config
# collection, Docker engine setup, node deployment, and verification.
# Globals (written): LOG_FILE. Reads INSECURE_SSL plus everything the phase
# functions set (INSTANCE_NAME, INSTANCE_DIR, INGRESS_PORT, ...).
# ------------------------------------------------------------------------------
main() {
    # 'clear' exits non-zero when there is no usable TTY/TERM (cron, CI,
    # piped output); under set -e plus the ERR trap that would abort the
    # whole script before it does anything, so tolerate failure here.
    clear 2>/dev/null || true
    echo -e "${BLUE}${BOLD}"
    echo "╔══════════════════════════════════════════════════════════════╗"
    echo "║              WALLARM DEPLOYMENT SCRIPT - V1.1                ║"
    echo "║          LXC-Optimized Filtering Node Deployment             ║"
    echo "╚══════════════════════════════════════════════════════════════╝${NC}"
    echo -e "\n${YELLOW}Starting deployment at: $(date)${NC}"

    # --- Logging setup ----------------------------------------------------------
    # Prefer $HOME/logs; fall back to the current directory if it cannot be
    # created (e.g. read-only home).
    local log_dir="${HOME:-.}/logs"
    if [ ! -d "$log_dir" ]; then
        if ! mkdir -p "$log_dir"; then
            echo -e "${YELLOW}Cannot create log directory $log_dir, falling back to current directory...${NC}"
            log_dir="."
        fi
    fi

    LOG_FILE="$log_dir/wallarm-deployment.log"
    # Truncate/create the log file; degrade gracefully if the path is not
    # writable.
    if ! : > "$LOG_FILE"; then
        echo -e "${RED}Cannot create log file at $LOG_FILE${NC}"
        echo -e "${YELLOW}Falling back to current directory...${NC}"
        LOG_FILE="./wallarm-deployment.log"
        : > "$LOG_FILE" 2>/dev/null || true
    fi
    if ! chmod 644 "$LOG_FILE" 2>/dev/null; then
        echo -e "${YELLOW}Warning: Could not set permissions on log file${NC}"
    fi

    log_message "INFO" "=== Wallarm Deployment Started ==="

    # Surface the SSL posture early so the operator sees it in the log.
    if [ "$INSECURE_SSL" = "1" ]; then
        log_message "WARNING" "SSL certificate validation is DISABLED (insecure). Set WALLARM_INSECURE_SSL=0 to enable validation."
    fi

    # Phase 1: Verify preflight check
    log_message "INFO" "=== PHASE 1: PREFLIGHT CHECK VERIFICATION ==="
    verify_preflight_check

    # Phase 2: Configuration collection
    log_message "INFO" "=== PHASE 2: CONFIGURATION COLLECTION ==="
    select_cloud_region
    collect_configuration

    # Phase 3: Docker engine setup (LXC optimized)
    log_message "INFO" "=== PHASE 3: DOCKER ENGINE SETUP (LXC OPTIMIZED) ==="
    setup_docker_engine

    # Phase 4: Deployment
    log_message "INFO" "=== PHASE 4: DEPLOYMENT ==="
    deploy_wallarm_node

    # Phase 5: Verification
    log_message "INFO" "=== PHASE 5: VERIFICATION ==="
    verify_deployment

    # Success summary and operator next steps.
    log_message "SUCCESS" "=== WALLARM DEPLOYMENT COMPLETED SUCCESSFULLY ==="
    echo -e "\n${GREEN}${BOLD}╔══════════════════════════════════════════════════════════════╗${NC}"
    echo -e "${GREEN}${BOLD}║        WALLARM FILTERING NODE DEPLOYMENT SUCCESSFUL          ║${NC}"
    echo -e "${GREEN}${BOLD}╚══════════════════════════════════════════════════════════════╝${NC}"
    echo -e "\n${CYAN}The Wallarm filtering node is now active and protecting your application.${NC}"
    echo -e "${YELLOW}Full deployment log: $LOG_FILE${NC}"
    echo -e "${YELLOW}Instance directory: $INSTANCE_DIR${NC}"
    echo -e "\n${GREEN}To stop the node:${NC}  sudo docker stop $INSTANCE_NAME"
    echo -e "${GREEN}To restart:${NC}  sudo $INSTANCE_DIR/start.sh"
    echo -e "${GREEN}To view logs:${NC}  sudo docker logs -f $INSTANCE_NAME"
    echo -e "\n${MAGENTA}${BOLD}Deployment completed successfully!${NC}"
    echo -e "\n${YELLOW}Important next steps:${NC}"
    echo -e "1. Monitor sync status in Wallarm Console"
    echo -e "2. Test attack detection with safe test: curl http://localhost:$INGRESS_PORT/?wallarm_test=1"
    echo -e "3. Review logs periodically: sudo docker logs --tail 50 $INSTANCE_NAME"
}
|
|
|
|
# ==============================================================================
# SCRIPT EXECUTION
# ==============================================================================
|
|
|
|
# Ensure we're in bash. Use ${BASH_VERSION:-} so this guard does not itself
# die with an "unbound variable" error under 'set -u' when the script is
# (incorrectly) executed by a non-bash /bin/sh.
if [ -z "${BASH_VERSION:-}" ]; then
    echo "Error: This script must be run with bash" >&2
    exit 1
fi

# Run main function
main "$@"