Initial release: DictIA v0.8.14-alpha (fork of Speakr, AGPL-3.0)
This commit is contained in:
300
deployment/setup.sh
Executable file
300
deployment/setup.sh
Executable file
@@ -0,0 +1,300 @@
|
||||
#!/usr/bin/env bash
# DictIA — Main setup script
#
# Interactive installer that detects hardware and configures the appropriate
# deployment profile (cloud, local-cpu, local-gpu).
#
# Usage:
# bash deployment/setup.sh # Interactive mode
# bash deployment/setup.sh --profile cloud # Non-interactive
# bash deployment/setup.sh --profile local-gpu
set -euo pipefail

# Directory containing this script, and the project root one level up.
# Resolved to absolute paths so the script works from any CWD.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
readonly SCRIPT_DIR PROJECT_DIR

# Deployment profile; empty means "prompt the user interactively" later.
PROFILE=""
# --- Argument parsing ---
# Accepts both "--profile=NAME" and "--profile NAME".
expect_profile_value=false
for arg in "$@"; do
  case "$arg" in
    --profile=*)
      PROFILE="${arg#*=}"
      ;;
    --profile)
      # The profile name arrives in the next argument.
      expect_profile_value=true
      ;;
    *)
      if [ "$expect_profile_value" = true ]; then
        PROFILE="$arg"
        expect_profile_value=false
      else
        # Previously unknown arguments were silently dropped; warn so
        # typos (e.g. "--porfile") are visible.
        echo "[WARN] Ignoring unknown argument: $arg" >&2
      fi
      ;;
  esac
done
# "--profile" given as the last argument with no value following it.
if [ "$expect_profile_value" = true ]; then
  echo "[ERROR] --profile requires a value (cloud, local-cpu, local-gpu)" >&2
  exit 1
fi
# --- Terminal colors ---
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'

# Logging helpers: colored level tag, then the message.
# printf '%b\n' interprets the stored escape sequences exactly like
# "echo -e" does, including any escapes inside the message itself.
info() { printf '%b\n' "${CYAN}[INFO]${NC} $*"; }
ok()   { printf '%b\n' "${GREEN}[OK]${NC} $*"; }
warn() { printf '%b\n' "${YELLOW}[WARN]${NC} $*"; }
err()  { printf '%b\n' "${RED}[ERROR]${NC} $*"; }

# Opening banner.
banner_rule="${CYAN}========================================${NC}"
printf '\n%b\n%b\n%b\n\n' \
  "$banner_rule" \
  "${CYAN} DictIA — Setup${NC}" \
  "$banner_rule"
# ==========================================================================
# 1. Hardware Detection
# ==========================================================================
info "Detecting hardware..."

# Docker: must be installed AND the daemon reachable ("docker info" probes it).
if command -v docker &>/dev/null && docker info &>/dev/null; then
  # -E (POSIX ERE) instead of GNU-only -P/\d, so this works with BSD and
  # busybox grep too. "|| true" keeps an unexpected version format from
  # killing the whole script under "set -e -o pipefail".
  DOCKER_VERSION=$(docker --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1 || true)
  ok "Docker ${DOCKER_VERSION:-unknown}"
else
  err "Docker not found or not running."
  echo " Install Docker: https://docs.docker.com/engine/install/"
  exit 1
fi
# Docker Compose — the V2 "docker compose" plugin is required.
if ! docker compose version &>/dev/null; then
  err "Docker Compose not found."
  echo " Docker Compose V2 is required (comes with Docker Desktop or docker-compose-plugin)"
  exit 1
fi
COMPOSE_VERSION=$(docker compose version --short 2>/dev/null || echo "unknown")
ok "Docker Compose $COMPOSE_VERSION"
# GPU detection: nvidia-smi must exist and report a non-empty device name.
HAS_GPU=false
if ! command -v nvidia-smi &>/dev/null; then
  info "No NVIDIA GPU detected"
else
  GPU_NAME=$(nvidia-smi --query-gpu=name --format=csv,noheader 2>/dev/null | head -1 || echo "")
  if [ -n "$GPU_NAME" ]; then
    HAS_GPU=true
    ok "NVIDIA GPU: $GPU_NAME"
    # The container toolkit registers an "nvidia" runtime in "docker info".
    if docker info 2>/dev/null | grep -qi nvidia; then
      ok "nvidia-container-toolkit detected"
    else
      warn "nvidia-container-toolkit not detected. Required for local-gpu profile."
      echo " Install: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html"
    fi
  fi
fi
# RAM — free(1) is Linux-only; skip silently elsewhere. Note: "free -g"
# rounds down, so e.g. 7.8 GB shows as 7 GB.
if command -v free &>/dev/null; then
  RAM_GB=$(free -g | awk '/Mem:/{print $2}')
  info "RAM: ${RAM_GB}GB"
fi

# Disk space available at the project root. The pipeline is guarded: under
# "set -e -o pipefail" a df failure would otherwise abort the whole script,
# and a success with no row 2 would leave DISK_AVAIL empty.
DISK_AVAIL=$(df -h "$PROJECT_DIR" 2>/dev/null | awk 'NR==2{print $4}') || DISK_AVAIL=""
DISK_AVAIL=${DISK_AVAIL:-unknown}
info "Disk available: $DISK_AVAIL"

echo
# ==========================================================================
# 2. Profile Selection
# ==========================================================================
# Only prompt when --profile was not supplied on the command line.
if [ -z "$PROFILE" ]; then
  echo -e "${CYAN}Select deployment profile:${NC}"
  echo
  echo " 1) cloud — VPS with ASR Proxy (GCP GPU on demand)"
  echo " Best for: remote servers, pay-per-use GPU"
  echo
  echo " 2) local-gpu — Local NVIDIA GPU for transcription"
  echo " Best for: dedicated GPU server, fastest"
  # Flag option 2 as a likely mismatch when no GPU was found earlier.
  if [ "$HAS_GPU" = false ]; then
    echo -e " ${YELLOW}(No GPU detected on this machine)${NC}"
  fi
  echo
  echo " 3) local-cpu — CPU-only transcription (slow)"
  echo " Best for: testing, low-volume usage"
  echo
  read -rp "Choice [1-3]: " CHOICE
  case "$CHOICE" in
    1)
      PROFILE="cloud"
      ;;
    2)
      PROFILE="local-gpu"
      ;;
    3)
      PROFILE="local-cpu"
      ;;
    *)
      err "Invalid choice"
      exit 1
      ;;
  esac
fi
# Resolve the compose file for the chosen profile and make sure it exists.
COMPOSE_FILE="$SCRIPT_DIR/docker/docker-compose.$PROFILE.yml"
if [ ! -f "$COMPOSE_FILE" ]; then
  err "Compose file not found: $COMPOSE_FILE"
  exit 1
fi

ok "Profile: $PROFILE"
echo
# ==========================================================================
# 3. Generate .env
# ==========================================================================
ENV_FILE="$PROJECT_DIR/.env"

# Escape a string for the RHS of a sed "s|…|…|" replacement. Backslash,
# '&' (whole-match backreference) and the '|' delimiter would otherwise be
# interpreted by sed — passwords and API keys can contain any of them.
escape_sed_replacement() {
  printf '%s' "$1" | sed -e 's/[\\&|]/\\&/g'
}

# Set KEY=VALUE in $ENV_FILE. The pattern is anchored with '^' so that
# commented-out "# KEY=..." template lines are left untouched.
# NOTE(review): GNU "sed -i" syntax; BSD/macOS sed needs "-i ''".
set_env_value() {
  local key=$1 value=$2
  sed -i "s|^${key}=.*|${key}=$(escape_sed_replacement "$value")|" "$ENV_FILE"
}

if [ -f "$ENV_FILE" ]; then
  warn ".env already exists. Keeping existing configuration."
  echo " To reconfigure, delete .env and re-run setup."
else
  info "Generating .env..."

  # 64-hex-char secret key. Fallback chain: python3 → openssl → od(1) over
  # /dev/urandom. od replaces the old xxd fallback, which could embed a
  # newline ("xxd -p" wraps its output at 60 columns, so "head -c 64"
  # captured 60 hex chars + '\n' + 3 more).
  SECRET_KEY=$(python3 -c "import secrets; print(secrets.token_hex(32))" 2>/dev/null \
    || openssl rand -hex 32 2>/dev/null \
    || od -An -vtx1 -N32 /dev/urandom | tr -d ' \n')

  # Prompt for admin credentials
  read -rp "Admin username [admin]: " ADMIN_USER
  ADMIN_USER="${ADMIN_USER:-admin}"
  read -rp "Admin email [admin@example.com]: " ADMIN_EMAIL
  ADMIN_EMAIL="${ADMIN_EMAIL:-admin@example.com}"
  read -rsp "Admin password: " ADMIN_PASS
  echo
  # Make the silent "changeme" fallback visible — it is a security hazard.
  if [ -z "$ADMIN_PASS" ]; then
    warn "No password given — using default 'changeme'. Change it after first login."
    ADMIN_PASS="changeme"
  fi

  # Prompt for text model API key
  echo
  info "DictIA needs a text/LLM API key for summaries, titles, and chat."
  echo " Recommended: OpenRouter (https://openrouter.ai) — access to many models"
  read -rp "Text model API key (or press Enter to skip): " TEXT_API_KEY
  TEXT_API_KEY="${TEXT_API_KEY:-your_openrouter_api_key}"

  # HuggingFace token for diarization — not needed for the cloud profile,
  # where transcription runs on the remote ASR proxy.
  if [ "$PROFILE" != "cloud" ]; then
    echo
    info "For speaker diarization, a HuggingFace token is needed."
    echo " Get one at: https://huggingface.co/settings/tokens"
    echo " Accept model: https://huggingface.co/pyannote/speaker-diarization-3.1"
    read -rp "HuggingFace token (or press Enter to skip): " HF_TOKEN
    HF_TOKEN="${HF_TOKEN:-}"
  else
    HF_TOKEN=""
  fi

  # Write .env from the template, then substitute the collected values.
  cp "$SCRIPT_DIR/docker/.env.example" "$ENV_FILE"
  set_env_value "SECRET_KEY" "$SECRET_KEY"
  set_env_value "DICTIA_PROFILE" "$PROFILE"
  set_env_value "ADMIN_USERNAME" "$ADMIN_USER"
  set_env_value "ADMIN_EMAIL" "$ADMIN_EMAIL"
  set_env_value "ADMIN_PASSWORD" "$ADMIN_PASS"
  set_env_value "TEXT_MODEL_API_KEY" "$TEXT_API_KEY"
  set_env_value "HF_TOKEN" "$HF_TOKEN"

  ok ".env generated"
fi
echo
# ==========================================================================
# 4. Create data directories
# ==========================================================================
info "Creating data directories..."
for subdir in uploads instance; do
  mkdir -p "$PROJECT_DIR/data/$subdir"
done
ok "data/uploads and data/instance created"
echo
# ==========================================================================
# 5. Profile-specific setup
# ==========================================================================
case "$PROFILE" in
  cloud)
    info "Cloud profile — setting up ASR Proxy..."
    if [ -f "$SCRIPT_DIR/asr-proxy/setup.sh" ]; then
      echo " Run the ASR proxy setup separately:"
      echo " bash $SCRIPT_DIR/asr-proxy/setup.sh"
    fi
    echo
    info "Setting up iptables rules..."
    # Only apply firewall rules automatically when running as root.
    if [ -f "$SCRIPT_DIR/security/iptables-rules.sh" ] && [ "$(id -u)" -eq 0 ]; then
      bash "$SCRIPT_DIR/security/iptables-rules.sh"
    else
      echo " Run as root: sudo bash $SCRIPT_DIR/security/iptables-rules.sh"
    fi
    echo
    info "Setting up Tailscale Serve..."
    if command -v tailscale &>/dev/null; then
      echo " Run: bash $SCRIPT_DIR/config/tailscale/setup-serve.sh"
    else
      warn "Tailscale not installed."
      echo " Install: curl -fsSL https://tailscale.com/install.sh | sh"
    fi
    ;;
  local-gpu)
    info "Local GPU profile — verifying NVIDIA runtime..."
    if docker info 2>/dev/null | grep -qi nvidia; then
      ok "NVIDIA Docker runtime available"
      # Quick end-to-end GPU test. Fixed image tag: "nvidia/cuda:12.0-base"
      # is not a published tag — CUDA 12.x images require the full version
      # plus a distro suffix, so the old test always failed with a pull error.
      if docker run --rm --gpus all nvidia/cuda:12.0.0-base-ubuntu22.04 nvidia-smi &>/dev/null; then
        ok "GPU test passed"
      else
        warn "GPU test failed. Check nvidia-container-toolkit installation."
      fi
    else
      err "NVIDIA Docker runtime not found."
      echo " Install nvidia-container-toolkit and restart Docker."
      echo " https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html"
    fi
    ;;
  local-cpu)
    warn "CPU-only transcription is significantly slower than GPU."
    echo " Expect ~10x real-time (1h audio = ~10h processing)."
    echo " Consider local-gpu or cloud profile for better performance."
    ;;
esac

echo
# ==========================================================================
# 6. Build and start
# ==========================================================================
info "Building DictIA Docker image..."
# Explicit check so a bad PROJECT_DIR produces a clear message instead of
# a bare "set -e" exit.
cd "$PROJECT_DIR" || { err "Cannot cd to $PROJECT_DIR"; exit 1; }
docker build -t innova-ai/dictia:latest .
ok "Image built"

echo
info "Starting DictIA ($PROFILE profile)..."
docker compose -f "$COMPOSE_FILE" up -d
ok "Containers started"
# ==========================================================================
# 7. Health check
# ==========================================================================
echo
info "Waiting for DictIA to become healthy..."
RETRIES=30
HEALTHY=false
for (( attempt = 1; attempt <= RETRIES; attempt++ )); do
  if curl -sf -o /dev/null -m 5 http://localhost:8899/health 2>/dev/null; then
    HEALTHY=true
    ok "DictIA is healthy!"
    break
  fi
  # Don't sleep after the final attempt — the old loop burned an extra 5 s
  # before reporting the timeout.
  if (( attempt < RETRIES )); then
    sleep 5
  fi
done
if [ "$HEALTHY" = false ]; then
  warn "Health check timeout. Check logs: docker compose -f $COMPOSE_FILE logs"
fi
# --- Final summary banner ---
summary_rule="${GREEN}========================================${NC}"
printf '\n%b\n%b\n%b\n\n' \
  "$summary_rule" \
  "${GREEN} DictIA is ready!${NC}" \
  "$summary_rule"
# Plain (uncolored) lines go through a heredoc; $PROFILE and $COMPOSE_FILE
# expand as usual.
cat <<SUMMARY
 App: http://localhost:8899
 Profile: $PROFILE
 Compose: $COMPOSE_FILE

 Tools:
 Update: bash deployment/tools/update.sh
 Backup: bash deployment/tools/backup.sh
 Health check: bash deployment/tools/health-check.sh

SUMMARY
Reference in New Issue
Block a user