Initial release: DictIA v0.8.14-alpha (fork of Speakr, AGPL-3.0)
This commit is contained in:
75
deployment/profiles/docker-compose.dictia8.yml
Normal file
75
deployment/profiles/docker-compose.dictia8.yml
Normal file
@@ -0,0 +1,75 @@
---
# =============================================================================
# DictIA 8 — Docker Compose
# GPU: RTX 5060 (8 GB VRAM)
# =============================================================================
#
# Services:
#   - dictia       : main DictIA application
#   - whisperx-asr : WhisperX Large-v3 transcription service
#
# Startup (this file lives in deployment/profiles/; the original instructions
# referenced config/, which does not match this file's actual location):
#   1. cp config/env.dictia8.example deployment/.env
#      NOTE(review): env_file below is ../.env, resolved relative to this
#      compose file, i.e. deployment/.env — confirm the example's location.
#   2. Fill in TEXT_MODEL_API_KEY in deployment/.env
#   3. docker compose -f deployment/profiles/docker-compose.dictia8.yml up -d
# =============================================================================

services:
  # ---------------------------------------------------------------------------
  # DictIA application
  # ---------------------------------------------------------------------------
  dictia:
    image: dictia:latest
    container_name: dictia
    restart: unless-stopped
    ports:
      # Quoted to avoid YAML 1.1 sexagesimal parsing of colon-separated digits.
      - "8899:8899"
    env_file:
      # Resolved relative to this compose file → deployment/.env
      - ../.env
    environment:
      - LOG_LEVEL=ERROR
    volumes:
      - ../uploads:/data/uploads
      - ../instance:/data/instance
      # Uncomment to enable automatic export:
      # - ../exports:/data/exports
      # Uncomment to enable automatic processing:
      # - ../auto-process:/data/auto-process
    depends_on:
      # NOTE(review): this only orders container startup; the ASR service may
      # still be loading its model when dictia starts — consider a healthcheck
      # with `condition: service_healthy` if the image provides one.
      - whisperx-asr
    networks:
      - dictia-net

  # ---------------------------------------------------------------------------
  # WhisperX ASR — local transcription (WhisperX Large-v3)
  # RTX 5060 tuning: BATCH_SIZE=16, COMPUTE_TYPE=float16
  # ---------------------------------------------------------------------------
  whisperx-asr:
    image: murtazanasir/whisperx-asr-service:latest
    container_name: whisperx-asr
    restart: unless-stopped
    environment:
      # HF_TOKEN is substituted from the shell / .env at compose time.
      - HF_TOKEN=${HF_TOKEN}
      - DEVICE=cuda
      - COMPUTE_TYPE=float16
      - BATCH_SIZE=16
      - DEFAULT_MODEL=large-v3
    volumes:
      # Named volume persists downloaded model weights across restarts.
      - whisperx-models:/root/.cache
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    networks:
      - dictia-net

networks:
  dictia-net:
    driver: bridge

volumes:
  whisperx-models:
    driver: local
Reference in New Issue
Block a user