Initial release: DictIA v0.8.14-alpha (fork of Speakr, AGPL-3.0)

InnovA AI
2026-03-16 21:47:37 +00:00
commit 42772a31ed
365 changed files with 103572 additions and 0 deletions


@@ -0,0 +1,101 @@
# =============================================================================
# DictIA 16 — Docker Compose
# GPU: RTX 5070 Ti (16 GB VRAM)
# =============================================================================
#
# Services:
#   - dictia       : main DictIA application
#   - whisperx-asr : WhisperX Large-v3 transcription service
#   - ollama       : local Mistral 7B LLM (summaries, chat, Q&A)
#
# Startup:
#   1. cp config/env.dictia16.example .env
#   2. docker compose -f config/docker-compose.dictia16.yml up -d
#   3. Pull Mistral: docker exec ollama ollama pull mistral
#
# Note: no API key needed; everything runs locally (100% private).
# =============================================================================
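# Example .env contents (a minimal sketch; the actual template lives in
# config/env.dictia16.example — only HF_TOKEN is referenced directly by this
# compose file, any other variables are app-specific):
#
#   HF_TOKEN=hf_xxxxxxxxxxxxxxxx   # Hugging Face token, passed to whisperx-asr
#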
services:
  # ---------------------------------------------------------------------------
  # DictIA application
  # ---------------------------------------------------------------------------
dictia:
image: dictia:latest
container_name: dictia
restart: unless-stopped
ports:
- "8899:8899"
env_file:
- ../.env
environment:
- LOG_LEVEL=ERROR
volumes:
- ../uploads:/data/uploads
- ../instance:/data/instance
      # Uncomment for automatic export:
      # - ../exports:/data/exports
      # Uncomment for automatic processing:
      # - ../auto-process:/data/auto-process
depends_on:
- whisperx-asr
- ollama
networks:
- dictia-net
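  # On dictia-net, containers resolve each other by service name, so the app can
  # reach Ollama at http://ollama:11434 (Ollama's default API port); the
  # whisperx-asr port depends on that image. These endpoint URLs are presumably
  # configured through the .env file loaded above.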
  # ---------------------------------------------------------------------------
  # WhisperX ASR — local transcription (WhisperX Large-v3)
  # RTX 5070 Ti: BATCH_SIZE=32, COMPUTE_TYPE=float16
  # ---------------------------------------------------------------------------
whisperx-asr:
image: murtazanasir/whisperx-asr-service:latest
container_name: whisperx-asr
restart: unless-stopped
environment:
- HF_TOKEN=${HF_TOKEN}
- DEVICE=cuda
- COMPUTE_TYPE=float16
- BATCH_SIZE=32
- DEFAULT_MODEL=large-v3
volumes:
- whisperx-models:/root/.cache
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
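    # Note: GPU reservations here (and in ollama below) require the host to have
    # NVIDIA drivers and the NVIDIA Container Toolkit installed.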
networks:
- dictia-net
  # ---------------------------------------------------------------------------
  # Ollama — local Mistral 7B LLM
  # Summaries, action items, Q&A — 100% local, no data leaves the machine
  # ---------------------------------------------------------------------------
ollama:
image: ollama/ollama:latest
container_name: ollama
restart: unless-stopped
volumes:
- ollama-models:/root/.ollama
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
networks:
- dictia-net
networks:
dictia-net:
driver: bridge
volumes:
whisperx-models:
driver: local
ollama-models:
driver: local