# =============================================================================
# DictIA — Unified Environment Configuration
# =============================================================================
#
# Copy this file to the project root as .env and edit the values.
#   cp deployment/docker/.env.example .env
#
# This template combines upstream settings with DictIA deployment vars.
# See: config/env.transcription.example for full upstream documentation.

# =============================================================================
# FLASK SECRET KEY (REQUIRED — auto-generated by setup.sh)
# =============================================================================
SECRET_KEY=change-me-to-a-random-string

# =============================================================================
# DEPLOYMENT PROFILE (used by deployment scripts)
# =============================================================================
# Options: cloud, local-cpu, local-gpu
DICTIA_PROFILE=cloud

# =============================================================================
# TEXT GENERATION MODEL (REQUIRED for summaries, titles, chat)
# =============================================================================
TEXT_MODEL_BASE_URL=https://openrouter.ai/api/v1
TEXT_MODEL_API_KEY=your_openrouter_api_key
TEXT_MODEL_NAME=openai/gpt-4o-mini

# =============================================================================
# TRANSCRIPTION CONFIGURATION
# =============================================================================
# For cloud profile (ASR Proxy → GCP GPU):
# ASR_BASE_URL is set automatically in docker-compose.cloud.yml
# No need to set it here.
#
# For local profiles (WhisperX sidecar):
# ASR_BASE_URL is set automatically in docker-compose.local-*.yml
# No need to set it here.
#
# For OpenAI API instead of self-hosted ASR:
# TRANSCRIPTION_API_KEY=sk-your_openai_api_key
# TRANSCRIPTION_MODEL=gpt-4o-transcribe-diarize

# ASR model (for local WhisperX profiles)
ASR_MODEL=large-v3

# HuggingFace token (required for diarization with pyannote)
# Get yours at: https://huggingface.co/settings/tokens
# Must accept: https://huggingface.co/pyannote/speaker-diarization-3.1
HF_TOKEN=

# =============================================================================
# ASR PROXY — CLOUD PROFILE ONLY
# =============================================================================
# GCP project for GPU instances
# GCP_PROJECT=your-gcp-project

# Monthly GPU budget limit in hours (default: 50)
# MONTHLY_LIMIT_HOURS=50

# Idle timeout before auto-stopping GPU (seconds, default: 300)
# IDLE_TIMEOUT=300

# =============================================================================
# APPLICATION SETTINGS
# =============================================================================
ADMIN_USERNAME=admin
ADMIN_EMAIL=admin@example.com
ADMIN_PASSWORD=changeme

ALLOW_REGISTRATION=false
TIMEZONE="America/Toronto"
LOG_LEVEL=ERROR
LOCALE=fr_CA
DEFAULT_LANGUAGE=fr
SHOW_USERNAMES_IN_UI=true
SESSION_COOKIE_HTTPONLY=true
SESSION_COOKIE_SAMESITE=Lax
SESSION_COOKIE_SECURE=true

# =============================================================================
# OPTIONAL FEATURES
# =============================================================================
ENABLE_INQUIRE_MODE=false
ENABLE_AUTO_PROCESSING=false
ENABLE_AUTO_EXPORT=false
ENABLE_AUTO_DELETION=false
ENABLE_INTERNAL_SHARING=true
ENABLE_PUBLIC_SHARING=true
ENABLE_FOLDERS=true
VIDEO_RETENTION=true
USERS_CAN_DELETE=true

# =============================================================================
# BACKGROUND PROCESSING
# =============================================================================
JOB_QUEUE_WORKERS=4
SUMMARY_QUEUE_WORKERS=4
JOB_MAX_RETRIES=3
MAX_CONCURRENT_UPLOADS=3

# =============================================================================
# TRANSCRIPTION SETTINGS
# =============================================================================
TRANSCRIPTION_CONNECTOR=asr_endpoint
USE_NEW_TRANSCRIPTION_ARCHITECTURE=true
ENABLE_CHUNKING=true
CHUNK_LIMIT=2400s
CHUNK_OVERLAP_SECONDS=5

# =============================================================================
# LLM / SUMMARY SETTINGS
# =============================================================================
SUMMARY_LANGUAGE=fr
SUMMARY_MAX_TOKENS=16000
CHAT_MAX_TOKENS=12000
ENABLE_STREAM_OPTIONS=false
ENABLE_THINKING=false

# =============================================================================
# DOCKER/DATABASE
# =============================================================================
SQLALCHEMY_DATABASE_URI=sqlite:////data/instance/transcriptions.db
UPLOAD_FOLDER=/data/uploads