Initial release: DictIA v0.8.14-alpha (fork of Speakr, AGPL-3.0)

InnovA AI
2026-03-16 21:47:37 +00:00
commit 42772a31ed
365 changed files with 103572 additions and 0 deletions

0
src/config/__init__.py Normal file

164
src/config/app_config.py Normal file

@@ -0,0 +1,164 @@
"""
Application configuration and initialization.
"""
import os
import sys
import httpx
from openai import OpenAI
from src.audio_chunking import AudioChunkingService
from src.config.version import get_version
# Configuration from environment
TEXT_MODEL_API_KEY = os.environ.get("TEXT_MODEL_API_KEY")
TEXT_MODEL_BASE_URL = os.environ.get("TEXT_MODEL_BASE_URL", "https://openrouter.ai/api/v1")
if TEXT_MODEL_BASE_URL:
    TEXT_MODEL_BASE_URL = TEXT_MODEL_BASE_URL.split('#')[0].strip()
TEXT_MODEL_NAME = os.environ.get("TEXT_MODEL_NAME", "openai/gpt-3.5-turbo")
transcription_api_key = os.environ.get("TRANSCRIPTION_API_KEY", "")
transcription_base_url = os.environ.get("TRANSCRIPTION_BASE_URL", "")
if transcription_base_url:
    transcription_base_url = transcription_base_url.split('#')[0].strip()
# New transcription connector configuration
# TRANSCRIPTION_CONNECTOR: explicit connector name (openai_whisper, openai_transcribe, asr_endpoint)
# TRANSCRIPTION_MODEL: model to use (e.g., gpt-4o-transcribe-diarize for diarization)
TRANSCRIPTION_CONNECTOR = os.environ.get('TRANSCRIPTION_CONNECTOR', '').lower().strip()
TRANSCRIPTION_MODEL = os.environ.get('TRANSCRIPTION_MODEL', '')
if TRANSCRIPTION_MODEL:
    TRANSCRIPTION_MODEL = TRANSCRIPTION_MODEL.split('#')[0].strip()
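# Illustrative .env snippet for the explicit-connector path (values are
# examples, not shipped defaults):
#   TRANSCRIPTION_CONNECTOR=openai_transcribe
#   TRANSCRIPTION_MODEL=gpt-4o-transcribe-diarize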
# Feature flag for new transcription architecture (default: enabled)
USE_NEW_TRANSCRIPTION_ARCHITECTURE = os.environ.get(
    'USE_NEW_TRANSCRIPTION_ARCHITECTURE', 'true'
).lower() == 'true'
USE_ASR_ENDPOINT = os.environ.get('USE_ASR_ENDPOINT', 'false').lower() == 'true'
ASR_BASE_URL = os.environ.get('ASR_BASE_URL')
if ASR_BASE_URL:
    ASR_BASE_URL = ASR_BASE_URL.split('#')[0].strip()
if USE_ASR_ENDPOINT:
    ASR_DIARIZE = os.environ.get('ASR_DIARIZE', 'true').lower() == 'true'
    ASR_MIN_SPEAKERS = os.environ.get('ASR_MIN_SPEAKERS')
    ASR_MAX_SPEAKERS = os.environ.get('ASR_MAX_SPEAKERS')
    # Speaker embeddings are only supported by the WhisperX ASR service, not the basic whisper-asr-webservice
    ASR_RETURN_SPEAKER_EMBEDDINGS = os.environ.get('ASR_RETURN_SPEAKER_EMBEDDINGS', 'false').lower() == 'true'
else:
    ASR_DIARIZE = False
    ASR_MIN_SPEAKERS = None
    ASR_MAX_SPEAKERS = None
    ASR_RETURN_SPEAKER_EMBEDDINGS = False
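# Illustrative self-hosted setup (sketch; the URL is a placeholder, port 9000
# being the usual whisper-asr-webservice default):
#   USE_ASR_ENDPOINT=true
#   ASR_BASE_URL=http://whisper-asr:9000
#   ASR_DIARIZE=true
#   ASR_MIN_SPEAKERS=2
#   ASR_MAX_SPEAKERS=6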
# ASR chunking configuration - enables app-level chunking for self-hosted ASR services
# that may crash on long files due to GPU memory exhaustion
ASR_ENABLE_CHUNKING = os.environ.get('ASR_ENABLE_CHUNKING', 'false').lower() == 'true'
ASR_MAX_DURATION_SECONDS = int(os.environ.get('ASR_MAX_DURATION_SECONDS', '7200')) # 2 hours default
ENABLE_CHUNKING = os.environ.get('ENABLE_CHUNKING', 'true').lower() == 'true'
CHUNK_SIZE_MB = int(os.environ.get('CHUNK_SIZE_MB', '20'))
CHUNK_OVERLAP_SECONDS = int(os.environ.get('CHUNK_OVERLAP_SECONDS', '3'))
# Audio compression settings - compress lossless uploads (WAV, AIFF) to save storage
AUDIO_COMPRESS_UPLOADS = os.environ.get('AUDIO_COMPRESS_UPLOADS', 'true').lower() == 'true'
AUDIO_CODEC = os.environ.get('AUDIO_CODEC', 'mp3').lower() # mp3, flac, opus
AUDIO_BITRATE = os.environ.get('AUDIO_BITRATE', '128k') # For lossy codecs
# Video passthrough - send original video files directly to ASR without extracting audio
# Useful for custom ASR backends that handle video/multi-track audio internally
VIDEO_PASSTHROUGH_ASR = os.environ.get('VIDEO_PASSTHROUGH_ASR', 'false').lower() == 'true'
# Unsupported codecs - comma-separated list of codecs to exclude from the default supported list
# Useful when your transcription service doesn't support certain codecs (e.g., vllm doesn't support opus)
# Example: AUDIO_UNSUPPORTED_CODECS=opus,vorbis
_unsupported_codecs_str = os.environ.get('AUDIO_UNSUPPORTED_CODECS', '')
AUDIO_UNSUPPORTED_CODECS = {c.strip().lower() for c in _unsupported_codecs_str.split(',') if c.strip()}
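# Worked example of the parsing above: AUDIO_UNSUPPORTED_CODECS="opus, Vorbis,"
# yields {'opus', 'vorbis'}: entries are stripped, lowercased, and empties dropped.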
# Email verification configuration
ENABLE_EMAIL_VERIFICATION = os.environ.get('ENABLE_EMAIL_VERIFICATION', 'false').lower() == 'true'
REQUIRE_EMAIL_VERIFICATION = os.environ.get('REQUIRE_EMAIL_VERIFICATION', 'false').lower() == 'true'
SMTP_HOST = os.environ.get('SMTP_HOST', '')
SMTP_PORT = int(os.environ.get('SMTP_PORT', '587'))
SMTP_USERNAME = os.environ.get('SMTP_USERNAME', '')
SMTP_PASSWORD = os.environ.get('SMTP_PASSWORD', '')
SMTP_USE_TLS = os.environ.get('SMTP_USE_TLS', 'true').lower() == 'true'
SMTP_USE_SSL = os.environ.get('SMTP_USE_SSL', 'false').lower() == 'true'
SMTP_FROM_ADDRESS = os.environ.get('SMTP_FROM_ADDRESS', 'noreply@yourdomain.com')
SMTP_FROM_NAME = os.environ.get('SMTP_FROM_NAME', 'Speakr')
# Create chunking service at module level so it can be imported by processing.py
# Always initialize the service - the needs_chunking() method will check ENABLE_CHUNKING
# and return False when appropriate. This allows connectors with hard limits (e.g.,
# max_duration_seconds) to still enforce chunking even when ENABLE_CHUNKING=false.
chunking_service = AudioChunkingService(CHUNK_SIZE_MB, CHUNK_OVERLAP_SECONDS)
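# Sketch of the expected call site (the needs_chunking() arguments are assumed
# from the comment above, not defined in this file):
#   if chunking_service.needs_chunking(file_path, connector):
#       ...split the audio, transcribe per chunk, merge with CHUNK_OVERLAP_SECONDS...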
def initialize_config(app):
    """Initialize application configuration."""
    app_headers = {
        "HTTP-Referer": "https://github.com/murtaza-nasir/speakr",
        "X-Title": "Speakr - AI Audio Transcription",
        "User-Agent": "Speakr/1.0 (https://github.com/murtaza-nasir/speakr)"
    }
    http_client_no_proxy = httpx.Client(verify=True, headers=app_headers)

    client = None
    try:
        api_key = TEXT_MODEL_API_KEY or "not-needed"
        client = OpenAI(api_key=api_key, base_url=TEXT_MODEL_BASE_URL, http_client=http_client_no_proxy)
        app.logger.info(f"LLM client initialized: {TEXT_MODEL_BASE_URL} / {TEXT_MODEL_NAME}")
    except Exception as e:
        app.logger.error(f"Failed to initialize LLM client: {e}")

    # Use module-level chunking_service (already created above)
    version = get_version()
    app.logger.info(f"=== DictIA {version} Starting Up ===")

    # Initialize transcription connector
    if USE_NEW_TRANSCRIPTION_ARCHITECTURE:
        try:
            from src.services.transcription import get_registry
            registry = get_registry()
            connector = registry.initialize_from_env()
            connector_name = registry.get_active_connector_name()
            capabilities = [c.name for c in connector.get_capabilities()]
            app.logger.info(f"Transcription connector initialized: {connector_name}")
            app.logger.info(f"Connector capabilities: {capabilities}")

            # Log diarization support prominently
            diarize_default = getattr(connector, 'default_diarize', connector.supports_diarization)
            if not connector.supports_diarization:
                app.logger.info("Speaker diarization: NOT AVAILABLE (connector does not support it)")
            elif not diarize_default:
                app.logger.info("Speaker diarization: DISABLED (ASR_DIARIZE=false)")
            else:
                app.logger.info("Speaker diarization: ENABLED")
        except Exception as e:
            app.logger.error(f"Failed to initialize transcription connector: {e}")
            app.logger.error("Falling back to legacy transcription configuration validation")
            # Fall through to legacy validation
            _validate_legacy_transcription_config(app)
    else:
        # Legacy configuration validation
        _validate_legacy_transcription_config(app)

    return client, chunking_service, version


def _validate_legacy_transcription_config(app):
    """Validate legacy transcription configuration (backwards compatibility)."""
    if USE_ASR_ENDPOINT:
        if not ASR_BASE_URL:
            app.logger.error("ERROR: ASR enabled but ASR_BASE_URL not configured!")
            sys.exit(1)
        app.logger.info(f"Using ASR endpoint: {ASR_BASE_URL}")
    else:
        if not transcription_base_url or not transcription_api_key:
            app.logger.error("ERROR: No transcription service configured!")
            sys.exit(1)
        app.logger.info(f"Using Whisper API: {transcription_base_url}")

158
src/config/startup.py Normal file

@@ -0,0 +1,158 @@
"""
Application startup functions.
"""
import os
import time
import threading
from datetime import datetime, timedelta
from flask import current_app
ENABLE_AUTO_DELETION = os.environ.get('ENABLE_AUTO_DELETION', 'false').lower() == 'true'
GLOBAL_RETENTION_DAYS = int(os.environ.get('GLOBAL_RETENTION_DAYS', '0'))
def initialize_file_monitor(app):
    """Initialize file monitor after app is fully loaded to avoid circular imports."""
    try:
        # Import here to avoid circular imports
        import src.file_monitor as file_monitor
        file_monitor.start_file_monitor()
        app.logger.info("File monitor initialization completed")
    except Exception as e:
        app.logger.warning(f"File monitor initialization failed: {e}")


def get_file_monitor_functions(app):
    """Get file monitor functions, handling import errors gracefully."""
    try:
        import src.file_monitor as file_monitor
        return file_monitor.start_file_monitor, file_monitor.stop_file_monitor, file_monitor.get_file_monitor_status
    except ImportError as e:
        app.logger.warning(f"File monitor not available: {e}")

        # Create stub functions if file_monitor is not available
        def start_file_monitor():
            pass

        def stop_file_monitor():
            pass

        def get_file_monitor_status():
            return {'running': False, 'error': 'File monitor module not available'}

        return start_file_monitor, stop_file_monitor, get_file_monitor_status
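# With the stubs above, callers can unpack unconditionally (usage sketch):
#   start_fm, stop_fm, fm_status = get_file_monitor_functions(app)
#   fm_status()  # -> {'running': False, 'error': ...} when the module is absent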
# --- Auto-Deletion Scheduler ---
def initialize_auto_deletion_scheduler(app):
    """Initialize the daily auto-deletion scheduler if enabled."""
    from src.services.retention import process_auto_deletion

    if not ENABLE_AUTO_DELETION:
        app.logger.info("Auto-deletion scheduler not started (ENABLE_AUTO_DELETION=false)")
        return
    if GLOBAL_RETENTION_DAYS <= 0:
        app.logger.info("Auto-deletion scheduler not started (GLOBAL_RETENTION_DAYS not set)")
        return

    def run_daily_deletion():
        """Background thread that runs auto-deletion daily at 2 AM."""
        app.logger.info("Auto-deletion scheduler started - will run daily at 2:00 AM")
        while True:
            try:
                # Calculate time until next 2 AM
                now = datetime.now()
                next_run = now.replace(hour=2, minute=0, second=0, microsecond=0)
                # If it's past 2 AM today, schedule for tomorrow
                if now.hour >= 2:
                    next_run += timedelta(days=1)
                sleep_seconds = (next_run - now).total_seconds()
                app.logger.info(f"Next auto-deletion scheduled for: {next_run.strftime('%Y-%m-%d %H:%M:%S')} (in {sleep_seconds/3600:.1f} hours)")

                # Sleep until next run time
                time.sleep(sleep_seconds)

                # Run auto-deletion
                app.logger.info("Running scheduled auto-deletion...")
                with app.app_context():
                    stats = process_auto_deletion()
                    app.logger.info(f"Scheduled auto-deletion completed: {stats}")
            except Exception as e:
                app.logger.error(f"Error in auto-deletion scheduler: {e}", exc_info=True)
                # Sleep for 1 hour before retrying on error
                time.sleep(3600)

    # Start the scheduler thread
    scheduler_thread = threading.Thread(target=run_daily_deletion, daemon=True, name="AutoDeletionScheduler")
    scheduler_thread.start()
    app.logger.info("✅ Auto-deletion scheduler initialized - running daily at 2:00 AM")
def initialize_file_exporter(app):
    """Initialize file exporter after app is fully loaded."""
    try:
        from src.file_exporter import initialize_export_directory, ENABLE_AUTO_EXPORT
        if ENABLE_AUTO_EXPORT:
            initialize_export_directory()
            app.logger.info("✅ Auto-export initialized")
        else:
            app.logger.info("Auto-export: Disabled (set ENABLE_AUTO_EXPORT=true to enable)")
    except Exception as e:
        app.logger.warning(f"File exporter initialization failed: {e}")
def initialize_job_queue(app):
    """Initialize and start the background job queue with orphan recovery."""
    try:
        from src.services.job_queue import job_queue

        # Initialize job queue with app context
        job_queue.init_app(app)
        # Recover any jobs that were processing when the app crashed
        job_queue.recover_orphaned_jobs()
        # Start worker threads
        job_queue.start()

        # Get queue status
        status = job_queue.get_queue_status()
        t_queue = status['transcription_queue']
        s_queue = status['summary_queue']
        app.logger.info(
            f"Job queues started: "
            f"transcription ({t_queue['workers']} workers, {t_queue['queued']} queued), "
            f"summary ({s_queue['workers']} workers, {s_queue['queued']} queued)"
        )
    except Exception as e:
        app.logger.error(f"Failed to start job queue: {e}", exc_info=True)
def run_startup_tasks(app):
    """Run all startup tasks that need to happen after app creation."""
    from src.models import SystemSetting

    with app.app_context():
        # Set dynamic MAX_CONTENT_LENGTH based on database setting
        max_file_size_mb = SystemSetting.get_setting('max_file_size_mb', 250)
        app.config['MAX_CONTENT_LENGTH'] = max_file_size_mb * 1024 * 1024
        app.logger.info(f"Set MAX_CONTENT_LENGTH to {max_file_size_mb}MB from database setting")

        # Initialize job queue for background processing
        initialize_job_queue(app)
        # Initialize file monitor after app setup
        initialize_file_monitor(app)
        # Initialize file exporter
        initialize_file_exporter(app)
        # Initialize auto-deletion scheduler
        initialize_auto_deletion_scheduler(app)

29
src/config/version.py Normal file

@@ -0,0 +1,29 @@
"""
Version information helper.
"""
import os
def get_version():
    # Try reading VERSION file first (works in Docker)
    try:
        with open('VERSION', 'r') as f:
            return f.read().strip()
    except FileNotFoundError:
        pass

    # Fall back to git tags (works in development)
    try:
        import subprocess
        return subprocess.check_output(['git', 'describe', '--tags', '--abbrev=0'],
                                       stderr=subprocess.DEVNULL).decode().strip()
    except Exception:
        pass

    # Final fallback
    return "unknown"