"""Verify the marketing landing template renders correctly."""
import os
import sys

# Make the repository root importable before pulling in the app.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ.setdefault('SQLALCHEMY_DATABASE_URI', 'sqlite:///:memory:')
os.environ.setdefault('SECRET_KEY', 'test-secret-key')

from src.app import app  # noqa: E402


def test_landing_renders_template_not_inline_html():
    """GET / renders templates/marketing/landing.html (not inline HTML from Phase 1)."""
    client = app.test_client()
    response = client.get('/', follow_redirects=False)
    assert response.status_code == 200
    body = response.data.decode('utf-8')
    # Phase 2 template hallmarks
    # NOTE(review): the DOCTYPE literal was stripped during extraction; reconstructed
    # here (both casings accepted) — confirm against what base.html actually emits.
    assert '<!DOCTYPE html>' in body or '<!doctype html>' in body, \
        "Missing DOCTYPE — base.html not rendering"
    assert 'lang="fr-CA"' in body, "Missing lang=fr-CA"
    assert '/static/css/marketing.css' in body, "Missing marketing.css link"
    assert '/static/fonts/Inter-Variable.woff2' in body, "Missing Inter font preload"
    assert '/static/js/alpine.min.js' in body, "Missing Alpine.js script"


def test_landing_has_canonical_url():
    """OG + canonical metadata present."""
    client = app.test_client()
    response = client.get('/')
    body = response.data.decode('utf-8')
    assert 'rel="canonical"' in body
    assert 'og:type' in body
    assert 'og:locale' in body and 'fr_CA' in body
    assert 'twitter:card' in body


def test_landing_has_glassmorphism_header():
    """FlexiHub-style header present (navy + backdrop-blur)."""
    client = app.test_client()
    response = client.get('/')
    body = response.data.decode('utf-8')
    assert 'bg-brand-navy/[0.97]' in body or 'bg-brand-navy' in body
    assert 'backdrop-blur-xl' in body
    assert 'border-white/[0.045]' in body, "Missing FlexiHub-style 0.045 border opacity"


def test_landing_has_main_nav():
    """Main nav has 5 links: Fonctionnalités, Conformité, Tarifs, Blog, Contact."""
    client = app.test_client()
    response = client.get('/')
    body = response.data.decode('utf-8')
    for link in ['/fonctionnalites', '/conformite', '/tarifs', '/blog', '/contact']:
        assert f'href="{link}"' in body, f"Missing nav link: {link}"


def test_landing_has_login_and_signup_ctas():
    """Login + Signup CTAs present in header."""
    client = app.test_client()
    response = client.get('/')
    body = response.data.decode('utf-8')
    assert 'href="/login"' in body
    assert 'href="/signup"' in body
    # NOTE(review): both operands decoded identically during extraction (one was
    # probably entity-encoded in the original) — kept verbatim.
    assert 'Démarrer' in body or 'Démarrer' in body


def test_landing_footer_has_legal_links():
    """Footer placeholder includes legal links (full footer in A-2.7)."""
    client = app.test_client()
    response = client.get('/')
    body = response.data.decode('utf-8')
    assert '/legal/conditions' in body
    assert '/legal/confidentialite' in body
    assert 'info@dictia.ca' in body, "Missing canonical email info@dictia.ca"
    assert 'Inverness' in body, "Missing Inverness QC address"


def test_landing_no_login_redirect_for_anonymous():
    """Anonymous user GET / must see template (regression check from B-1.3)."""
    client = app.test_client()
    response = client.get('/', follow_redirects=False)
    assert response.status_code == 200, \
        f"Expected 200, got {response.status_code} — possibly login_required regression"


def test_hero_has_h1_with_grad_text_accent():
    """Hero H1 (round 3) contains the brand wordmark with grad-text accent on 'IA'.

    Round 3 replaces the old tagline ('sans risquer votre permis') with the canonical
    DictIA wordmark + the H2 phrase 'Transcription IA locale en 2 minutes'.
    """
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert 'id="hero-title"' in body, "Missing hero-title id on H1"
    assert 'grad-text' in body, "Missing grad-text class somewhere"
    # New canonical brand H2 phrase (cyan/grad on key claim)
    assert 'Transcription IA locale en 2' in body, "Missing canonical H2 phrase"
    # Hero word-staggered reveal hook on the wordmark
    assert 'hero-h1-word' in body, "Missing word-staggered reveal class"


def test_hero_has_dual_cta():
    """Hero (round 3) has primary (Réserver une démo) and ghost (Voir les forfaits) CTAs."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert 'href="/contact"' in body
    assert 'href="/tarifs"' in body
    assert 'Réserver une démo' in body or 'Réserver une démo' in body
    # Round 3 canonical wording: 'Voir les forfaits' (matches dictia.ca/solutions/dictai)
    assert 'Voir les forfaits' in body, "Round 3 secondary CTA must say 'Voir les forfaits'"


def test_hero_has_cosmic_orbs_background():
    """Hero has 3 radial gradient orbs (FlexiHub signature, blue/aqua palette — matches official DictIA logo)."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    # Look for the 3 orb opacities (16% blue, 7% aqua, 11% aqua accent)
    assert 'rgba(37,99,235,0.16)' in body, "Missing primary blue orb"
    assert 'rgba(6,182,212,0.07)' in body, "Missing aqua orb"
    assert 'rgba(6,182,212,0.11)' in body, "Missing aqua accent orb"


def test_hero_has_social_proof_microcopy():
    """Hero has defensible social proof: 9 ordres pros + waitlist + launch date."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert '9 ordres professionnels' in body, "Missing factual ordres pros count"
    assert 'Pré-inscription' in body or 'Pré-inscription' in body, "Missing waitlist mention"
    assert 'Lancement printemps 2026' in body, "Missing launch date"


def test_hero_has_staggered_animations():
    """Hero (round 3) elements use tc-fade-in-up with staggered delays — canonical cadence.

    Round 3 staggers : 0 (back-link), 75 (eyebrow), 200 (3-step flow), 280 (H2 phrase),
    360 (sub), 440 (stats), 520 (CTAs), 600 (social proof).
    """
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert 'animate-tc-fade-in-up' in body, "Missing fade-in animation"
    for delay in ['0ms', '75ms', '200ms', '280ms', '360ms', '440ms', '520ms', '600ms']:
        assert f'animation-delay: {delay}' in body, f"Missing staggered delay {delay}"
    assert 'animation-fill-mode: backwards' in body, \
        "Missing animation-fill-mode (causes flash before delay fires)"


def test_hero_eyebrow_has_brand_messaging():
    """Hero eyebrow declares the 3 brand pillars (round 3 uses OQLF NBSP : LOI 25)."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert 'TRANSCRIPTION IA' in body
    # OQLF-conformant : non-breaking space before "25" (NBSP entity)
    assert 'CONFORME LOI 25' in body or 'CONFORME LOI 25' in body, \
        "Missing 'CONFORME LOI 25' eyebrow (with or without NBSP)"
    assert 'QU' in body  # Either QUÉBEC or QUEBEC


def test_trust_bar_has_9_ordres_pros():
    """Trust bar lists all 9 canonical Quebec ordres pros (matches dictia.ca)."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    for ordre in ['Barreau', 'Chambre des notaires', 'CPA Québec', 'ChAD',
                  'OACIQ', 'CMQ', 'OIIQ', 'OPQ', 'OEQ']:
        assert ordre in body, f"Missing ordre pro: {ordre}"
    # Note: OPPQ deliberately removed (ambiguous abbrev — replaced with OPQ for Pharmaciens)


def test_trust_bar_has_eyebrow_factual_phrasing():
    """Trust bar avoids false-endorsement language (LPC art. 219 / Competition Act s. 52)."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert 'MAPP' in body and '9 ORDRES PROFESSIONNELS' in body, "Missing factual eyebrow"
    # Forbidden marketing phrases that imply official endorsement we don't have
    forbidden = [
        'CERTIFIÉ PAR', 'CERTIFIE PAR', 'ENDOSSÉ PAR',
        'APPROUVÉ PAR', 'RECONNU PAR', 'AVALISÉ PAR',
    ]
    body_upper = body.upper()
    for phrase in forbidden:
        assert phrase not in body_upper, f"Forbidden marketing claim found: {phrase}"


def test_trust_bar_has_4_kpis_with_grad_text():
    """Trust bar has 4 KPI metrics rendered with grad-text (NBSP per OQLF typography)."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert '~5 min' in body
    # OQLF: non-breaking space before %/$ via the NBSP entity
    assert '95 %+' in body, "Missing NBSP-separated 95%+ KPI"
    assert '0 $' in body, "Missing NBSP-separated 0$ KPI"
    assert '100 %' in body, "Missing NBSP-separated 100% KPI"
    # Verify grad-text on KPI numbers
    assert 'grad-text mb-2' in body, "Missing grad-text on KPI numbers"


def test_trust_bar_has_methodology_footnote():
    """95%+ claim has a defensible methodology footnote (LPC art. 219 hygiene)."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    # Verifiable wording: no specific hour count, methodology available on request
    assert 'méthodologie disponible sur demande' in body or 'méthodologie disponible sur demande' in body
    assert 'audio professionnel québécois' in body or 'audio professionnel québécois' in body
    assert 'info@dictia.ca' in body


def test_pas_probleme_section_present():
    """Problème section (P of PAS frame) is present after trust bar."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert 'PROBL' in body and 'TRANSCRIPTION CLOUD' in body, "Missing Problème eyebrow"
    assert 'violent la Loi 25' in body or 'violent la Loi 25' in body, \
        "Missing legal-risk H2 anchor phrase"
    assert 'Cloud Act' in body, "Missing Cloud Act card"
    assert 'biom' in body and 'Loi 25' in body, "Missing Loi 25 biometric card"
    assert 'Sanctions disciplinaires' in body, "Missing sanctions disciplinaires card"


def test_pas_solution_section_present():
    """Solution section (S of PAS frame) is present after Problème."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert 'LA SOLUTION' in body and 'DICTIA' in body, "Missing Solution eyebrow"
    assert 'Conforme' in body and 'par design' in body, "Missing solution H2"
    assert 'WhisperX' in body, "Missing WhisperX mention"
    assert 'Mistral 7B' in body, "Missing Mistral 7B mention"
    assert 'OVH Beauharnois' in body, "Missing Quebec hosting mention"


def test_pas_solution_3_pillars_with_check_icon():
    """Solution has 3 pillars: 100% local, Conforme Loi 25, Précision FR-CA."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert '100 %' in body and 'local' in body, "Missing 100% local pillar"
    assert 'Conforme Loi 25' in body or 'Conforme Loi 25' in body, "Missing Conforme Loi 25 pillar"
    assert 'Précision FR-CA' in body or 'Précision FR-CA' in body, "Missing Précision FR-CA pillar"
    assert 'AGPL v3' in body, "Missing AGPL v3 transparency mention"


def test_pas_uses_wcag_safe_text_opacity():
    """PAS section text uses /70 opacity (WCAG AA compliant), not /40 or /50."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    # Text on white surface in problem cards must use /70 minimum
    # Check the problem card paragraph text uses navy/70 not navy/40 or /50
    assert 'text-brand-navy/70 leading-relaxed' in body or 'text-brand-navy/70 mb-3' in body
    # No regression to /40 in this section
    # (Other sections may use /40 for decorative text — we just verify the new content uses /70)


def test_bento_section_present():
    """Bento features section is present after Solution section."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert 'FONCTIONNALIT' in body, "Missing Fonctionnalités eyebrow"
    assert 'bento-title' in body, "Missing bento section anchor"
    assert "rien que vous n'ayez besoin" in body, "Missing bento H2 differentiator"


def test_bento_has_6_features():
    """Bento grid renders 6 distinct feature cards."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    for feature in ['WhisperX', 'Diarisation', 'Mistral 7B', 'RAG local',
                    'DOCX, PDF, SRT', 'Outlook, Teams']:
        assert feature in body, f"Missing bento feature: {feature}"
    # Watermark numbers 01..06
    for n in ['01', '02', '03', '04', '05', '06']:
        assert f'>{n}<' in body, f"Missing bento watermark number {n}"
    # Card 04 must use French Q&R, not English Q&A — primary identifier check
    assert 'Q&R' in body or 'Q&R' in body, "Card 04 must use French Q&R, not Q&A"


def test_bento_uses_flexihub_styling():
    """Bento uses FlexiHub spec: max-w-[1060px], gap-[1.5px], bg-brand-navy2, grad-text watermark."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert 'max-w-[1060px]' in body, "Missing FlexiHub bento container width 1060px"
    assert 'gap-[1.5px]' in body, "Missing FlexiHub ultrafin separator gap"
    assert 'bg-brand-navy2' in body, "Missing dark card background"
    # Watermark numbers now use grad-text + opacity-20 (brand blue family)
    # instead of barely-visible white/[0.04]
    assert 'grad-text opacity-20' in body, "Missing brand-tinted watermark (grad-text opacity-20)"
    # Bento icons render directly with text-brand-b1 (no grad-bg backdrop tile)
    assert 'text-brand-b1 mb-4' in body, "Missing brand-blue icon color on bento cards"


def test_bento_responsive_grid():
    """Bento grid responsive: 1 col mobile, 2 cols sm, 3 cols md+."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert 'grid-cols-1 sm:grid-cols-2 md:grid-cols-3' in body, \
        "Missing responsive grid breakpoints (1/2/3 cols)"


def test_bento_uses_wcag_safe_text_on_dark():
    """Bento card descriptions use text-white/70 (WCAG AA on bg-brand-navy2)."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert 'text-white/70' in body, "Missing WCAG-safe /70 text opacity on dark cards"


def test_bento_renders_nbsp_entities_not_escaped():
    """Card 01 '95 %+' NBSP must render as a non-breaking space, not as literal entity text.

    Regression guard: if the bento macro stops piping description through `| safe`,
    Jinja autoescape will double-escape the NBSP entity and users see the raw text.
    The HTML response must contain the literal '95 %+' once (single escape),
    never the double-escaped '95&nbsp;%+'.
    """
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert '95 %+' in body, "NBSP entity should appear single-escaped in card 01"
    assert '95&nbsp;' not in body, "NBSP entity must not be double-escaped (missing | safe?)"
    # Q&R card title: French ampersand must survive as & in HTML, not &amp;
    assert 'Q&R' in body, "Q&R title should appear single-escaped"
    assert 'Q&amp;R' not in body, "Q&R title must not be double-escaped"


def test_pricing_section_present():
    """Pricing section is present after bento section, with eyebrow + H2 + tax disclaimer."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert 'pricing-title' in body, "Missing pricing section anchor"
    assert 'Choisissez votre formule' in body, "Missing pricing H2"
    # Tax disclaimer must be visible (LPC art. 219 — total cost transparency)
    assert 'TPS' in body and 'TVQ' in body, "Missing tax disclaimer (TPS/TVQ)"


def test_pricing_3_tiers_with_canonical_amounts():
    """Pricing has 3 tiers: DictIA 8 (3450/173), DictIA 16 (5750/201), DictIA Cloud (0/369)."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    # Names
    for name in ['DictIA 8', 'DictIA 16', 'DictIA Cloud']:
        assert name in body, f"Missing pricing tier: {name}"
    # Canonical prices with NBSP per OQLF
    assert '3 450 $' in body, "Missing DictIA 8 setup price"
    assert '173 $' in body, "Missing DictIA 8 monthly price"
    assert '5 750 $' in body, "Missing DictIA 16 setup price"
    assert '201 $' in body, "Missing DictIA 16 monthly price"
    assert '369 $' in body, "Missing DictIA Cloud monthly price (canonical 369$)"


def test_pricing_recommended_tier_is_dictia_16():
    """DictIA 16 is the visually-recommended tier (RECOMMANDÉ badge + grad-bg frame)."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    assert 'RECOMMAND' in body, "Missing RECOMMANDÉ badge"
    # The recommended tier wraps in grad-bg p-[1.5px] rounded FlexiHub style
    # (V3 brutalist 4px card frame)
    assert 'grad-bg p-[1.5px] rounded"' in body or 'grad-bg p-[1.5px] rounded ' in body, \
        "Missing FlexiHub gradient frame on recommended tier (rounded 4px)"


def test_pricing_cta_uses_reserver_pre_launch_wording():
    """CTAs say 'Réserver' not 'Choisir' — pre-launch LPC art. 219 hygiene."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    for slug in ['dictia-8', 'dictia-16', 'dictia-cloud']:
        assert f'href="/checkout/{slug}"' in body, f"Missing checkout link for {slug}"
    assert 'Réserver DictIA 8' in body or 'Réserver DictIA 8' in body, \
        "CTA must use 'Réserver' wording (pre-launch)"


def test_pricing_features_use_safe_filter_no_double_escape():
    """Pricing card features piped through | safe — NBSP must render single-escaped, not double."""
    client = app.test_client()
    body = client.get('/').data.decode('utf-8')
    # GPU sizes use NBSP
    assert 'GPU 8 Go RTX' in body, "GPU 8 Go feature missing or NBSP double-escaped"
    assert 'GPU 16 Go RTX' in body, "GPU 16 Go feature missing or NBSP double-escaped"
    # Q&R card must use French Q&R, not English Q&A
    assert 'Q&R' in body, "DictIA 16 must mention Q&R (French), not Q&A (English)"
    assert 'Q&A' not in body, "Must use French Q&R consistently — no English Q&A"
    # Loi 25 with NBSP
    assert 'Conforme Loi 25' in body, "Conforme Loi 25 must use NBSP"
    # SLA must be hedged ('visé') not absolute claim
    assert 'SLA visé 99,9' in body, "SLA must be hedged 'visé' (pre-launch LPC art. 219 hygiene)"
    # Negative: NO double-escape
    assert '&nbsp;' not in body, "NBSP must not be double-escaped — | safe missing on pricing macro?"
def test_pricing_uses_wcag_safe_text_on_white(): """Pricing card text uses text-brand-navy/70 or /80 minimum (WCAG AA on white).""" client = app.test_client() body = client.get('/').data.decode('utf-8') # No regression to weak opacities like /40 or /50 in pricing area assert 'text-brand-navy/70' in body # The features list uses /80 in our impl assert 'text-brand-navy/80' in body, "Feature text should use /80 for WCAG AA" def test_roi_calculator_present_with_alpine_bindings(): """ROI calculator section present with Alpine.js bindings + transparent hypotheses footnote.""" client = app.test_client() body = client.get('/').data.decode('utf-8') assert 'CALCULATEUR ROI' in body assert 'roi-title' in body, "ROI calculator must have aria-labelledby anchor" assert 'x-data="roiCalculator()"' in body # Three sliders with x-model.number for type coercion assert 'x-model.number="users"' in body assert 'x-model.number="hours"' in body assert 'x-model.number="rate"' in body # Live output bindings assert 'x-text="savings' in body assert 'payback === null' in body, "Payback display must use null sentinel branch" assert "moins d\\'un mois" in body or 'moins d'un mois' in body or "moins d'un mois" in body, \ "Payback display must offer 'moins d'un mois' branch" # Transparent hypothesis footnote — LPC art. 
219 hygiene assert '80 %' in body and 'jours ouvrables' in body, "Missing transparent hypothesis footnote" # Sliders accessible (aria-label on each input) assert 'aria-label="Nombre d' in body def test_roi_calculator_script_loaded(): """roi_calculator.js loaded via {% block scripts %} (deferred after Alpine.js).""" client = app.test_client() body = client.get('/').data.decode('utf-8') assert '/static/js/roi_calculator.js' in body, "ROI script must be referenced" # Must come AFTER alpine.min.js in the document order alpine_pos = body.find('alpine.min.js') roi_pos = body.find('roi_calculator.js') assert alpine_pos != -1 and roi_pos != -1 assert alpine_pos < roi_pos, "Alpine.js must load before roi_calculator.js" def test_roi_calculator_sliders_capped_defensibly(): """Sliders capped: users<=25, hours<=4 (LPC art. 219 hygiene — no $35M screenshots).""" client = app.test_client() body = client.get('/').data.decode('utf-8') # Users slider max must be 25, not 50 assert 'x-model.number="users"' in body assert 'max="25"' in body, "Users slider must cap at 25 (was 50 — too aggressive for marketing claim)" # Hours slider max must be 4, not 8 assert 'x-model.number="hours"' in body assert 'max="4"' in body, "Hours slider must cap at 4 (was 8 — too aggressive)" def test_roi_savings_paragraph_has_aria_live(): """Savings

must announce updates to screen readers on slider change (aria-live polite).""" client = app.test_client() body = client.get('/').data.decode('utf-8') # The savings paragraph (the headline number) must be a polite live region assert 'aria-live="polite"' in body assert 'aria-atomic="true"' in body # Verify it's on the savings line, not somewhere unrelated # (The savings p is the only element with text-5xl in the section) assert 'text-5xl font-black grad-text' in body def test_pricing_cta_url_no_double_slash(): """pricing_card uses cta_url.rstrip('/') so href never has '//' (regression guard).""" client = app.test_client() body = client.get('/').data.decode('utf-8') # All 3 CTAs use the default cta_url='/checkout' (no trailing slash) — so /checkout/dictia-X for slug in ['dictia-8', 'dictia-16', 'dictia-cloud']: assert f'href="/checkout/{slug}"' in body, f"Missing single-slash href for {slug}" assert f'href="/checkout//{slug}"' not in body, f"Double-slash regression for {slug}" def test_footer_has_4_columns_with_aria_labels(): """Full footer has 4 columns (Brand, Produit, Légal, Compte) with proper landmarks.""" client = app.test_client() body = client.get('/').data.decode('utf-8') # Footer landmark with accessible name assert 'footer-heading' in body, "Footer must have an accessible heading" assert 'aria-label="Produit"' in body assert 'aria-label="L' in body and 'gal"' in body # Légal (handles entity-encoded é) assert 'aria-label="Compte"' in body # Address with tel + mailto assert '') + len('') footer_html = body[footer_start:footer_end] assert 'text-white/70' in footer_html, "Footer text must use /70 opacity for WCAG AA" # Negative regression assert 'text-white/40' not in footer_html, "Footer must not regress to /40 opacity" assert 'text-white/50' not in footer_html, "Footer must not regress to /50 opacity" def test_comparatif_section_present(): """Comparatif section is present after Pricing with table + sourcing footnote.""" client = app.test_client() body = 
client.get('/').data.decode('utf-8') assert 'comparatif-title' in body assert 'COMPARATIF' in body assert 'DictIA face aux solutions cloud' in body # Sourcing footnote (LPC art. 219 hygiene) assert 'sources publiques' in body, "Must disclose sources for competitor claims" assert '2026-04-27' in body, "Must date the comparison" # Trademark disclaimer assert 'marques déposées' in body or 'marques déposées' in body, \ "Trademark disclaimer required for competitor names" def test_comparatif_table_has_4_competitors_and_6_criteria(): """Comparatif table lists DictIA + 3 competitors over 6 criteria rows.""" client = app.test_client() body = client.get('/').data.decode('utf-8') # Column headers for col in ['DictIA', 'MS Teams Premium', 'Otter.ai Business', 'Whisper local']: assert col in body, f"Comparatif missing column: {col}" # 6 criteria (extract by their distinctive phrasing) criteria_keywords = [ 'Conforme Loi', # row 1 'Souveraineté hors Cloud Act', # row 2 (renamed) 'Large-v3 fine-tun', # row 3 'Diarisation jusqu', # row 4 (renamed) 'mensuel par utilisateur', # row 5 (renamed) 'Audit trail' # row 6 ] for kw in criteria_keywords: assert kw in body, f"Comparatif missing criterion containing: {kw}" def test_comparatif_uses_responsive_overflow_scroll(): """Comparatif table wraps in overflow-x-auto for narrow viewports + has accessible caption.""" client = app.test_client() body = client.get('/').data.decode('utf-8') assert 'overflow-x-auto' in body # Caption is sr-only but mandatory for table accessibility assert '' in body # Scope attributes on column and row headers assert 'scope="col"' in body assert 'scope="row"' in body def test_conformite_section_present(): """Conformité forteresse section is present with 4 pillar cards.""" client = app.test_client() body = client.get('/').data.decode('utf-8') assert 'conformite-title' in body assert 'CONFORMIT' in body and 'FORTERESSE' in body # Soft hedge: "conçue avec" (not "certifiée par") assert 'conçue avec' in body or 
'conçue avec' in body, \ "Must use soft hedge 'conçue avec' (LPC art. 219)" def test_conformite_4_pillars(): """Conformité has 4 pillars: hébergement, Loi 25, Cadre IA, AGPL.""" client = app.test_client() body = client.get('/').data.decode('utf-8') pillar_keywords = [ 'OVH Beauharnois', # pillar 1 'LPRPSP', # pillar 2 (Loi 25 reference) 'LGGRI', # pillar 3 (Cadre IA reference) 'AGPL' # pillar 4 ] for kw in pillar_keywords: assert kw in body, f"Conformité missing pillar reference: {kw}" # Soft hedges (LPC art. 219) assert 'Mapp' in body, "Must use 'Mappé' (not 'Certifié')" # Citation/contact for verification assert 'info@dictia.ca' in body # SOC 2 claim must be hedged ('selon le périmètre') not absolute assert 'selon le périmètre' in body or 'selon le périmètre' in body, \ "SOC 2 claim must be hedged — see code-review I-3" # ISO 27001 reference is OK (verifiable from OVH compliance page) assert 'ISO' in body and '27001' in body, "Reference to ISO 27001 expected" def test_conformite_uses_wcag_safe_text_on_dark(): """Conformité card text uses text-white/80 minimum on bg-brand-navy.""" client = app.test_client() body = client.get('/').data.decode('utf-8') # Within the conformite section block specifically section_start = body.find('id="conformite-title"') # Find next section_end = body.find('', section_start) section_html = body[section_start:section_end] assert 'text-white/80' in section_html or 'text-white/70' in section_html, \ "Conformité must use /70+ on dark for WCAG AA" def test_no_unverifiable_competitor_claims(): """Comparatif must NOT contain unhedged percentage claims about competitors (LPC art. 
219).""" client = app.test_client() body = client.get('/').data.decode('utf-8') # Forbidden patterns: bold quantitative claims like '5 stars', '100% accurate', 'X% precision' # We allow our own '95%+' (already hedged with methodology footnote elsewhere) forbidden_phrases = [ 'Otter.ai a 100', # No claims about Otter accuracy 'Teams a 99', # No claims about Teams accuracy '50% moins cher', # No comparative pricing without verification ] for phrase in forbidden_phrases: assert phrase not in body, f"Forbidden competitive claim: {phrase}" def test_comparatif_check_marks_consistently_mean_good(): """Status SVGs in the comparatif must mark each cell with the right semantic. Regression guard against the inverted-Cloud-Act-row bug. Since the visual marker (✓ / ✗ / ⚠) was migrated to inline SVGs (no emoji policy), we assert on (a) the row label, (b) each Teams cell wraps "Soumis Cloud Act" with the red 'Non conforme' SVG, and (c) DictIA cell uses the green 'Conforme' SVG. """ client = app.test_client() body = client.get('/').data.decode('utf-8') # The 'Souveraineté hors Cloud Act' row must remain (after rename) assert 'Souveraineté hors Cloud Act' in body # And must NOT have the legacy inverted form assert 'Exposé au Cloud Act' not in body, "Row 2 must be reworded to positive convention" # Teams cell for the territoriality criterion: must include the "Non conforme" SVG # immediately followed by the "Soumis Cloud Act" label. 
assert 'Soumis Cloud Act' in body, "Row 1 Teams cell must say 'Soumis Cloud Act'" assert 'aria-label="Non conforme"' in body, \ "X-mark SVG with aria-label='Non conforme' must be present (Teams ✗)" assert 'aria-label="Conforme"' in body, \ "Check SVG with aria-label='Conforme' must be present (DictIA ✓)" assert 'aria-label="Partiel"' in body, \ "Warning SVG with aria-label='Partiel' must be present (⚠ rows)" def test_testimonials_section_present_with_placeholder_cards(): """Témoignages section present with 3 placeholder cards (no fabricated quotes).""" client = app.test_client() body = client.get('/').data.decode('utf-8') assert 'testimonials-title' in body assert 'Premiers cabinets pilotes' in body # 3 personas assert 'Cabinet juridique pilote' in body assert 'Cabinet CPA pilote' in body assert 'Municipalit' in body and 'pilote' in body # No fabricated quotes — must say "Témoignage à venir" assert 'Témoignage à venir' in body or 'Témoignage à venir' in body, \ "Pre-launch testimonials must show 'Témoignage à venir' (LPC art. 219)" # Expected publication months assert 'Mai 2026' in body assert 'Juin 2026' in body def test_testimonials_use_personas_not_fake_names(): """Testimonials must NOT contain fabricated names (Me Tremblay, Mme Bouchard, etc.).""" client = app.test_client() body = client.get('/').data.decode('utf-8') forbidden_names = ['Me Tremblay', 'Mme Bouchard', 'M. 
Lefebvre', 'Cabinet Pilote A', 'Cabinet Pilote B', 'Municipalité Pilote C'] for name in forbidden_names: assert name not in body, f"Forbidden fabricated testimonial name: {name}" def test_faq_section_with_10_questions(): """FAQ section (round 3) present with 10 canonical questions from dictai-page-content.tsx.""" client = app.test_client() body = client.get('/').data.decode('utf-8') assert 'faq-title' in body # 10 panel IDs (loop.index is 1-indexed) for i in range(1, 11): assert f'id="faq-panel-{i}"' in body, f"Missing FAQ panel {i}" # Round 3 canonical topic anchors (sourced from dictai-page-content.tsx) topics = ['Comment fonctionne la transcription', 'formats audio', '1 heure d', 'confidentielle', 'Teams Copilot', 'Otter.ai', 'Barreau du Qu', 'Clio Manage', 'connaissances techniques', 'open source'] for topic in topics: assert topic in body, f"FAQ missing topic anchor: {topic}" def test_faq_alpine_accordion_bindings(): """FAQ uses Alpine.js x-data + @click + :aria-expanded for accessible accordion (10 items).""" client = app.test_client() body = client.get('/').data.decode('utf-8') # 10 x-data="{ open: false }" instances (round 3 enrichment) assert body.count('x-data="{ open: false }"') == 10, \ "FAQ must have 10 independent Alpine accordion items" # Each toggle button has @click and :aria-expanded assert body.count('@click="open = !open"') == 10 assert body.count(':aria-expanded="open.toString()"') == 10 # Use built-in x-transition (NOT x-collapse plugin which is not bundled) assert 'x-collapse' not in body, "Must NOT use x-collapse plugin (not loaded — use x-transition)" assert 'x-transition.opacity' in body, "FAQ panels must use built-in x-transition" # aria-controls links button to panel assert 'aria-controls="faq-panel-1"' in body def test_faq_jsonld_schema_present(): """FAQ section embeds Schema.org FAQPage JSON-LD for SEO/GEO.""" client = app.test_client() body = client.get('/').data.decode('utf-8') # Inline JSON-LD script assert '', 
body.find('"FAQPage"'))], \ "JSON-LD must not contain raw ' ' entities — strip them server-side" # M-1 hardening: actually parse the JSON-LD to catch malformed JSON regressions import json import re match = re.search(r'', body, re.DOTALL) assert match, "JSON-LD block not found" parsed = json.loads(match.group(1)) assert parsed['@context'] == 'https://schema.org' assert parsed['@type'] == 'FAQPage' assert isinstance(parsed['mainEntity'], list) # Round 3: enriched to 10 canonical questions from dictai-page-content.tsx assert len(parsed['mainEntity']) == 10, "FAQPage must contain exactly 10 questions (round 3)" for q in parsed['mainEntity']: assert q['@type'] == 'Question' assert q['acceptedAnswer']['@type'] == 'Answer' assert q['name'].strip(), "Question name must not be empty" assert q['acceptedAnswer']['text'].strip(), "Answer text must not be empty" def test_cta_final_section(): """CTA final (round 3) — primary démo gratuite + mailto pré-inscription + ghost button to #tarifs.""" client = app.test_client() body = client.get('/').data.decode('utf-8') assert 'cta-title' in body # Round 3 wording reinforced: "Prêt à protéger vos données" + démo gratuite assert 'prot' in body and 'donn' in body, "Missing 'protéger vos données' headline" assert 'démo gratuite' in body or 'démo gratuite' in body, \ "Round 3 primary CTA must say 'démo gratuite'" # Pré-inscription wording (any case) preserved as secondary path assert 'pré-inscription' in body or 'pré-inscription' in body \ or 'Pré-inscription' in body or 'Pré-inscription' in body, \ "Pré-inscription wording must be preserved" # mailto with subject assert 'href="mailto:info@dictia.ca?subject=Pr%C3%A9-inscription%20DictIA"' in body or \ 'href="mailto:info@dictia.ca?subject=Pré-inscription%20DictIA"' in body, \ "CTA must have mailto with subject prefilled" # Anchor link to existing #tarifs section assert 'href="#tarifs"' in body, "Secondary CTA must anchor to pricing" # Ghost variant button still in use (mailto + #tarifs) 
assert 'border-white/[0.08]' in body # ghost button class def test_cta_final_uses_safe_pre_launch_wording(): """CTA must use 'Réservez votre pré-inscription' not 'Achetez maintenant' (pre-launch).""" client = app.test_client() body = client.get('/').data.decode('utf-8') forbidden = ['Achetez maintenant', 'Acheter maintenant', 'Acheter immédiatement', 'Acheter dès aujourd', 'Disponible immédiatement'] for phrase in forbidden: assert phrase not in body, f"Forbidden pre-launch CTA: {phrase}" def test_round2_cycle_section_present(): """Round 2 — Cycle section ('Trois options. Une seule est conforme.') must be on landing. Sourced from dictai-cycle.tsx; covers the 3-column comparative narrative (humaine / cloud US / DictIA) with canonical pricing 315 $ vs 173 $ and savings. """ client = app.test_client() body = client.get('/').data.decode('utf-8') assert 'cycle-title' in body, "Cycle section H2 id must be present" assert 'Trois options' in body assert 'Une seule est conforme' in body assert 'Retranscription humaine' in body assert 'IA cloud américaine' in body assert 'NON CONFORME' in body assert '315' in body and '173' in body, "Canonical Cycle pricing must appear" assert 'Loi 25 conforme' in body assert '100 % hébergé au Québec' in body or '100 % hébergé au Québec' in body # Phase animation hooks assert 'cycle-pulse' in body, "Pulse rings keyframe class missing" assert 'cycle-card-dictia' in body # Reduced-motion safety assert 'prefers-reduced-motion' in body def test_round2_wave_section_present(): """Round 2 — Wave section (chaos→ordre interactive slider) must be on landing. Sourced from dictai-wave.tsx; mouse-X morphs 30 bars red→cyan + pain/solution cards. 
""" client = app.test_client() body = client.get('/').data.decode('utf-8') assert 'wave-title' in body, "Wave section H2 id must be present" assert 'La transcription manuelle' in body assert 'coûte cher' in body # Canonical pain labels assert '4 à 6h pour transcrire 1h' in body assert 'Délais de 48h à 5 jours' in body # Canonical solution labels (NBSP-aware) assert '~2 min pour 1h d' in body assert '173 $/mois' in body or '173 $/mois' in body # Alpine state for interactive slider assert 'onMove($event)' in body assert 'isMobile' in body # Mobile fallback toggle assert 'Activer DictIA' in body assert 'Voir sans DictIA' in body def test_round2_cadre_reglementaire_section_present(): """Round 2 — Cadre réglementaire (Moniteur d'Interception) with 6 REGS list. Sourced from dictai-contraste.tsx (REGS + MoniteurInterception subcomponent). """ client = app.test_client() body = client.get('/').data.decode('utf-8') assert 'cadre-title' in body, "Cadre réglementaire H2 id must be present" assert "Moniteur d'Interception" in body assert 'enfreignent' in body # 6 REGS — each must appear with its hyperlink for reg_label in ['Loi 25 (P-39.1)', 'Loi 96 (C-11)', 'US Cloud Act', 'Guide IA — Barreau QC', 'Cadre IA — MCN', 'CAI']: assert reg_label in body, f"Missing REG label: {reg_label}" # Authoritative sources assert 'legisquebec.gouv.qc.ca' in body assert 'cai.gouv.qc.ca' in body assert 'tresor.gouv.qc.ca' in body # HUD lines assert 'Interception IA détectée' in body assert 'NON CONFORME' in body # Cycle animation hooks assert 'cadre-folder' in body assert 'runCycle' in body def test_round4_cadre_cinematic_features(): """Round 4 — Cadre Moniteur d'Interception cinematic upgrades. 
- Radar sweep circulaire continu en background HUD - 6 paquets data 'voice.wav' en flight QC→US (offset-path bezier) - Console typewriter char-by-char (3 lignes via hudTyped + caret blink) - 6 REGS reveal cascadé (revealedRegs IntersectionObserver) - Verdict 'NON CONFORME' pulse glow + scan-line traversante - eyebrow ⚠ remplacé par SVG warning-triangle """ client = app.test_client() body = client.get('/').data.decode('utf-8') # Radar sweep assert 'cadre-radar-sweep' in body, "Round 4 radar sweep keyframe missing" assert 'cadre-radar' in body # 6 data packets en flight (voice.wav répété 6×) assert body.count('voice.wav') >= 6, "Round 4 must have 6 'voice.wav' packets in flight" assert 'cadre-packet' in body assert 'offset-path' in body, "Round 4 packets must use offset-path for bezier flight" # Typewriter assert 'hudTyped' in body, "Round 4 typewriter state missing" assert 'typeLine' in body, "Round 4 typewriter function missing" # REGS cascade reveal assert 'revealRegsCascade' in body or 'revealedRegs' in body assert 'cadre-reg-item' in body # Verdict pulse glow + scan line assert 'cadre-verdict-active' in body assert 'cadre-scan-line' in body # ⚠ remplacé par SVG (le mot WARNING ne doit plus apparaître entouré de ⚠ dans l'eyebrow Cadre) assert '⚠ CADRE RÉGLEMENTAIRE QUÉBEC' not in body, "⚠ literal must be replaced by SVG" def test_round4_cycle_cinematic_features(): """Round 4 — Cycle (Trois options) cinematic upgrades. 
- Phase reveal séquentiel + price counter Col 1 (priceHumain 0→315) - Stamp 'NON CONFORME' impact (cycle-stamp keyframes) - Col 3 checkmark draw (cycle-check-svg stroke-dashoffset) - Col 3 glow vert (cycle-conforme-glow) - Badge 'Loi 25 conforme' pulse (cycle-conforme-badge) - Section 'Économies annuelles · 25 utilisateurs' avec 3 counters (sav1/sav2/sav3) - Connecting line dash flow (cycle-line-flow) - eyebrow ⚠ remplacé par SVG """ client = app.test_client() body = client.get('/').data.decode('utf-8') # Price counter assert 'priceHumain' in body, "Round 4 price counter state missing" assert 'countTo' in body, "Round 4 counter helper missing" # Stamp impact + flash assert 'cycle-stamp' in body assert 'cycle-stamp-impact' in body or '@keyframes cycle-stamp-impact' in body assert 'cycle-col-flash' in body # Checkmark draw assert 'cycle-check-svg' in body # Conforme badge + glow assert 'cycle-conforme-badge' in body assert 'cycle-conforme-glow' in body assert 'Loi 25 conforme' in body or 'Loi 25 conforme' in body # Économies annuelles avec 3 counters assert 'Économies annuelles' in body assert 'sav1' in body and 'sav2' in body and 'sav3' in body assert 'cycle-savings-card' in body # Live dot "Réunion en cours" assert 'cycle-live-dot' in body # Dash flow assert 'cycle-line-flow' in body # ⚠ remplacé assert '⚠ CADRE RÉGLEMENTAIRE

' not in body, "Cycle eyebrow ⚠ literal must be replaced by SVG" def test_round4_no_emoji_warning_triangle(): """Round 4 — aucun ⚠ littéral ne doit subsister dans le HTML rendu.""" client = app.test_client() body = client.get('/').data.decode('utf-8') # Le caractère ⚠ U+26A0 ne doit plus apparaître nulle part dans landing.html # (sauf dans les keyframes/CSS comments qui sont absents) assert '⚠' not in body, "Round 4 must not contain literal ⚠ character anywhere on landing" def test_round4_reduced_motion_disables_all_cinematics(): """Round 4 — prefers-reduced-motion media query must disable ALL new cinematics.""" client = app.test_client() body = client.get('/').data.decode('utf-8') # Le bloc @media (prefers-reduced-motion: reduce) doit explicitement neutraliser : # - radar (cadre-radar) # - packets (cadre-packet) # - typewriter (typeLine has reduced-motion shortcut) # - stamp (cycle-stamp) # - conforme glow + badge assert 'prefers-reduced-motion: reduce' in body assert 'cadre-radar' in body and 'cadre-packet' in body # Round 4 reduced-motion must disable cycle-stamp + cycle-conforme-badge animations assert 'cycle-stamp' in body and 'cycle-conforme-badge' in body # Counter helper has explicit reduced-motion guard assert "matchMedia('(prefers-reduced-motion: reduce)')" in body def test_round2_no_external_js_libs_added(): """Round 2 must NOT add Framer Motion / GSAP / canvas-confetti / etc.""" client = app.test_client() body = client.get('/').data.decode('utf-8') forbidden_libs = ['framer-motion', 'gsap', 'canvas-confetti', 'three.min.js', 'lottie-web', 'anime.min.js'] for lib in forbidden_libs: assert lib not in body, f"Round 2 must not introduce JS lib: {lib}" def test_round2_preserves_existing_sections(): """Round 2 + 3 inserts must NOT remove Hero / Pipeline / Hub / Bento / Comparatif / Conformité. NOTE: round 3 replaced the hero copy ('sans risquer votre permis' → canonical wordmark + 'Transcription IA locale en 2 minutes'). 
The hero ID + pipeline are still required. """ client = app.test_client() body = client.get('/').data.decode('utf-8') # Hero (round 3 canonical hero replaces round 0) assert 'hero-title' in body assert 'Transcription IA locale en 2' in body, "Round 3 hero canonical phrase missing" # Pipeline (round 1) — auto-advance + 4 nodes assert 'pipeline-title' in body assert 'Du fichier au résumé' in body # Hub (round 1) assert 'hub-title' in body assert 'se connecte à tout' in body # Bento + ROI calculator assert 'bento-title' in body assert 'roiCalculator()' in body # Comparatif + Conformité assert 'comparatif-title' in body assert 'conformite-title' in body # Trust bar 9 ordres assert 'Mappé aux 9' in body def test_round2_oqlf_nbsp_in_new_sections(): """OQLF — non-breaking space before currency $ and % in round 2 sections.""" client = app.test_client() body = client.get('/').data.decode('utf-8') # Cycle section savings assert '3 924 $' in body or '3 924 $' in body # Wave solution card pricing assert '173 $/mois' in body or 'Dès 173' in body # Cadre — Loi 25 fine assert '25 M$' in body or '25 M$' in body def test_routes_passes_testimonials_and_faq_to_template(): """marketing.routes.landing() must pass testimonials and faq to render_template.""" # Import the module to verify the data lists exist from src.marketing import routes assert hasattr(routes, 'TESTIMONIALS'), "Module must define TESTIMONIALS list" assert hasattr(routes, 'FAQ'), "Module must define FAQ list" assert len(routes.TESTIMONIALS) == 3, "Must have 3 placeholder testimonials" # Round 3: enriched FAQ from 7 to 10 canonical questions (sourced from dictai-page-content.tsx) assert len(routes.FAQ) == 10, "Must have 10 FAQ entries (round 3)" # Each testimonial must NOT contain a 'quote' field (no fabricated quotes pre-launch) for t in routes.TESTIMONIALS: assert 'quote' not in t, "Pre-launch testimonials must not contain quote fields" assert 'persona' in t and 'placeholder_label' in t and 'expected' in t # Each FAQ 
entry must have 'q' and 'a' for item in routes.FAQ: assert 'q' in item and 'a' in item