Python · 3858 bytes Raw Blame History
1 """Shared pytest fixtures and options.
2
3 Fast, local-only: no torch / transformers / huggingface_hub imports at
4 collection time. Fixtures that need heavy deps import them lazily.
5 """
6
7 from __future__ import annotations
8
9 import random
10 from collections.abc import Iterator
11 from pathlib import Path
12
13 import pytest
14
# Load session-scoped fixtures from `tests/fixtures/` as plugins so
# `tiny_model_dir` (and any future heavy fixtures) are discoverable
# without per-directory conftest duplication.
# NOTE: pytest only honors `pytest_plugins` in the rootdir conftest.
pytest_plugins: list[str] = ["tests.fixtures.tiny_model", "tests.fixtures.trained_store"]
19
20
def pytest_addoption(parser: pytest.Parser) -> None:
    """Register this suite's custom command-line flags.

    Both flags are boolean opt-ins (``store_true``, default ``False``):
    ``--update-goldens`` and ``--run-heavy-vl``.
    """
    flag_specs: tuple[tuple[str, str], ...] = (
        (
            "--update-goldens",
            "Regenerate golden-output fixtures instead of asserting against them.",
        ),
        (
            "--run-heavy-vl",
            "Opt into heavy VL integration-test bodies that need ~8 GB "
            "intermediate storage + several minutes of training (e.g., "
            "VL GGUF round-trip). Without this flag, gated heavy bodies "
            "skip with a clear message even when all other prereqs are met.",
        ),
    )
    # Registration order matters for --help output; keep goldens first.
    for flag, help_text in flag_specs:
        parser.addoption(flag, action="store_true", default=False, help=help_text)
39
40
@pytest.fixture
def update_goldens(request: pytest.FixtureRequest) -> bool:
    """Report whether ``--update-goldens`` was passed (consumed by golden.py)."""
    flag = request.config.getoption("--update-goldens")
    return bool(flag)
45
46
@pytest.fixture
def run_heavy_vl(request: pytest.FixtureRequest) -> bool:
    """Report whether ``--run-heavy-vl`` was passed.

    Heavy VL tests (e.g. the train→merge→convert→quantize round-trip)
    check this fixture and skip when it is False, so CI never burns
    ~8 GB of scratch space by accident.
    """
    flag = request.config.getoption("--run-heavy-vl")
    return bool(flag)
57
58
@pytest.fixture
def seeded_rng() -> Iterator[int]:
    """Deterministically seed the stdlib ``random`` module for one test.

    numpy / torch seeding is deliberately left to the hardware_mocks /
    tiny_model fixtures — not every test imports those, so seeding them
    here would be wasted work. Restores the prior RNG state on teardown.
    """
    saved_state = random.getstate()
    random.seed(42)
    try:
        # Hand the seed to the test in case it wants to re-seed locally.
        yield 42
    finally:
        random.setstate(saved_state)
73
74
@pytest.fixture
def dlm_home(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Path:
    """Point $DLM_HOME at a fresh per-test directory so stores stay sandboxed."""
    sandbox = tmp_path / "dlm-home"
    sandbox.mkdir()
    monkeypatch.setenv("DLM_HOME", str(sandbox))
    return sandbox
82
83
@pytest.fixture(autouse=True)
def _offline_hf_env(monkeypatch: pytest.MonkeyPatch) -> None:
    """Force HF offline + telemetry-off for every test by default.

    Fast-path tests never touch HF. Tests that need the network use the
    `tiny_model` fixture, which clears these vars in its own scope.
    """
    forced_on = (
        # Offline switches for hub / transformers / datasets.
        "HF_HUB_OFFLINE",
        "TRANSFORMERS_OFFLINE",
        "HF_DATASETS_OFFLINE",
        # Always reinforce our telemetry-off contract.
        "HF_HUB_DISABLE_TELEMETRY",
        "DO_NOT_TRACK",
    )
    for env_var in forced_on:
        monkeypatch.setenv(env_var, "1")
95
96
@pytest.fixture
def hf_cache_home(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> Path:
    """Send HF_HOME to a per-test tmp dir for tests that download models.

    The session-scoped `tiny_model` fixture overrides this with a shared
    cache dir so CI can reuse downloads across tests.
    """
    cache_dir = tmp_path / "hf-cache"
    cache_dir.mkdir()
    monkeypatch.setenv("HF_HOME", str(cache_dir))
    return cache_dir
108
109
110 # --- platform env cleanup -----------------------------------------------------
111
112
@pytest.fixture(autouse=True)
def _clean_cuda_env(monkeypatch: pytest.MonkeyPatch) -> None:
    """Strip inherited CUDA env vars so hardware-mock tests start clean."""
    # raising=False: absent vars are the common (and fine) case.
    monkeypatch.delenv("CUDA_VISIBLE_DEVICES", raising=False)
    monkeypatch.delenv("CUDA_LAUNCH_BLOCKING", raising=False)