1 """Vendoring path resolution — missing/uninitialized submodule handling."""
2
3 from __future__ import annotations
4
5 import errno
6 from pathlib import Path
7
8 import pytest
9
10 from dlm.export.errors import VendoringError
11 from dlm.export.vendoring import (
12 convert_hf_to_gguf_py,
13 convert_lora_to_gguf_py,
14 llama_cpp_root,
15 llama_quantize_bin,
16 llama_server_bin,
17 pinned_tag,
18 )
19
20
21 def _populate_vendor(root: Path, *, with_binary: bool = True) -> Path:
22 """Create a fake `vendor/llama.cpp/` layout the resolver accepts."""
23 root.mkdir(parents=True, exist_ok=True)
24 (root / "convert_hf_to_gguf.py").write_text("# mock")
25 (root / "convert_lora_to_gguf.py").write_text("# mock")
26 if with_binary:
27 bin_dir = root / "build" / "bin"
28 bin_dir.mkdir(parents=True)
29 binary = bin_dir / "llama-quantize"
30 binary.write_text("# mock binary")
31 binary.chmod(0o755)
32 (root / "VERSION").write_text("b1234\n")
33 return root
34
35
class TestLlamaCppRoot:
    def test_missing_directory_raises(self, tmp_path: Path) -> None:
        """A path that does not exist is reported as missing."""
        absent = tmp_path / "absent"
        with pytest.raises(VendoringError, match="missing"):
            llama_cpp_root(override=absent)

    def test_empty_directory_raises(self, tmp_path: Path) -> None:
        """A directory with no contents is rejected as empty."""
        bare = tmp_path / "empty"
        bare.mkdir()
        with pytest.raises(VendoringError, match="empty"):
            llama_cpp_root(override=bare)

    def test_populated_directory_resolves(self, tmp_path: Path) -> None:
        """A fully populated vendor tree resolves to itself."""
        vendor = _populate_vendor(tmp_path / "llama.cpp")
        assert llama_cpp_root(override=vendor) == vendor

    def test_enumeration_failure_raises(
        self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """An OSError while listing the directory surfaces as a VendoringError."""
        vendor = _populate_vendor(tmp_path / "llama.cpp")

        def _boom(self: Path) -> object:
            raise OSError(errno.EIO, "boom")

        # Patch Path.iterdir globally so the resolver's directory scan fails.
        monkeypatch.setattr(Path, "iterdir", _boom)

        with pytest.raises(VendoringError, match="cannot enumerate"):
            llama_cpp_root(override=vendor)
63
64
class TestScriptResolvers:
    def test_convert_hf_resolves(self, tmp_path: Path) -> None:
        """The HF conversion script is located inside the vendor root."""
        vendor = _populate_vendor(tmp_path / "llama.cpp")
        script = convert_hf_to_gguf_py(override=vendor)
        assert script.name == "convert_hf_to_gguf.py"
        assert script.is_file()

    def test_convert_lora_resolves(self, tmp_path: Path) -> None:
        """The LoRA conversion script is located inside the vendor root."""
        vendor = _populate_vendor(tmp_path / "llama.cpp")
        script = convert_lora_to_gguf_py(override=vendor)
        assert script.name == "convert_lora_to_gguf.py"

    def test_missing_script_raises(self, tmp_path: Path) -> None:
        """Deleting the script makes resolution fail, naming the script."""
        vendor = _populate_vendor(tmp_path / "llama.cpp")
        (vendor / "convert_hf_to_gguf.py").unlink()
        with pytest.raises(VendoringError, match="convert_hf_to_gguf"):
            convert_hf_to_gguf_py(override=vendor)
82
83
class TestLlamaBinaries:
    def test_resolves_build_bin_layout(self, tmp_path: Path) -> None:
        """llama-quantize is found under the standard `build/bin/` layout."""
        vendor = _populate_vendor(tmp_path / "llama.cpp")
        resolved = llama_quantize_bin(override=vendor)
        assert resolved.is_file()
        assert resolved.name == "llama-quantize"

    def test_llama_server_resolves_build_bin_layout(self, tmp_path: Path) -> None:
        """llama-server is found under `build/bin/` when present."""
        vendor = _populate_vendor(tmp_path / "llama.cpp", with_binary=False)
        server = vendor / "build" / "bin" / "llama-server"
        server.parent.mkdir(parents=True, exist_ok=True)
        server.write_text("# mock binary")
        server.chmod(0o755)
        resolved = llama_server_bin(override=vendor)
        assert resolved.is_file()
        assert resolved.name == "llama-server"

    def test_missing_binary_raises(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
        """No vendored binary and an empty PATH yields a VendoringError."""
        # Point PATH at a nonexistent dir so the `shutil.which` fallback
        # cannot pick up a brew-installed llama-quantize on a dev machine.
        monkeypatch.setenv("PATH", str(tmp_path / "empty"))
        vendor = _populate_vendor(tmp_path / "llama.cpp", with_binary=False)
        with pytest.raises(VendoringError, match="llama-quantize"):
            llama_quantize_bin(override=vendor)

    def test_missing_server_binary_raises(
        self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Same as above, for llama-server."""
        monkeypatch.setenv("PATH", str(tmp_path / "empty"))
        vendor = _populate_vendor(tmp_path / "llama.cpp", with_binary=False)
        with pytest.raises(VendoringError, match="llama-server"):
            llama_server_bin(override=vendor)

    def test_path_lookup_returns_binary_when_vendor_missing(
        self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """With no vendored binary, the PATH lookup result is returned as-is."""
        monkeypatch.setenv("PATH", str(tmp_path))
        on_path = tmp_path / "llama-quantize"
        on_path.write_text("#!/bin/sh\n", encoding="utf-8")
        on_path.chmod(0o755)
        # Deterministic which() so the test doesn't depend on host PATH quirks.
        monkeypatch.setattr(
            "shutil.which", lambda name: str(on_path) if name == "llama-quantize" else None
        )

        vendor = _populate_vendor(tmp_path / "llama.cpp", with_binary=False)
        resolved = llama_quantize_bin(override=vendor)

        assert resolved == on_path

    def test_dlm_llama_cpp_build_env_preferred(
        self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Audit-08 M6: `DLM_LLAMA_CPP_BUILD` overrides the default vendor dir.

        The env var points at a build-only dir (e.g. the ROCm
        `vendor/llama.cpp/build-rocm`) that contains only binaries, and
        `bin/llama-quantize` there must win before the vendor tree is
        consulted.  Production calls with `override=None`; we mirror that
        by routing vendor resolution through `DLM_LLAMA_CPP_ROOT`, since
        ROCm users set both env vars together.
        """
        rocm_build = tmp_path / "build-rocm"
        rocm_bin = rocm_build / "bin" / "llama-quantize"
        rocm_bin.parent.mkdir(parents=True)
        rocm_bin.write_text("#!/bin/sh\necho rocm\n")
        rocm_bin.chmod(0o755)

        vendor = _populate_vendor(tmp_path / "llama.cpp")

        monkeypatch.setenv("DLM_LLAMA_CPP_BUILD", str(rocm_build))
        monkeypatch.setenv("DLM_LLAMA_CPP_ROOT", str(vendor))
        # The ROCm build binary wins over the vendored CPU build.
        assert llama_quantize_bin() == rocm_bin

    def test_dlm_llama_cpp_build_env_missing_binary_falls_through(
        self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """An env var pointing at an incomplete dir falls through to vendor."""
        bare_build = tmp_path / "build-rocm"
        bare_build.mkdir()
        vendor = _populate_vendor(tmp_path / "llama.cpp")
        monkeypatch.setenv("DLM_LLAMA_CPP_BUILD", str(bare_build))
        monkeypatch.setenv("DLM_LLAMA_CPP_ROOT", str(vendor))
        resolved = llama_quantize_bin()
        assert resolved.is_file()
        assert str(vendor) in str(resolved)

    def test_legacy_quantize_name_found(self, tmp_path: Path) -> None:
        """Pre-rename builds shipped `quantize` rather than `llama-quantize`."""
        vendor = _populate_vendor(tmp_path / "llama.cpp", with_binary=False)
        legacy_bin = vendor / "build" / "bin" / "quantize"
        legacy_bin.parent.mkdir(parents=True, exist_ok=True)
        legacy_bin.write_text("# legacy")
        legacy_bin.chmod(0o755)
        assert llama_quantize_bin(override=vendor).name == "quantize"
185
186
class TestPinnedTag:
    def test_reads_version_file(self, tmp_path: Path) -> None:
        """The VERSION file's stripped contents are the pinned tag."""
        vendor = _populate_vendor(tmp_path / "llama.cpp")
        assert pinned_tag(override=vendor) == "b1234"

    def test_missing_version_file_returns_none(self, tmp_path: Path) -> None:
        """No VERSION file means no pinned tag — not an error."""
        vendor = _populate_vendor(tmp_path / "llama.cpp")
        (vendor / "VERSION").unlink()
        assert pinned_tag(override=vendor) is None

    def test_missing_root_returns_none(self, tmp_path: Path) -> None:
        """A nonexistent vendor root also yields None rather than raising."""
        assert pinned_tag(override=tmp_path / "absent") is None

    def test_empty_version_file_returns_none(self, tmp_path: Path) -> None:
        """A whitespace-only VERSION file counts as no tag."""
        vendor = _populate_vendor(tmp_path / "llama.cpp")
        (vendor / "VERSION").write_text("\n")
        assert pinned_tag(override=vendor) is None