Python · 6128 bytes Raw Blame History
1 """CLI coverage for generic `run_export(...)` branches."""
2
3 from __future__ import annotations
4
5 from pathlib import Path
6 from types import SimpleNamespace
7 from typing import Any
8
9 import pytest
10 from typer.testing import CliRunner
11
12 from dlm.base_models import BaseModelSpec
13 from dlm.cli.app import app
14 from dlm.export.errors import ExportError, PreflightError, SubprocessError, UnsafeMergeError
15 from dlm.export.ollama.errors import (
16 OllamaCreateError,
17 OllamaError,
18 OllamaSmokeError,
19 OllamaVersionError,
20 )
21
22
23 def _joined_output(result: object) -> str:
24 text = getattr(result, "output", "") + getattr(result, "stderr", "")
25 return " ".join(text.split())
26
27
def _scaffold_doc(tmp_path: Path) -> Path:
    """Create a fresh ``doc.dlm`` via the CLI ``init`` command and return its path."""
    doc_path = tmp_path / "doc.dlm"
    argv = [
        "--home",
        str(tmp_path / "home"),
        "init",
        str(doc_path),
        "--base",
        "smollm2-135m",
    ]
    result = CliRunner().invoke(app, argv)
    # Scaffolding must succeed before any export test can run.
    assert result.exit_code == 0, result.output
    return doc_path
44
45
def _spec() -> BaseModelSpec:
    """Build a minimal, fully-populated spec for a fictional 1B-param base model."""
    payload = {
        "key": "demo-1b",
        "hf_id": "org/demo-1b",
        "revision": "0123456789abcdef0123456789abcdef01234567",
        "architecture": "DemoForCausalLM",
        "params": 1_000_000_000,
        "target_modules": ["q_proj", "v_proj"],
        "template": "chatml",
        "gguf_arch": "demo",
        "tokenizer_pre": "demo",
        "license_spdx": "Apache-2.0",
        "license_url": None,
        "requires_acceptance": False,
        "redistributable": True,
        "size_gb_fp16": 2.0,
        "context_length": 4096,
        "recommended_seq_len": 2048,
    }
    return BaseModelSpec.model_validate(payload)
67
68
def _patch_export_runtime(monkeypatch: pytest.MonkeyPatch) -> None:
    """Replace the export pipeline's collaborators with cheap in-memory fakes.

    Patches spec resolution, base-model download, modality lookup, gate
    fallback, and target resolution so CLI tests never touch the network
    or real model files.
    """
    fakes = {
        "dlm.base_models.resolve": lambda *args, **kwargs: _spec(),
        "dlm.base_models.download_spec": lambda *args, **kwargs: SimpleNamespace(
            path=Path("/tmp/base-cache")
        ),
        "dlm.modality.modality_for": lambda spec: SimpleNamespace(
            accepts_images=False, accepts_audio=False
        ),
        "dlm.export.gate_fallback.resolve_and_announce": lambda store, parsed: SimpleNamespace(
            entries=None, banner_lines=[]
        ),
        "dlm.export.targets.resolve_target": lambda name: SimpleNamespace(name="ollama"),
    }
    for dotted_path, fake in fakes.items():
        monkeypatch.setattr(dotted_path, fake)
87
88
class TestExportRunErrors:
    """CLI `export` coverage: verbose success output and error-to-exit-code mapping."""

    def test_verbose_success_prints_shell_command_and_cached_tag(
        self,
        tmp_path: Path,
        monkeypatch: pytest.MonkeyPatch,
    ) -> None:
        """A successful verbose export echoes the shell command, cache tag,
        ollama tag/version, and the first smoke-test line, and forwards the
        resolved base-cache dir and target to `run_export`."""
        doc = _scaffold_doc(tmp_path)
        seen_kwargs: dict[str, Any] = {}

        _patch_export_runtime(monkeypatch)

        def fake_run_export(
            store: object,
            spec: object,
            plan: object,
            **kwargs: object,
        ) -> object:
            # Record the keyword wiring so it can be asserted on afterwards.
            seen_kwargs.update(kwargs)
            shell = kwargs["subprocess_runner"]
            assert callable(shell)
            # Invoking the runner exercises the verbose "$ cmd" echo path.
            shell(["llama-quantize", "--version"])
            return SimpleNamespace(
                cached=True,
                export_dir=tmp_path / "exports" / "Q4_K_M",
                artifacts=[
                    SimpleNamespace(name="base.gguf"),
                    SimpleNamespace(name="adapter.gguf"),
                ],
                target="ollama",
                ollama_name="demo-model",
                ollama_version=1,
                smoke_output_first_line="hello smoke",
            )

        monkeypatch.setattr("dlm.export.run_export", fake_run_export)
        monkeypatch.setattr(
            "dlm.export.quantize.run_checked",
            lambda cmd: SimpleNamespace(returncode=0),
        )

        result = CliRunner().invoke(
            app,
            ["--home", str(tmp_path / "home"), "export", str(doc), "--verbose"],
        )

        assert result.exit_code == 0, result.output
        text = _joined_output(result)
        for needle in (
            "$ llama-quantize --version",
            "(cached base)",
            "ollama: demo-model (v1)",
            "smoke: hello smoke",
        ):
            assert needle in text
        assert seen_kwargs["cached_base_dir"] == Path("/tmp/base-cache")
        assert seen_kwargs["target"] == "ollama"

    @pytest.mark.parametrize(
        ("error", "needle"),
        [
            (UnsafeMergeError("needs --dequantize"), "merge:"),
            (
                PreflightError(probe="template", detail="template mismatch"),
                "preflight: template mismatch",
            ),
            (
                SubprocessError(
                    cmd=["llama-quantize"],
                    returncode=3,
                    stderr_tail="quantize failed",
                ),
                "subprocess:",
            ),
            (
                OllamaVersionError(detected=(0, 1, 0), required=(0, 6, 0)),
                "ollama:",
            ),
            (OllamaCreateError(stdout="", stderr="create failed"), "ollama create:"),
            (OllamaSmokeError(stdout="", stderr="smoke failed"), "smoke:"),
            (OllamaError("generic ollama error"), "ollama:"),
            (ExportError("plain export failure"), "export:"),
        ],
    )
    def test_run_export_error_mappings_exit_1(
        self,
        tmp_path: Path,
        monkeypatch: pytest.MonkeyPatch,
        error: Exception,
        needle: str,
    ) -> None:
        """Each `run_export` exception family exits 1 with its tagged message."""
        doc = _scaffold_doc(tmp_path)

        _patch_export_runtime(monkeypatch)

        def raise_error(*args: object, **kwargs: object) -> object:
            raise error

        monkeypatch.setattr("dlm.export.run_export", raise_error)

        result = CliRunner().invoke(
            app,
            ["--home", str(tmp_path / "home"), "export", str(doc)],
        )

        assert result.exit_code == 1, result.output
        text = _joined_output(result)
        assert needle in text
        if isinstance(error, OllamaSmokeError):
            # The smoke-failure path must advertise the --no-smoke escape hatch.
            assert "re-run with `--no-smoke`" in text