@@ -140,6 +140,27 @@ class DifferentialBackend(Protocol):
     def as_finetuned(self) -> AbstractContextManager[_ScoringModel]: ...
 
 
+@runtime_checkable
+class ScalableDifferentialBackend(DifferentialBackend, Protocol):
+    """A differential backend that can also scale the LoRA additive term.
+
+    LoRA applies ``W + (alpha/r) · B @ A`` to a base weight matrix. This
+    protocol exposes a context manager that temporarily multiplies that
+    additive term by ``lam`` for everything inside the ``with`` block.
+
+    ``lam = 0.0`` is equivalent to :meth:`as_base`.
+    ``lam = 1.0`` is equivalent to :meth:`as_finetuned`.
+    ``lam = 1.25`` overshoots — useful for N2 AdapterAblation's
+    response-curve measurement.
+
+    Only the HF backend ships an implementation in v0.1. Probes that
+    need scaling check via ``isinstance(backend, ScalableDifferentialBackend)``
+    at runtime and SKIP gracefully when unavailable.
+    """
+
+    def as_scaled_adapter(self, lam: float) -> AbstractContextManager[_ScoringModel]: ...
+
+
 # Helper Protocol for type-checking the yielded context object: it
 # must satisfy both Model and ScoringBackend. mypy doesn't support
 # intersection types, so we spell it out explicitly.
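
To make the intended call pattern concrete, here is a minimal probe-side sketch of the isinstance-check-then-SKIP flow the docstring describes. The `response_curve` helper and the `score()` call on the yielded model are hypothetical stand-ins, since the excerpt doesn't show `_ScoringModel`'s interface:

```python
from __future__ import annotations


def response_curve(backend: DifferentialBackend, prompt: str) -> dict[float, float] | None:
    """Sweep lam and score ``prompt`` at each point; None signals a probe SKIP."""
    if not isinstance(backend, ScalableDifferentialBackend):
        return None  # non-HF backends in v0.1: SKIP gracefully
    curve: dict[float, float] = {}
    # 0.0 reproduces as_base(), 1.0 reproduces as_finetuned(), 1.25 overshoots.
    for lam in (0.0, 0.5, 1.0, 1.25):
        with backend.as_scaled_adapter(lam) as model:
            curve[lam] = model.score(prompt)  # hypothetical _ScoringModel method
    return curve
```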
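
And a sketch of how the HF backend's method might satisfy the contract, assuming it wraps a PEFT model: PEFT's `LoraLayer` keeps the per-adapter `alpha/r` factor in its `scaling` dict, so multiplying that factor by `lam` scales the additive term, and restoring it on exit undoes the change. `self.model` is an assumed attribute and `scaling` is a PEFT internal, so treat this as illustrative rather than the shipped v0.1 code:

```python
from contextlib import contextmanager

from peft.tuners.lora import LoraLayer


@contextmanager
def as_scaled_adapter(self, lam: float):
    """Temporarily multiply every LoRA layer's alpha/r scaling by ``lam``."""
    saved = {}
    for module in self.model.modules():  # assumes self.model is a PeftModel
        if isinstance(module, LoraLayer):
            saved[module] = dict(module.scaling)
            module.scaling = {name: s * lam for name, s in module.scaling.items()}
    try:
        yield self.model  # plays the _ScoringModel role in this sketch
    finally:
        for module, scaling in saved.items():
            module.scaling = scaling  # restore the exact original factors
```

With this approach, `lam = 0.0` zeroes the additive term in place rather than unloading the adapter, which is one way to satisfy the `as_base` equivalence the docstring promises.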