{
  "schema": 1,
  "bases": {
    "mistral-small-3.1-24b-instruct": {
      "architecture": "Mistral3ForConditionalGeneration",
      "support": "SUPPORTED",
      "llama_cpp_tag": "b8816",
      "reason": "'Mistral3ForConditionalGeneration' registered on Mistral3Model in llama.cpp tag=b8816; LM converts cleanly via convert_hf_to_gguf.py."
    },
    "paligemma-3b-mix-224": {
      "architecture": "PaliGemmaForConditionalGeneration",
      "support": "UNSUPPORTED",
      "llama_cpp_tag": "b8816",
      "reason": "'PaliGemmaForConditionalGeneration' not found in any @ModelBase.register(...) decorator \u2014 vendored llama.cpp (tag=b8816) does not know this architecture. GGUF conversion would fail."
    },
    "qwen2-vl-2b-instruct": {
      "architecture": "Qwen2VLForConditionalGeneration",
      "support": "SUPPORTED",
      "llama_cpp_tag": "b8816",
      "reason": "'Qwen2VLForConditionalGeneration' registered on Qwen2VLModel in llama.cpp tag=b8816; LM converts cleanly via convert_hf_to_gguf.py."
    },
    "internvl2-2b": {
      "architecture": "InternVLChatModel",
      "support": "UNSUPPORTED",
      "llama_cpp_tag": "b8816",
      "reason": "'InternVLChatModel' not found in any @ModelBase.register(...) decorator \u2014 vendored llama.cpp (tag=b8816) does not know this architecture. GGUF conversion would fail."
    }
  }
}