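# Quantization recipe (llm-compressor's serialized recipe format): applies FP8
# block quantization to all Linear layers, while skipping the lm_head,
# self-attention, router, vision model, and multi-modal projector modules.
# Entries prefixed with 're:' are regex matches against module names.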
default_stage:
  default_modifiers:
    QuantizationModifier:
      targets: [Linear]
      ignore: ['re:.*lm_head', 're:.*self_attn', 're:.*router', 're:.*vision_model.*',
        're:.*multi_modal_projector.*', Llama4TextAttention]
      scheme: FP8_BLOCK
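# A minimal application sketch, kept as comments so this file stays valid YAML.
# Assumptions (not from this repo): llm-compressor's `oneshot` entrypoint, this
# file saved as recipe.yaml, and an illustrative model id / output path.
#
#   from llmcompressor import oneshot
#
#   oneshot(
#       model="meta-llama/Llama-4-Scout-17B-16E-Instruct",  # illustrative model id
#       recipe="recipe.yaml",                               # this recipe file
#       output_dir="Llama-4-Scout-FP8-BLOCK",               # illustrative path
#   )
#
# Since FP8_BLOCK uses static block-wise weight scales with dynamically computed
# activation scales, no calibration dataset should be needed for this pass.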