Code

#4
by erichartford - opened

I vibe-coded a script that can quantize these; I'm putting it here in case it's helpful.

import argparse
import os
from datetime import datetime

import torch
from transformers import AutoModelForCausalLM, AutoProcessor, AutoTokenizer

from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier


def parse_args():
    """Parse command-line options for the CPU-only FP8_DYNAMIC quantization run."""
    parser = argparse.ArgumentParser(
        description="Run CPU-only FP8_DYNAMIC quantization for a Qwen3.5 model."
    )
    # Register all options from one table so flags and their settings read side by side.
    option_table = (
        (
            "--model",
            dict(
                default="Qwen/Qwen3.5-397B-A17B",
                help="Model path or HF model ID.",
            ),
        ),
        (
            "--recipe",
            dict(
                default=None,
                help="Optional path to quantization recipe YAML. If omitted, use built-in FP8_DYNAMIC recipe.",
            ),
        ),
        (
            "--output",
            dict(
                default=None,
                help="Output model directory. Default: ./{model}-FP8-Dynamic",
            ),
        ),
        (
            "--threads",
            dict(
                type=int,
                default=os.cpu_count() or 1,
                help="CPU thread count for torch/OMP/MKL/numexpr.",
            ),
        ),
    )
    for flag, settings in option_table:
        parser.add_argument(flag, **settings)
    return parser.parse_args()


def configure_cpu_only(threads: int) -> None:
    """Force CPU-only execution and pin all math-library thread pools.

    Hides CUDA devices from torch and sets the thread count for torch
    intra-op parallelism plus the OMP/MKL/numexpr environment knobs.

    Args:
        threads: Number of CPU threads to use; must be >= 1.

    Raises:
        ValueError: If ``threads`` is less than 1.
    """
    if threads < 1:
        raise ValueError(f"threads must be >= 1, got {threads}")

    # Hide every CUDA device so torch never initializes a GPU context.
    # (Only effective if CUDA has not been initialized yet in this process.)
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    for var in ("OMP_NUM_THREADS", "MKL_NUM_THREADS", "NUMEXPR_NUM_THREADS"):
        os.environ[var] = str(threads)

    torch.set_num_threads(threads)
    # set_num_interop_threads can only be called once per process and raises
    # RuntimeError if the inter-op pool has already started; treat that as
    # best-effort instead of aborting the whole quantization run.
    try:
        torch.set_num_interop_threads(min(32, threads))
    except RuntimeError:
        pass


def default_recipe():
    """Build the built-in recipe used when no YAML recipe path is supplied.

    Targets every Linear layer with the FP8_DYNAMIC scheme, skipping the
    modules listed below.
    """
    # Modules excluded from quantization: the LM head, vision-tower weights,
    # MoE gates, embeddings, shared-expert gates, and linear-attention layers.
    skip_patterns = [
        "re:.*lm_head",
        "re:visual.*",
        "re:model.visual.*",
        "re:.*mlp.gate$",
        "re:.*embed_tokens$",
        "re:.*shared_expert_gate$",
        "re:.*linear_attn.*",
    ]
    return QuantizationModifier(
        targets=["Linear"],
        ignore=skip_patterns,
        scheme="FP8_DYNAMIC",
    )


def default_output_dir(model: str) -> str:
    """Return the default save directory derived from the model identifier.

    NOTE(review): an HF model ID such as "org/name" contains a slash, so the
    result nests under ./org/ — confirm that layout is intended.
    """
    return "./" + model + "-FP8-Dynamic"


def main():
    """Entry point: configure CPU-only execution, load, quantize, and save."""
    args = parse_args()
    configure_cpu_only(args.threads)
    output_dir = args.output or default_output_dir(args.model)

    def log(message: str) -> None:
        # Every progress line carries an ISO-8601 timestamp prefix.
        print(f"[{datetime.now().isoformat()}] {message}")

    start = datetime.now()
    print(f"[{start.isoformat()}] Starting CPU-only FP8_DYNAMIC quantization")
    for label, value in (
        ("model", args.model),
        ("recipe", args.recipe or "<built-in FP8_DYNAMIC recipe>"),
        ("output", output_dir),
        ("threads", args.threads),
    ):
        print(f"{label}={value}")

    log("Loading model on CPU...")
    model = AutoModelForCausalLM.from_pretrained(
        args.model,
        dtype="auto",
        device_map="cpu",
        trust_remote_code=True,
    )

    log("Loading processor/tokenizer...")
    # Prefer the full processor (multimodal checkpoints need it); fall back
    # to the plain tokenizer when processor loading fails for any reason.
    try:
        processor_or_tokenizer = AutoProcessor.from_pretrained(
            args.model, trust_remote_code=True
        )
    except Exception:
        processor_or_tokenizer = AutoTokenizer.from_pretrained(
            args.model, trust_remote_code=True
        )

    log("Running oneshot quantization...")
    recipe = args.recipe or default_recipe()
    oneshot(model=model, recipe=recipe)

    log("Saving compressed model...")
    model.save_pretrained(output_dir, save_compressed=True)
    processor_or_tokenizer.save_pretrained(output_dir)

    elapsed = datetime.now() - start
    log(f"Done. Elapsed={elapsed}")


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()

I also had to have Claude Code patch llm-compressor to work with the latest torch and transformers, since llm-compressor is pinned to older versions of those packages.

Sign up or log in to comment