
Quantizing Without Breaking Your App

Audio engineers know which compression artifacts listeners notice and which they don't. A podcast can tolerate more compression than a symphony. The format matches the content's sensitivity.

Model quantization follows similar principles. Some layers and tasks tolerate aggressive quantization; others break noticeably. Knowing which is which is the key to quantizing without breaking your app.

The Quantization Spectrum

def quantization_options():
    return {
        "FP32": {
            "bits": 32,
            "memory_multiplier": 1.0,
            "quality": "Reference",
            "use_case": "Training, debugging",
        },
        "FP16": {
            "bits": 16,
            "memory_multiplier": 0.5,
            "quality": "Essentially identical",
            "use_case": "Standard inference",
        },
        "BF16": {
            "bits": 16,
            "memory_multiplier": 0.5,
            "quality": "Essentially identical",
            "use_case": "Training and inference on modern GPUs",
        },
        "FP8": {
            "bits": 8,
            "memory_multiplier": 0.25,
            "quality": "Very close to FP16",
            "use_case": "H100 inference",
        },
        "INT8": {
            "bits": 8,
            "memory_multiplier": 0.25,
            "quality": "Slight degradation",
            "use_case": "Memory-constrained inference",
        },
        "INT4": {
            "bits": 4,
            "memory_multiplier": 0.125,
            "quality": "Noticeable degradation",
            "use_case": "Extreme memory constraints",
        },
    }
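
To put the multipliers in concrete terms, here is the back-of-the-envelope math for a hypothetical 7B-parameter model (weights only; KV cache and activation memory come on top):

def weight_memory_gb(num_params: float, bits: int) -> float:
    """Approximate weight memory: parameters x bytes per parameter."""
    return num_params * (bits / 8) / 1e9

# Rough weight footprints for a 7B-parameter model
for name, bits in [("FP32", 32), ("FP16", 16), ("INT8", 8), ("INT4", 4)]:
    print(f"{name}: ~{weight_memory_gb(7e9, bits):.1f} GB")
# FP32 ~28.0 GB, FP16 ~14.0 GB, INT8 ~7.0 GB, INT4 ~3.5 GB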

Quality Loss by Task

def quality_sensitivity_by_task():
    """
    Different tasks have different tolerance for quantization error
    """
    return {
        "highly_tolerant": {
            "tasks": ["Classification", "Sentiment analysis", "Summarization"],
            "safe_quantization": "INT4",
            "typical_degradation": "1-2%",
        },
        "moderately_tolerant": {
            "tasks": ["General Q&A", "Chat", "Translation"],
            "safe_quantization": "INT8",
            "typical_degradation": "1-3%",
        },
        "sensitive": {
            "tasks": ["Code generation", "Math", "Precise extraction"],
            "safe_quantization": "FP8 or INT8",
            "typical_degradation": "3-5%",
        },
        "highly_sensitive": {
            "tasks": ["Legal document analysis", "Medical advice", "Financial calculations"],
            "safe_quantization": "FP16 or careful INT8",
            "typical_degradation": "Test extensively",
        },
    }
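
If you want a one-line starting point per task, a small lookup over that table works. The helper below is just a convenience over the dictionary above, not a substitute for testing:

def suggested_starting_precision(task: str) -> str:
    """Most aggressive quantization the table above considers safe for a task."""
    for profile in quality_sensitivity_by_task().values():
        if task in profile["tasks"]:
            return profile["safe_quantization"]
    return "FP16"  # unknown task: don't quantize until you've tested

print(suggested_starting_precision("Code generation"))  # "FP8 or INT8"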

Layer-Wise Sensitivity

class LayerSensitivity:
    """
    Not all layers are equally sensitive
    """

    analysis = {
        "embedding_layers": {
            "sensitivity": "Low",
            "reason": "Lookup table, no computation",
            "recommendation": "INT8 safe",
        },
        "attention_layers": {
            "sensitivity": "Medium",
            "reason": "Softmax amplifies errors",
            "recommendation": "INT8 usually fine, test carefully",
        },
        "mlp_layers": {
            "sensitivity": "Low-Medium",
            "reason": "ReLU-like activations clip errors",
            "recommendation": "INT8 safe, INT4 possible",
        },
        "output_layers": {
            "sensitivity": "High",
            "reason": "Direct impact on token probabilities",
            "recommendation": "Keep FP16 or careful INT8",
        },
        "layer_norms": {
            "sensitivity": "High",
            "reason": "Small values, precision matters",
            "recommendation": "Keep higher precision",
        },
    }
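
One practical way to act on this table is mixed precision: quantize the bulk of the model and skip the sensitive pieces. A minimal sketch with bitsandbytes, assuming a Llama-style model where the output projection is named lm_head (check your architecture's module names first; bitsandbytes only swaps out Linear layers, so layer norms stay in higher precision anyway):

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# INT8 for most weights, but keep the sensitive output projection unquantized.
# "lm_head" is the usual name in Llama-style models; adjust for your architecture.
config = BitsAndBytesConfig(
    load_in_8bit=True,
    llm_int8_skip_modules=["lm_head"],
)

model = AutoModelForCausalLM.from_pretrained(
    "your-model-id",  # placeholder
    quantization_config=config,
    device_map="auto",
)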

The Testing Strategy

class QuantizationTesting:
    """
    How to validate quantization quality
    """

    def __init__(self, original_model, quantized_model):
        self.original = original_model
        self.quantized = quantized_model

    def test_protocol(self) -> list:
        return [
            {
                "test": "Perplexity comparison",
                "threshold": "< 0.5 increase",
                "why": "General quality metric",
            },
            {
                "test": "Task-specific benchmarks",
                "threshold": "< 2% accuracy drop",
                "why": "Your actual use case",
            },
            {
                "test": "Output comparison on 100 prompts",
                "threshold": "ROUGE/BLEU > 0.95",
                "why": "Actual output similarity",
            },
            {
                "test": "Edge case prompts",
                "threshold": "Manual review passes",
                "why": "Quantization often fails on extremes",
            },
            {
                "test": "Long context handling",
                "threshold": "No quality cliff at length",
                "why": "Errors can accumulate",
            },
        ]
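
    # A minimal sketch of the first check above (assumes both models fit on the
    # device and that eval_texts is a held-out list of strings you supply):
    def perplexity_check(self, tokenizer, eval_texts, device="cuda"):
        """Compare average perplexity and flag regressions above the 0.5 threshold."""
        import torch

        @torch.no_grad()
        def avg_perplexity(model):
            losses = []
            for text in eval_texts:
                enc = tokenizer(text, return_tensors="pt", truncation=True).to(device)
                out = model(**enc, labels=enc["input_ids"])
                losses.append(out.loss.item())
            return float(torch.exp(torch.tensor(losses).mean()))

        return avg_perplexity(self.quantized) - avg_perplexity(self.original) < 0.5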

    def comparison_code(self):
        """Practical comparison sketch (load_test_prompts and compute_similarity are your own helpers)"""
        return """
        import torch
        from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

        # Load both variants of the same checkpoint
        model_id = "model"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        original = AutoModelForCausalLM.from_pretrained(
            model_id, torch_dtype=torch.float16, device_map="auto"
        )
        quantized = AutoModelForCausalLM.from_pretrained(
            model_id,
            quantization_config=BitsAndBytesConfig(load_in_8bit=True),
            device_map="auto",
        )

        # Same prompts for both models
        test_prompts = load_test_prompts()

        # Compare outputs prompt by prompt (greedy decoding keeps the runs comparable)
        for prompt in test_prompts:
            inputs = tokenizer(prompt, return_tensors="pt")
            orig_ids = original.generate(**inputs.to(original.device), max_new_tokens=256, do_sample=False)
            quant_ids = quantized.generate(**inputs.to(quantized.device), max_new_tokens=256, do_sample=False)

            orig_output = tokenizer.decode(orig_ids[0], skip_special_tokens=True)
            quant_output = tokenizer.decode(quant_ids[0], skip_special_tokens=True)

            # Check similarity against the 0.95 threshold from the protocol
            similarity = compute_similarity(orig_output, quant_output)
            if similarity < 0.95:
                print(f"Degradation detected: {prompt[:50]}...")
        """

AWQ vs GPTQ vs bitsandbytes

def quantization_methods():
    return {
        "bitsandbytes": {
            "approach": "Dynamic quantization",
            "pros": ["Easy to use", "No calibration needed"],
            "cons": ["Slower inference", "Less optimization"],
            "code": "load_in_8bit=True",
        },
        "GPTQ": {
            "approach": "Post-training quantization with calibration",
            "pros": ["Good quality", "Fast inference"],
            "cons": ["Needs calibration data", "Quantization takes time"],
            "code": "quantization='gptq'",
        },
        "AWQ": {
            "approach": "Activation-aware quantization",
            "pros": ["Fast quantization", "Good quality"],
            "cons": ["May need more GPU memory to quantize"],
            "code": "quantization='awq'",
        },
        "recommendation": {
            "for_quick_test": "bitsandbytes",
            "for_production": "AWQ or GPTQ (benchmark both)",
            "for_maximum_quality": "AWQ with careful layer selection",
        },
    }
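
For the quick-test path, the bitsandbytes shorthand expands to roughly this (the model ID is a placeholder; GPTQ and AWQ checkpoints are typically published pre-quantized and load through the same from_pretrained call, or through vLLM's quantization argument):

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Dynamic INT8 via bitsandbytes: no calibration data, one extra argument
model = AutoModelForCausalLM.from_pretrained(
    "your-model-id",  # placeholder
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
)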

Safe Quantization Workflow

def safe_quantization_workflow():
    return [
        {
            "step": 1,
            "action": "Establish baseline",
            "how": "Run full eval suite on FP16 model",
            "output": "Reference metrics",
        },
        {
            "step": 2,
            "action": "Try INT8 first",
            "how": "Quantize with AWQ or GPTQ",
            "output": "INT8 model",
        },
        {
            "step": 3,
            "action": "Run eval suite on INT8",
            "how": "Same prompts, same metrics",
            "output": "INT8 metrics",
        },
        {
            "step": 4,
            "action": "Compare and decide",
            "how": "Is degradation acceptable for your use case?",
            "threshold": "Usually < 2% is fine, > 5% is concerning",
        },
        {
            "step": 5,
            "action": "Test edge cases manually",
            "how": "Try prompts that matter most",
            "why": "Automated metrics miss some failures",
        },
        {
            "step": 6,
            "action": "Monitor in production",
            "how": "Track quality metrics post-deployment",
            "why": "Real traffic finds issues benchmarks miss",
        },
    ]
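
Steps 3 and 4 boil down to comparing two sets of metrics against a threshold. A minimal sketch using the 2% / 5% rule of thumb above (assumes higher is better for every metric):

def compare_metrics(baseline: dict, quantized: dict) -> dict:
    """Relative drop per metric, bucketed by the thresholds above."""
    verdicts = {}
    for name, ref in baseline.items():
        drop = (ref - quantized[name]) / ref
        if drop < 0.02:
            verdicts[name] = f"OK ({drop:.1%} drop)"
        elif drop < 0.05:
            verdicts[name] = f"Review ({drop:.1%} drop)"
        else:
            verdicts[name] = f"Concerning ({drop:.1%} drop)"
    return verdicts

# compare_metrics({"accuracy": 0.91}, {"accuracy": 0.90})
# -> {"accuracy": "OK (1.1% drop)"}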

When Not To Quantize

def when_to_avoid_quantization():
    return [
        {
            "scenario": "Not memory constrained",
            "reason": "Why accept any quality loss?",
            "alternative": "Use FP16",
        },
        {
            "scenario": "Precision-critical tasks",
            "reason": "Even 1% error might be unacceptable",
            "alternative": "Use FP16 or FP8",
        },
        {
            "scenario": "Small model already fast enough",
            "reason": "Complexity without benefit",
            "alternative": "Use FP16",
        },
        {
            "scenario": "Haven't tested thoroughly",
            "reason": "Unknown risk",
            "alternative": "Test first, then decide",
        },
    ]

Quantization is a tool for trading quality for efficiency. Use it when you need the efficiency and can afford the quality cost. Test thoroughly, monitor continuously, and be ready to roll back if quality issues emerge.