How Much Data You Actually Need
A chef learning a new cuisine doesn't eat at 1,000 mediocre restaurants. They study 50 meals from masters. Quality of instruction matters more than quantity. Bad examples teach bad habits that take longer to unlearn.
Fine-tuning follows the same principle. A model learning from 1,000 carefully curated examples often outperforms one trained on 100,000 scraped examples. The model has limited capacity to learn. Every noisy example wastes capacity that could capture real patterns.
Quality Beats Quantity
def quality_vs_quantity_evidence():
return {
"common_observation": """
Experiment: Same model, same task, varying data:
Dataset A: 100,000 examples, scraped, noisy
Dataset B: 5,000 examples, human-curated
Dataset C: 1,000 examples, expert-verified
Typical results:
Dataset A: 78% accuracy
Dataset B: 85% accuracy
Dataset C: 83% accuracy
5% of the data achieved 7 points higher accuracy.
""",
"why_quality_wins": [
"Noisy labels confuse the model",
"Inconsistent formats split the learning signal",
"Wrong examples teach wrong patterns",
"Model capacity is finite, don't waste it",
],
"lima_paper_finding": """
'LIMA: Less Is More for Alignment'
- 1,000 carefully curated examples
- Competitive with models trained on 50K+
- Quality curation was 95% of the work
""",
}
Minimum Data Requirements
def minimum_data_by_task():
return {
"style_adaptation": {
"examples_needed": "100-500",
"why_few": "Style is a surface pattern, easy to learn",
"quality_requirement": "Consistent style in all examples",
},
"format_learning": {
"examples_needed": "200-1000",
"why_few": "Format is structural, models learn quickly",
"quality_requirement": "Perfect format compliance in all examples",
},
"domain_terminology": {
"examples_needed": "500-2000",
"why_more": "Vocabulary is distributed, needs coverage",
"quality_requirement": "Correct terminology, varied contexts",
},
"behavioral_patterns": {
"examples_needed": "1000-5000",
"why_more": "Behavior requires consistent demonstration",
"quality_requirement": "Every example demonstrates desired behavior",
},
"complex_reasoning": {
"examples_needed": "5000-20000",
"why_many": "Reasoning patterns are diverse, need coverage",
"quality_requirement": "Correct reasoning in every example",
"note": "Consider if this is right task for fine-tuning",
},
"rule_of_thumb": """
Start with 1/10 of what you think you need.
Train, evaluate, add more only if quality insufficient.
Often the small dataset is enough.
""",
}
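The rule of thumb above is easy to turn into a loop. Here is a minimal sketch, assuming you pass in your own fine_tune and evaluate callables (whatever training and evaluation harness you already use); the 0.85 target score is a placeholder, not a recommendation.

import random

def grow_until_good_enough(curated_data, eval_set, fine_tune, evaluate, target_score=0.85):
    """Start with ~1/10 of the data and double it only when evaluation says to."""
    random.shuffle(curated_data)
    size = max(1, len(curated_data) // 10)
    while True:
        subset = curated_data[:size]
        model = fine_tune(subset)          # your training harness
        score = evaluate(model, eval_set)  # your evaluation harness
        print(f"{len(subset)} examples -> {score:.3f}")
        if score >= target_score or size >= len(curated_data):
            return model, subset
        size = min(len(curated_data), size * 2)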
What Makes Quality Data
def quality_data_criteria():
return {
"correctness": {
"definition": "Outputs are actually right",
"why_critical": "Wrong outputs teach wrong answers",
"how_to_ensure": "Human verification, automated validation",
"common_issue": "GPT-generated data with hallucinations",
},
"consistency": {
"definition": "Same task, same format, same style",
"why_critical": "Inconsistency fragments the learning signal",
"how_to_ensure": "Clear guidelines, format validation",
"common_issue": "Multiple annotators, no style guide",
},
"diversity": {
"definition": "Coverage of task variations",
"why_critical": "Model needs to generalize, not memorize",
"how_to_ensure": "Stratified sampling, gap analysis",
"common_issue": "All examples are too similar",
},
"relevance": {
"definition": "Examples match actual use case",
"why_critical": "Training distribution should match inference",
"how_to_ensure": "Sample from production queries",
"common_issue": "Synthetic data unlike real queries",
},
"balance": {
"definition": "Proportional representation of cases",
"why_critical": "Imbalanced data biases model to common cases",
"how_to_ensure": "Explicit balancing, weighted sampling",
"common_issue": "80% examples are one type",
},
}
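Several of these criteria can be screened mechanically before any human review; correctness and relevance still need human eyes. A minimal audit sketch, assuming each example is a dict with prompt, response, and category keys, which is an assumption about your schema rather than a requirement:

from collections import Counter

REQUIRED_KEYS = {"prompt", "response", "category"}  # assumed schema

def audit_dataset(examples):
    """Cheap automated checks for field consistency, duplicates, and balance."""
    issues = []

    # Consistency: same fields everywhere, no empty text
    for i, ex in enumerate(examples):
        if not REQUIRED_KEYS.issubset(ex):
            issues.append(f"example {i}: missing keys {REQUIRED_KEYS - set(ex)}")
        elif not ex["prompt"].strip() or not ex["response"].strip():
            issues.append(f"example {i}: empty prompt or response")

    # Near-exact duplicates: whitespace/case-normalized prompt text
    seen = set()
    for i, ex in enumerate(examples):
        key = " ".join(str(ex.get("prompt", "")).lower().split())
        if key and key in seen:
            issues.append(f"example {i}: duplicate prompt")
        seen.add(key)

    # Balance: flag any category holding more than half the data
    counts = Counter(ex.get("category", "unknown") for ex in examples)
    for category, count in counts.items():
        if count / max(len(examples), 1) > 0.5:  # illustrative threshold
            issues.append(f"category '{category}' covers {count}/{len(examples)} examples")

    return issues

Run it before and after every curation pass; an empty issues list is a necessary signal, not a sufficient one.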
Data Curation Pipeline
import random

class DataCurationPipeline:
"""
Pipeline for creating quality fine-tuning data
"""
def curate(self, raw_data: list) -> list:
"""Multi-stage curation"""
# Stage 1: Deduplication
data = self.deduplicate(raw_data)
print(f"After dedup: {len(data)} (removed {len(raw_data)-len(data)})")
# Stage 2: Format validation
data = self.validate_format(data)
print(f"After format check: {len(data)}")
# Stage 3: Quality filtering
data = self.filter_quality(data)
print(f"After quality filter: {len(data)}")
# Stage 4: Diversity sampling
data = self.ensure_diversity(data)
print(f"After diversity sampling: {len(data)}")
# Stage 5: Balance
data = self.balance_distribution(data)
print(f"Final dataset: {len(data)}")
return data
def filter_quality(self, data: list) -> list:
"""Remove low-quality examples"""
filtered = []
for example in data:
if self.is_low_quality(example):
continue
if self.has_format_issues(example):
continue
if self.is_potential_hallucination(example):
continue
filtered.append(example)
return filtered
def ensure_diversity(self, data: list, min_per_cluster: int = 10) -> list:
"""Ensure coverage of different example types"""
# Cluster by semantic similarity
clusters = self.cluster_examples(data)
# Sample from each cluster
sampled = []
for cluster_id, examples in clusters.items():
if len(examples) < min_per_cluster:
sampled.extend(examples) # Keep all from small clusters
else:
sampled.extend(random.sample(examples, min_per_cluster))
return sampled
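The pipeline above leaves its helper methods abstract. Two of them are sketched below as standalone functions (inside the class they would take self): exact-duplicate removal via normalized hashing, and rough semantic grouping using scikit-learn's TfidfVectorizer and KMeans, which is one convenient choice rather than the only one. Both assume each example is a dict with a prompt field.

import hashlib
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer

def deduplicate(data: list) -> list:
    """Drop exact duplicates after lowercasing and collapsing whitespace."""
    seen, unique = set(), []
    for example in data:
        normalized = " ".join(example["prompt"].lower().split())
        digest = hashlib.sha256(normalized.encode()).hexdigest()
        if digest not in seen:
            seen.add(digest)
            unique.append(example)
    return unique

def cluster_examples(data: list, n_clusters: int = 20) -> dict:
    """Group examples by rough textual similarity (TF-IDF + KMeans)."""
    texts = [example["prompt"] for example in data]
    vectors = TfidfVectorizer().fit_transform(texts)
    labels = KMeans(n_clusters=min(n_clusters, len(data))).fit_predict(vectors)
    clusters = {}
    for example, label in zip(data, labels):
        clusters.setdefault(int(label), []).append(example)
    return clusters

Near-duplicate detection with embeddings catches more, but even exact normalization often removes a meaningful share of scraped data.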
Synthetic Data Pitfalls
def synthetic_data_pitfalls():
return {
"hallucinations_in_training": {
"problem": "GPT-generated data contains made-up facts",
"impact": "Model learns to confidently hallucinate",
"mitigation": "Verify factual claims, use structured generation",
},
"mode_collapse": {
"problem": "Synthetic data too uniform",
"impact": "Model only learns one way to respond",
"mitigation": "Vary prompts, temperature, contexts",
},
"distribution_shift": {
"problem": "Synthetic data unlike real queries",
"impact": "Good on synthetic, bad on real",
"mitigation": "Seed with real examples, validate on real data",
},
"circular_training": {
"problem": "Training on model's own outputs",
"impact": "Amplifies model biases and errors",
"mitigation": "Use different model for generation, human verification",
},
"best_practice": """
Synthetic data is for augmentation, not foundation.
Good pattern:
1. Start with real examples (even if few)
2. Augment with synthetic variations
3. Human-verify synthetic additions
4. Evaluate on real held-out data
Bad pattern:
1. Generate 100K synthetic examples
2. Train on synthetic
3. Wonder why production is bad
""",
}
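To make the good pattern concrete, here is a minimal sketch. The generation model is passed in as a generate_variation callable because that choice is yours; the needs_human_review flag and the 20% real-only holdout are illustrative assumptions, not fixed rules.

import random

def augment_with_synthetic(real_examples, generate_variation, variations_per_seed=3,
                           holdout_fraction=0.2):
    """Seed with real data, add verified synthetic variations, evaluate on real data only."""
    random.shuffle(real_examples)
    holdout_size = max(1, int(len(real_examples) * holdout_fraction))
    eval_set = real_examples[:holdout_size]   # held-out real data, never trained on
    seeds = real_examples[holdout_size:]

    synthetic = []
    for seed in seeds:
        for _ in range(variations_per_seed):
            candidate = generate_variation(seed)    # your generation model
            candidate["needs_human_review"] = True  # verify before it enters training
            synthetic.append(candidate)

    return seeds + synthetic, eval_set

The important property is structural: synthetic examples never enter training unverified, and evaluation never touches synthetic data.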
Data Quantity Experiments
def data_quantity_experiment():
"""
How to determine right data quantity for your task
"""
return {
"experiment_design": """
1. Prepare your full curated dataset
2. Create subsets: 10%, 25%, 50%, 100%
3. Train model on each subset (same hyperparameters)
4. Evaluate each on held-out test set
5. Plot learning curve
""",
"interpreting_results": {
"curve_flattens_early": {
"pattern": "10% nearly as good as 100%",
"meaning": "Task is simple, small data sufficient",
"action": "Use smaller dataset, invest in quality",
},
"curve_keeps_rising": {
"pattern": "Each increase improves quality",
"meaning": "Task benefits from more data",
"action": "Collect more data if quality is insufficient",
},
"curve_is_noisy": {
"pattern": "No clear trend",
"meaning": "Data quality issues or task mismatch",
"action": "Fix data quality before adding more",
},
},
"typical_finding": """
Most tasks flatten between 1K-5K high-quality examples.
Beyond 10K, diminishing returns are severe.
Quality improvements often beat quantity increases.
""",
}
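A sketch of that experiment, with fine_tune and evaluate passed in as callables since they depend entirely on your stack. Keep hyperparameters identical across runs so data size is the only variable.

import random

def learning_curve(curated_data, test_set, fine_tune, evaluate,
                   fractions=(0.1, 0.25, 0.5, 1.0)):
    """Train on nested subsets of the data and report accuracy at each size."""
    random.shuffle(curated_data)
    results = []
    for fraction in fractions:
        subset = curated_data[: max(1, int(len(curated_data) * fraction))]
        model = fine_tune(subset)             # same hyperparameters every run
        accuracy = evaluate(model, test_set)
        results.append((len(subset), accuracy))
        print(f"{len(subset):>6} examples -> {accuracy:.3f}")
    return results  # plot size vs. accuracy to see where the curve flattens

Plot the (size, accuracy) pairs; the point where the curve flattens is roughly where extra quantity stops paying for itself.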
Curation vs Collection Trade-off
def curation_vs_collection():
return {
"time_investment": {
"collect_100k_uncurated": "10 hours (scraping, API calls)",
"curate_100k_to_5k": "40 hours (filtering, verification)",
"create_1k_from_scratch": "20 hours (expert annotation)",
},
"outcome_quality": {
"100k_uncurated": "Often 75-80% task accuracy",
"5k_curated_from_100k": "Often 85-90% task accuracy",
"1k_expert_created": "Often 82-88% task accuracy",
},
"recommendation": """
Best ROI path:
1. Define clear quality criteria
2. Collect 10x what you need (cheap, automated)
3. Filter aggressively (2-5x reduction)
4. Human verify remainder
5. Train on quality subset
Don't skip filtering because you have lots of data.
The filtering IS the work.
""",
}
Practical Guidelines
def practical_guidelines():
return {
"starting_point": {
"style_tasks": "Start with 200 examples",
"format_tasks": "Start with 500 examples",
"domain_tasks": "Start with 1000 examples",
},
"when_to_add_more": [
"Validation loss still decreasing at end of training",
"Clear gap between training and validation performance",
"Model fails on specific subcategories (add those)",
],
"when_to_improve_quality": [
"Training loss very low but validation not improving",
"Model outputs are inconsistent",
"Model reproduces errors from training data",
],
"data_audit_checklist": [
"[ ] Every example verified correct",
"[ ] Format consistent across all examples",
"[ ] Coverage of expected input variations",
"[ ] No duplicates or near-duplicates",
"[ ] Balanced across important categories",
"[ ] Matches real production distribution",
],
}
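The loss-based signals in these guidelines can be folded into a rough triage helper. A sketch with purely illustrative thresholds, to be calibrated against your own runs:

def diagnose_next_step(train_losses, val_losses, gap_threshold=0.3, low_train_loss=0.1):
    """Rough triage from loss curves: add data, improve quality, or leave it alone.

    Thresholds are illustrative, not calibrated.
    """
    still_improving = len(val_losses) > 1 and val_losses[-1] < min(val_losses[:-1])
    gap = val_losses[-1] - train_losses[-1]

    if still_improving:
        return "Validation loss still falling: more data or more training may help."
    if gap > gap_threshold:
        return "Large train/val gap: likely overfitting; add data or regularize."
    if train_losses[-1] < low_train_loss:
        return "Training loss near zero, validation flat: improve data quality."
    return "Curves look settled: audit outputs by hand before changing anything."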
The myth that more data is always better wastes resources and often hurts quality. A thousand carefully verified examples teach more than 100,000 noisy ones. Invest in curation, verify everything, and scale data quantity only when quality is already high.