Safe Rollouts for LLM Changes
Airlines don't test new engines by putting them on a full passenger flight. They run them on test flights, then cargo routes, and eventually passenger routes with experienced crews. Each stage adds confidence before full deployment.
Model rollouts deserve the same graduated approach. LLM outputs are hard to predict, and bad outputs can reach thousands of users in minutes. Canary deployments let you find problems before they scale.
The Canary Pattern
import asyncio
import random
import time

class CanaryDeployment:
    """Gradual rollout with output comparison."""

    def __init__(self, baseline_model, canary_model):
        self.baseline = baseline_model
        self.canary = canary_model
        self.canary_percentage = 0  # Start at 0%

    def route_request(self, request) -> str:
        """Decide which model handles this request."""
        if random.random() * 100 < self.canary_percentage:
            return "canary"
        return "baseline"

    async def handle_with_comparison(self, request):
        """Serve one model's output, log both for comparison."""
        route = self.route_request(request)
        if route == "canary":
            output = await self.canary.generate(request)
            # Also run baseline for comparison (async, don't block the response)
            asyncio.create_task(self.log_comparison(request, output))
        else:
            output = await self.baseline.generate(request)
        return output

    async def log_comparison(self, request, canary_output):
        """Compare canary to baseline for offline analysis."""
        baseline_output = await self.baseline.generate(request)
        comparison = {
            "request_id": request.id,
            "canary_output": canary_output,
            "baseline_output": baseline_output,
            "similarity": self.compute_similarity(canary_output, baseline_output),
            "timestamp": time.time(),
        }
        await self.log_to_comparison_store(comparison)
The Rollout Schedule
def recommended_rollout_schedule():
    return {
        "stage_1": {
            "percentage": 1,
            "duration": "1 hour",
            "criteria_to_advance": [
                "No error rate increase",
                "Latency within 10% of baseline",
                "No quality alerts",
            ],
        },
        "stage_2": {
            "percentage": 5,
            "duration": "4 hours",
            "criteria_to_advance": [
                "Error rate stable",
                "User feedback neutral or positive",
                "Output comparison shows no regressions",
            ],
        },
        "stage_3": {
            "percentage": 25,
            "duration": "12 hours",
            "criteria_to_advance": [
                "Metrics stable across traffic patterns",
                "No edge case failures discovered",
            ],
        },
        "stage_4": {
            "percentage": 50,
            "duration": "24 hours",
            "criteria_to_advance": [
                "Confidence in quality",
                "No rollback triggers",
            ],
        },
        "stage_5": {
            "percentage": 100,
            "duration": "Complete",
            "action": "Decommission baseline",
        },
    }
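Here is a minimal sketch of how the schedule could drive the actual ramp. It assumes a check_criteria(stage) hook that evaluates a stage's advance criteria against your metrics store; that hook, and the exact control flow, are illustrative rather than prescriptive.

async def run_rollout(deployment, check_criteria):
    """Walk the schedule, advancing only when a stage's criteria pass.

    `deployment` is a CanaryDeployment; `check_criteria` is an assumed async
    callable that returns True once a stage's advance criteria hold.
    Waiting out each stage's duration before checking is elided here.
    """
    schedule = recommended_rollout_schedule()
    for stage_name, stage in schedule.items():
        deployment.canary_percentage = stage["percentage"]
        if stage["percentage"] == 100:
            break  # Full rollout reached; decommission baseline out of band
        if not await check_criteria(stage):
            deployment.canary_percentage = 0  # Failed gate: roll back to 0%
            return f"rolled_back_at_{stage_name}"
    return "completed"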
Output Comparison
class OutputComparison:
    """Detect quality regressions automatically."""

    def compare_outputs(self, baseline: str, canary: str) -> dict:
        return {
            "token_similarity": self.token_overlap(baseline, canary),
            "semantic_similarity": self.embedding_similarity(baseline, canary),
            "length_ratio": len(canary) / len(baseline) if baseline else 0,
            "structure_match": self.structure_match(baseline, canary),
        }

    def detect_regression(self, comparisons: list) -> dict:
        """Analyze a batch of comparisons for regressions."""
        similarities = [c["semantic_similarity"] for c in comparisons]
        return {
            "mean_similarity": sum(similarities) / len(similarities),
            "low_similarity_rate": sum(1 for s in similarities if s < 0.8) / len(similarities),
            "regression_detected": any(s < 0.5 for s in similarities),
            "samples_analyzed": len(comparisons),
        }

    def alert_conditions(self) -> list:
        return [
            {"condition": "mean_similarity < 0.9", "severity": "warning"},
            {"condition": "low_similarity_rate > 0.05", "severity": "warning"},
            {"condition": "regression_detected == True", "severity": "critical"},
        ]
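The class above leaves token_overlap, embedding_similarity, and structure_match as hooks. One minimal way to fill them in, using Jaccard overlap and whatever embedding call you already have (get_embedding below is an assumed function, not a specific library API):

import math

def token_overlap(baseline: str, canary: str) -> float:
    """Jaccard overlap of whitespace tokens; crude but cheap."""
    a, b = set(baseline.split()), set(canary.split())
    if not a and not b:
        return 1.0
    return len(a & b) / len(a | b)

def embedding_similarity(baseline: str, canary: str, get_embedding) -> float:
    """Cosine similarity of embeddings; get_embedding is an assumed callable
    returning a vector for a string."""
    u, v = get_embedding(baseline), get_embedding(canary)
    dot = sum(x * y for x, y in zip(u, v))
    norm = math.sqrt(sum(x * x for x in u)) * math.sqrt(sum(y * y for y in v))
    return dot / norm if norm else 0.0

def structure_match(baseline: str, canary: str) -> bool:
    """Rough structural check: same line count and same leading characters
    (bullets, headers, numbering) on each line."""
    b_lines, c_lines = baseline.splitlines(), canary.splitlines()
    return len(b_lines) == len(c_lines) and all(
        bl[:1] == cl[:1] for bl, cl in zip(b_lines, c_lines)
    )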
Rollback Triggers
def rollback_triggers():
    return {
        "automatic_rollback": [
            {
                "trigger": "Error rate > 2x baseline",
                "action": "Immediate rollback to 0% canary",
                "reason": "Clear regression",
            },
            {
                "trigger": "P99 latency > 2x baseline",
                "action": "Immediate rollback",
                "reason": "Performance regression",
            },
            {
                "trigger": "Safety classifier triggers > 3x baseline",
                "action": "Immediate rollback",
                "reason": "Quality/safety issue",
            },
        ],
        "manual_review_triggers": [
            {
                "trigger": "Output similarity < 85%",
                "action": "Pause rollout, investigate",
                "reason": "Outputs significantly different",
            },
            {
                "trigger": "User feedback spike",
                "action": "Pause rollout, review feedback",
                "reason": "Users noticing issues",
            },
        ],
    }
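A sketch of what the automatic side could look like in code, assuming you can read current per-version metrics from your monitoring backend; get_metric is an assumed helper, and the multipliers simply mirror the triggers above.

def should_auto_rollback(get_metric) -> bool:
    """Evaluate the automatic rollback triggers.

    `get_metric(name, version)` is an assumed helper returning a current
    value from your metrics store for the given model version.
    """
    checks = [
        ("error_rate", 2.0),
        ("p99_latency", 2.0),
        ("safety_classifier_trigger_rate", 3.0),
    ]
    for metric, multiplier in checks:
        baseline = get_metric(metric, "baseline")
        canary = get_metric(metric, "canary")
        if baseline > 0 and canary > multiplier * baseline:
            return True
    return False

# Wiring it up: poll on a schedule and drop canary traffic to 0% on trigger.
# if should_auto_rollback(get_metric):
#     deployment.canary_percentage = 0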
Implementation with Feature Flags
def feature_flag_implementation():
    return {
        "code": """
from feature_flags import get_flag

class ModelRouter:
    def __init__(self):
        self.models = {
            "baseline": load_model("v1.0"),
            "canary": load_model("v1.1"),
        }

    def get_model(self, user_id: str):
        # Feature flag determines canary percentage
        canary_config = get_flag("model_canary")
        if canary_config["enabled"]:
            # Hash user_id for consistent routing (use a stable hash such as
            # hashlib in practice; built-in hash() varies between processes)
            bucket = hash(user_id) % 100
            if bucket < canary_config["percentage"]:
                return self.models["canary"]
        return self.models["baseline"]
""",
        "flag_config": {
            "model_canary": {
                "enabled": True,
                "percentage": 5,
                "allowed_user_segments": ["beta", "internal"],
            }
        },
    }
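The flag config carries allowed_user_segments, but the router above never checks it. One way to honor it, sketched with a stable hash and an assumed get_user_segment(user_id) lookup:

import hashlib

def in_canary(user_id: str, canary_config: dict, get_user_segment) -> bool:
    """Return True if this user should be routed to the canary.

    `get_user_segment(user_id)` is an assumed helper returning the user's
    segment (e.g. "beta", "internal", "general").
    """
    if not canary_config["enabled"]:
        return False
    segments = canary_config.get("allowed_user_segments")
    if segments and get_user_segment(user_id) not in segments:
        return False
    # Stable hash so the same user always lands in the same bucket
    bucket = int(hashlib.sha256(user_id.encode()).hexdigest(), 16) % 100
    return bucket < canary_config["percentage"]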
Shadow Mode
def shadow_mode():
    """Test the canary without serving its output."""
    return {
        "description": "Run canary in parallel, don't serve its output",
        "implementation": """
async def handle_with_shadow(self, request):
    # Always serve baseline
    baseline_output = await self.baseline.generate(request)

    # Run canary in shadow (don't block the response)
    async def shadow_run():
        canary_output = await self.canary.generate(request)
        await self.log_shadow_comparison(
            request, baseline_output, canary_output
        )

    asyncio.create_task(shadow_run())
    return baseline_output
""",
        "use_cases": [
            "Pre-canary validation",
            "High-risk model changes",
            "When quality impact is unknown",
        ],
    }
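Shadowing every request doubles inference cost. A common variation is to shadow only a sample of traffic; a minimal sketch, where the sample rate is an assumed configuration value and the deployment object is the one defined earlier:

import asyncio
import random

SHADOW_SAMPLE_RATE = 0.1  # Assumed: shadow 10% of requests to cap cost

async def handle_with_sampled_shadow(deployment, request):
    """Serve baseline; shadow the canary for a sampled fraction of traffic.

    `deployment` is the CanaryDeployment-style object from earlier, assumed
    to also expose log_shadow_comparison.
    """
    baseline_output = await deployment.baseline.generate(request)
    if random.random() < SHADOW_SAMPLE_RATE:
        async def shadow_run():
            canary_output = await deployment.canary.generate(request)
            await deployment.log_shadow_comparison(
                request, baseline_output, canary_output
            )
        asyncio.create_task(shadow_run())
    return baseline_output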
Monitoring During Rollout
def rollout_monitoring():
    return {
        "dashboards": {
            "split_view": "Show baseline and canary metrics side by side",
            "comparison": "Output similarity over time",
            "user_impact": "Feedback rate by model version",
        },
        "alerts": {
            "relative": "Canary metrics relative to baseline",
            "absolute": "Canary metrics against fixed thresholds",
            "trend": "Degradation over time during the rollout",
        },
        "key_metrics": [
            "Error rate by model version",
            "Latency percentiles by model version",
            "Output length distribution",
            "User regeneration rate",
            "Safety classifier triggers",
        ],
    }
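As a concrete example of the "relative" alert style, a check that compares canary metrics to baseline over the same window. query_metric is an assumed interface to whatever monitoring backend you use, and the ratios are illustrative thresholds, not recommendations.

def evaluate_relative_alerts(query_metric) -> list:
    """Emit alert dicts when the canary drifts from baseline in the same window.

    `query_metric(name, version, window)` is an assumed helper against your
    monitoring backend.
    """
    window = "15m"
    alerts = []
    for metric, warn_ratio, crit_ratio in [
        ("error_rate", 1.2, 2.0),
        ("p99_latency_ms", 1.1, 2.0),
        ("regeneration_rate", 1.2, 1.5),
    ]:
        base = query_metric(metric, "baseline", window)
        canary = query_metric(metric, "canary", window)
        if base <= 0:
            continue  # Not enough baseline signal to compare against
        ratio = canary / base
        if ratio >= crit_ratio:
            alerts.append({"metric": metric, "ratio": ratio, "severity": "critical"})
        elif ratio >= warn_ratio:
            alerts.append({"metric": metric, "ratio": ratio, "severity": "warning"})
    return alerts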
The Rollout Checklist
def canary_rollout_checklist():
    return [
        "[ ] Shadow mode completed successfully",
        "[ ] Automatic rollback configured",
        "[ ] Comparison logging enabled",
        "[ ] Dashboards showing split metrics",
        "[ ] Alerts configured for both versions",
        "[ ] Rollback procedure tested",
        "[ ] On-call team briefed on the rollout",
        "[ ] User feedback channel monitored",
        "[ ] Each stage's gates passed before advancing",
    ]
Model changes are among the highest-risk deployments. The model itself might be fine, but its interaction with real traffic reveals problems that benchmarks miss. Canary deployments are how you find those problems at 1% instead of 100%.