Overview
This guide covers advanced usage patterns:

- Deterministic A/B testing
- Metrics aggregation across versions
- Production-ready patterns
- Real-world use cases
A/B Testing with Consistent Routing
The same subject always gets the same version, ensuring consistent user experiences.

from switchport import Switchport
client = Switchport()


def demonstrate_ab_testing():
    """Show that same subject gets same version."""
    user_ids = ["user_001", "user_002", "user_003"]

    # Pass 1: capture which version each subject is routed to.
    print("First execution:")
    first_seen = {}
    for uid in user_ids:
        result = client.prompts.execute(
            prompt_key="product-pitch",
            subject={"user_id": uid},
            variables={"product": "Pro Plan"},
        )
        first_seen[uid] = result.version_name
        print(f" {uid}: {result.version_name}")

    # Pass 2: identical subjects must land on identical versions.
    print("\nSecond execution (should be same):")
    for uid in user_ids:
        result = client.prompts.execute(
            prompt_key="product-pitch",
            subject={"user_id": uid},
            variables={"product": "Pro Plan"},
        )
        marker = "✓" if result.version_name == first_seen[uid] else "✗"
        print(f" {uid}: {result.version_name} {marker}")


demonstrate_ab_testing()
Metrics Aggregation Pattern
Record metrics with the same subject to aggregate by version:

from switchport import Switchport
client = Switchport()


def user_interaction_workflow(user_id: str, user_rating: float):
    """Complete workflow: execute prompt and track metrics."""
    # One subject dict, reused for both routing and metric attribution.
    subject = {"user_id": user_id, "segment": "premium"}

    # Execute prompt with subject identification
    response = client.prompts.execute(
        prompt_key="welcome-email",
        subject=subject,
        variables={"name": f"User {user_id}"},
    )

    # In real app: send email, show content, etc.
    print(f"User {user_id} saw version: {response.version_name}")

    # Record metric with SAME subject
    client.metrics.record(
        metric_key="satisfaction",
        value=user_rating,
        subject=subject,
    )


# Simulate multiple user interactions
for uid, score in [
    ("user_001", 5.0),
    ("user_002", 4.5),
    ("user_003", 4.0),
    ("user_004", 4.8),
    ("user_005", 3.5),
]:
    user_interaction_workflow(uid, score)

print("\nMetrics recorded! Check dashboard for aggregated results per version.")
Email Campaign A/B Test
Complete email campaign with open rate tracking:

from switchport import Switchport
import time
from typing import List
client = Switchport()


class EmailCampaign:
    """Email campaign with A/B testing.

    The (user_id, campaign) pair is used as the routing subject for both
    prompt execution and metric recording, so engagement metrics aggregate
    against the version each recipient actually saw.
    """

    def __init__(self, campaign_name: str):
        self.campaign_name = campaign_name

    def _subject(self, user_id: str) -> dict:
        """Build the subject shared by execute() and metric record() calls."""
        return {
            "user_id": user_id,
            "campaign": self.campaign_name,
        }

    def _record_event(self, metric_key: str, user_id: str) -> None:
        """Record one boolean engagement event under the campaign subject."""
        client.metrics.record(
            metric_key=metric_key,
            value=True,
            subject=self._subject(user_id),
        )

    def send_email(self, user_id: str, user_email: str, user_name: str):
        """Generate and 'send' a personalized email; return the version used."""
        # Execute prompt to generate email
        response = client.prompts.execute(
            prompt_key="marketing-email",
            subject=self._subject(user_id),
            variables={
                "name": user_name,
                "campaign": self.campaign_name,
            },
        )
        # In real app: actually send email via SendGrid, etc.
        print(f"Sent to {user_email} (version: {response.version_name})")
        return response.version_name

    def track_open(self, user_id: str):
        """Track when user opens email."""
        self._record_event("email_opened", user_id)

    def track_click(self, user_id: str):
        """Track when user clicks link in email."""
        self._record_event("email_clicked", user_id)

    def track_conversion(self, user_id: str):
        """Track when user converts."""
        self._record_event("conversion", user_id)
# Run campaign
campaign = EmailCampaign("summer_2025")

# Send to users
recipients = [
    ("user_001", "alice@example.com", "Alice"),
    ("user_002", "bob@example.com", "Bob"),
    ("user_003", "charlie@example.com", "Charlie"),
]
for uid, address, display_name in recipients:
    campaign.send_email(uid, address, display_name)

# Simulate user interactions: Alice converts, Bob clicks, Charlie only opens.
campaign.track_open("user_001")
campaign.track_click("user_001")
campaign.track_conversion("user_001")

campaign.track_open("user_002")
campaign.track_click("user_002")

campaign.track_open("user_003")

print("Campaign metrics recorded! View in dashboard.")
Customer Support Chatbot
A/B test different conversation styles:

from switchport import Switchport
client = Switchport()


class SupportBot:
    """Customer support chatbot with A/B testing."""

    def get_response(self, user_id: str, user_message: str) -> str:
        """Return the bot's reply to one user message."""
        result = client.prompts.execute(
            prompt_key="support-bot",
            subject={"user_id": user_id},
            variables={
                "user_message": user_message,
                "context": self._get_user_context(user_id),
            },
        )
        return result.text

    def record_satisfaction(self, user_id: str, rating: float):
        """Record the user's satisfaction score for their conversation."""
        client.metrics.record(
            metric_key="chat_satisfaction",
            value=rating,
            subject={"user_id": user_id},
        )

    def record_resolution(self, user_id: str, resolved: bool):
        """Record whether the user's issue was resolved."""
        client.metrics.record(
            metric_key="issue_resolved",
            value=resolved,
            subject={"user_id": user_id},
        )

    def _get_user_context(self, user_id: str) -> str:
        """Get user context (purchase history, tier, etc.)."""
        # In real app: fetch from database
        return "Premium user, active for 2 years"
# Usage
bot = SupportBot()

# Two-turn conversation: the same subject keeps the user on one version.
chat_user = "user_123"

first_reply = bot.get_response(
    chat_user,
    "I'm having trouble with my subscription",
)
print(f"Bot: {first_reply}")

second_reply = bot.get_response(
    chat_user,
    "It won't let me upgrade",
)
print(f"Bot: {second_reply}")

# After conversation, collect feedback
bot.record_satisfaction(chat_user, 4.5)
bot.record_resolution(chat_user, True)
Product Description Generator
Test different description styles and track conversions:

from switchport import Switchport
client = Switchport()


class ProductDescriptionGenerator:
    """Generate product descriptions with A/B testing.

    The (product_id, segment) pair is the routing subject for both prompt
    execution and metrics, so each segment sees one consistent description
    variant per product and funnel metrics aggregate per variant.
    """

    def generate_description(
        self,
        product_id: str,
        user_segment: str,
        product_name: str,
        features: list[str],  # builtin generic: `List` was never imported here
    ) -> str:
        """Generate product description."""
        response = client.prompts.execute(
            prompt_key="product-description",
            subject=self._subject(product_id, user_segment),
            variables={
                "product_name": product_name,
                "features": ", ".join(features),
            },
        )
        return response.text

    def track_view(self, product_id: str, user_segment: str):
        """Track product view."""
        self._record("product_viewed", True, product_id, user_segment)

    def track_add_to_cart(self, product_id: str, user_segment: str):
        """Track add to cart."""
        self._record("added_to_cart", True, product_id, user_segment)

    def track_purchase(self, product_id: str, user_segment: str, amount: float):
        """Track purchase: one conversion event plus the revenue amount."""
        self._record("purchased", True, product_id, user_segment)
        self._record("purchase_amount", amount, product_id, user_segment)

    @staticmethod
    def _subject(product_id: str, user_segment: str) -> dict:
        """Build the subject dict shared by execution and metric calls."""
        return {
            "product_id": product_id,
            "segment": user_segment,
        }

    def _record(self, metric_key: str, value, product_id: str, user_segment: str):
        """Record one metric under the shared product/segment subject."""
        client.metrics.record(
            metric_key=metric_key,
            value=value,
            subject=self._subject(product_id, user_segment),
        )
# Usage
generator = ProductDescriptionGenerator()

generated_text = generator.generate_description(
    product_id="prod_123",
    user_segment="premium",
    product_name="Enterprise Widget",
    features=["Feature A", "Feature B", "Feature C"],
)
print(f"Generated description:\n{generated_text}\n")

# Track user journey: view -> add to cart -> purchase
generator.track_view("prod_123", "premium")
generator.track_add_to_cart("prod_123", "premium")
generator.track_purchase("prod_123", "premium", 299.99)
Multi-Metric Tracking
Track multiple metrics for comprehensive analysis:

from switchport import Switchport
client = Switchport()


def complete_user_interaction(user_id: str):
    """Track complete user interaction with multiple metrics."""
    subject = {"user_id": user_id}

    # Execute prompt
    response = client.prompts.execute(
        prompt_key="onboarding-flow",
        subject=subject,
        variables={"name": f"User {user_id}"},
    )
    print(f"User {user_id} in version: {response.version_name}")

    # Every metric reuses the SAME subject so the dashboard can aggregate
    # them all against the version this user was routed to.
    metric_values = {
        "satisfaction": 4.5,           # float
        "completed_onboarding": True,  # boolean
        "sentiment": "positive",       # enum
        "time_spent_seconds": 125.5,   # float
        "converted_to_paid": True,     # boolean
    }
    for key, value in metric_values.items():
        client.metrics.record(
            metric_key=key,
            value=value,
            subject=subject,
        )

    print(f"Recorded {len(metric_values)} metrics")


# Track for multiple users
for i in range(1, 6):
    complete_user_interaction(f"user_{i:03d}")
Gradual Rollout Pattern
Test a new version with a small percentage of users:

from switchport import Switchport
client = Switchport()


def gradual_rollout_example(num_users: int = 100):
    """
    Demonstrate gradual rollout pattern.

    In dashboard:
    - Start: v1: 90%, v2: 10%
    - If metrics good: v1: 50%, v2: 50%
    - Finally: v2: 100%

    Args:
        num_users: how many simulated users to route (default 100).
    """
    print(f"Testing {num_users} users...")
    version_counts = {"v1": 0, "v2": 0, "other": 0}

    for i in range(num_users):
        response = client.prompts.execute(
            prompt_key="new-feature",
            subject={"user_id": f"user_{i:03d}"},
            variables={"feature": "New UI"},
        )
        # Bucket unexpected version names under "other".
        version = response.version_name
        bucket = version if version in version_counts else "other"
        version_counts[bucket] += 1

    print("Version distribution:")
    for version, count in version_counts.items():
        # Compute the percentage explicitly — the raw count only equals the
        # percentage when num_users happens to be 100.
        pct = 100.0 * count / num_users if num_users else 0.0
        print(f" {version}: {count} ({pct:.0f}%)")


gradual_rollout_example()
Complete Production Example
"""
Production-ready example with error handling, logging, and retries.
"""
from switchport import (
Switchport,
SwitchportError,
PromptNotFoundError,
APIError
)
import logging
from typing import Optional
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
client = Switchport()


def execute_prompt_with_fallback(
    prompt_key: str,
    user_id: str,
    variables: dict,
    fallback_text: Optional[str] = None
) -> str:
    """Execute prompt with error handling and fallback.

    Args:
        prompt_key: key of the prompt to execute.
        user_id: subject identifier used for consistent version routing.
        variables: template variables passed to the prompt.
        fallback_text: text returned on any Switchport failure
            (defaults to "Welcome!").

    Returns:
        The generated text, or the fallback on error.
    """
    # Hoist the fallback so every error path returns the same value.
    fallback = fallback_text or "Welcome!"
    subject = {"user_id": user_id}
    try:
        response = client.prompts.execute(
            prompt_key=prompt_key,
            subject=subject,
            variables=variables
        )
    except PromptNotFoundError:
        # Lazy %-style args: formatting only happens if the record is emitted.
        logger.error("Prompt '%s' not found", prompt_key)
        return fallback
    except APIError as e:
        # Ordered before SwitchportError: presumably APIError subclasses it —
        # TODO confirm against the SDK's exception hierarchy.
        logger.error(
            "API error executing prompt",
            extra={
                "prompt_key": prompt_key,
                "status_code": e.status_code,
                "error": str(e)
            }
        )
        return fallback
    except SwitchportError as e:
        logger.error("Switchport error: %s", e)
        return fallback

    logger.info(
        "Prompt executed successfully",
        extra={
            "prompt_key": prompt_key,
            "version": response.version_name,
            "request_id": response.request_id
        }
    )
    return response.text
# Usage
greeting = execute_prompt_with_fallback(
    prompt_key="welcome-message",
    user_id="user_123",
    variables={"name": "Alice"},
    fallback_text="Welcome to our platform!",
)
print(greeting)
Next Steps
Error Handling
Learn comprehensive error handling patterns
A/B Testing Guide
Deep dive into A/B testing

