Overview
This guide covers robust error handling patterns for production use:

- Catching specific exceptions
- Implementing fallback behavior
- Retry logic with exponential backoff
- Logging and monitoring
- Graceful degradation
Basic Error Handling
Handle the most common errors:from switchport import (
Switchport,
AuthenticationError,
PromptNotFoundError,
MetricNotFoundError,
APIError
)
client = Switchport()

try:
    response = client.prompts.execute("my-prompt")
    print(response.text)
except AuthenticationError:
    # Bad or missing API key.
    print("Authentication failed. Check your API key.")
except PromptNotFoundError:
    # The requested prompt key does not exist yet.
    print("Prompt not found. Create it in the dashboard.")
except APIError as e:
    # Any other API-level failure; the status code is available here.
    print(f"API error: {e}")
    print(f"Status code: {e.status_code}")
except Exception as e:
    # Last-resort catch-all so the example never crashes.
    print(f"Unexpected error: {e}")
Fallback Behavior
Provide default responses when the API fails:from switchport import Switchport, SwitchportError
client = Switchport()


def get_welcome_message(user_name: str) -> str:
    """Return a welcome message, falling back to a static default on error."""
    try:
        resp = client.prompts.execute(
            prompt_key="welcome-message",
            variables={"name": user_name}
        )
        return resp.text
    except SwitchportError as e:
        print(f"Switchport error: {e}")
        # Fallback to default message
        return f"Welcome, {user_name}! Thanks for joining us."


# Usage
message = get_welcome_message("Alice")
print(message)
Retry Logic
Implement retry logic with exponential backoff for transient failures:from switchport import Switchport, APIError
import time
from typing import Optional
client = Switchport()


def execute_with_retry(
    prompt_key: str,
    max_retries: int = 3,
    initial_delay: float = 1.0,
    **kwargs
):
    """Execute a prompt, retrying transient failures with exponential backoff.

    Only HTTP 5xx responses and 429 (rate limiting) are retried; other
    errors are raised immediately.

    Args:
        prompt_key: Key of the prompt to execute.
        max_retries: Maximum number of attempts before giving up.
        initial_delay: Delay before the first retry; doubled each attempt.
        **kwargs: Forwarded to ``client.prompts.execute``.

    Raises:
        APIError: If the request fails with a non-retryable status, or is
            still failing after all retries.
    """
    delay = initial_delay
    for attempt in range(max_retries):
        try:
            return client.prompts.execute(prompt_key, **kwargs)
        except APIError as e:
            # Only retry on 5xx errors or rate limiting (429). The
            # parentheses are deliberate: the original relied on `and`
            # binding tighter than `or`, which reads ambiguously and was
            # inconsistent with the same check elsewhere in this guide.
            retryable = e.status_code is not None and (
                e.status_code >= 500 or e.status_code == 429
            )
            if not retryable:
                # Don't retry on 4xx errors (client errors)
                raise
            if attempt < max_retries - 1:
                print(f"Attempt {attempt + 1} failed. Retrying in {delay}s...")
                time.sleep(delay)
                delay *= 2  # Exponential backoff
            else:
                print("Max retries reached. Giving up.")
                raise


# Usage
try:
    response = execute_with_retry(
        prompt_key="my-prompt",
        max_retries=3,
        variables={"name": "Alice"}
    )
    print(response.text)
except APIError as e:
    print(f"Failed after retries: {e}")
Logging for Production
Comprehensive logging for debugging and monitoring:from switchport import (
Switchport,
SwitchportError,
PromptNotFoundError,
APIError
)
import logging
from typing import Dict, Any
# Configure logging
# NOTE(review): fields passed via `extra={...}` below are attached to the
# LogRecord but are NOT rendered by this format string; a structured
# handler (e.g. a JSON formatter) is needed to surface them.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)  # module-level logger, standard convention
client = Switchport()  # presumably picks up credentials from the environment -- confirm
def execute_prompt_with_logging(
    prompt_key: str,
    subject: Dict[str, Any],
    variables: Dict[str, Any]
):
    """Execute a prompt, logging the request, its outcome, and any failure.

    Re-raises every Switchport exception after logging it, so callers keep
    full control over error handling.
    """
    logger.info(
        "Executing prompt",
        extra={
            "prompt_key": prompt_key,
            "subject": subject,
            "variables": variables
        }
    )
    try:
        response = client.prompts.execute(
            prompt_key=prompt_key,
            subject=subject,
            variables=variables
        )
    except PromptNotFoundError as e:
        logger.error(
            "Prompt not found",
            extra={
                "prompt_key": prompt_key,
                "error": str(e)
            }
        )
        raise
    except APIError as e:
        logger.error(
            "API error",
            extra={
                "prompt_key": prompt_key,
                "status_code": e.status_code,
                "error": str(e),
                "response_data": e.response_data
            }
        )
        raise
    except SwitchportError as e:
        # Catch-all for any other SDK error type.
        logger.error(
            "Switchport error",
            extra={
                "prompt_key": prompt_key,
                "error_type": type(e).__name__,
                "error": str(e)
            }
        )
        raise
    logger.info(
        "Prompt executed successfully",
        extra={
            "prompt_key": prompt_key,
            "version": response.version_name,
            "request_id": response.request_id,
            "model": response.model
        }
    )
    return response


# Usage
try:
    response = execute_prompt_with_logging(
        prompt_key="welcome-message",
        subject={"user_id": "user_123"},
        variables={"name": "Alice"}
    )
except SwitchportError:
    logger.exception("Failed to execute prompt")
Circuit Breaker Pattern
Prevent cascading failures with a circuit breaker:from switchport import Switchport, APIError
import time
from enum import Enum
class CircuitState(Enum):
    """States of the circuit breaker."""

    CLOSED = "closed"        # Normal operation
    OPEN = "open"            # Failing, reject requests
    HALF_OPEN = "half_open"  # Testing if recovered


class CircuitBreaker:
    """Circuit breaker for Switchport API calls.

    After ``failure_threshold`` failures the circuit opens and calls are
    rejected immediately. Once ``timeout`` seconds have passed since the
    last failure, a single trial call is allowed (half-open); success
    closes the circuit again.
    """

    def __init__(
        self,
        failure_threshold: int = 5,
        timeout: float = 60.0
    ):
        self.failure_threshold = failure_threshold
        self.timeout = timeout
        self.failure_count = 0
        self.last_failure_time = None
        self.state = CircuitState.CLOSED

    def call(self, func, *args, **kwargs):
        """Execute ``func`` with circuit breaker protection.

        Raises:
            RuntimeError: If the circuit is open and the timeout has not
                yet elapsed.
            Exception: Whatever ``func`` itself raises (re-raised after
                being counted as a failure).
        """
        if self.state == CircuitState.OPEN:
            # Check if timeout has passed
            if time.time() - self.last_failure_time > self.timeout:
                self.state = CircuitState.HALF_OPEN
            else:
                raise RuntimeError("Circuit breaker is OPEN")
        try:
            result = func(*args, **kwargs)
        except Exception:
            # Any failure counts toward the threshold. The original
            # `except (APIError, Exception)` tuple was redundant:
            # Exception already covers APIError.
            self.failure_count += 1
            self.last_failure_time = time.time()
            if self.failure_count >= self.failure_threshold:
                self.state = CircuitState.OPEN
            raise
        # Success - reset on half-open
        if self.state == CircuitState.HALF_OPEN:
            self.state = CircuitState.CLOSED
            self.failure_count = 0
        return result
# Usage
client = Switchport()
circuit_breaker = CircuitBreaker(failure_threshold=5, timeout=60.0)


def execute_with_circuit_breaker(prompt_key: str):
    """Execute prompt with circuit breaker."""
    return circuit_breaker.call(client.prompts.execute, prompt_key)


try:
    response = execute_with_circuit_breaker("my-prompt")
    print(response.text)
except Exception as e:
    print(f"Circuit breaker prevented call or call failed: {e}")
Graceful Degradation
Handle failures gracefully without breaking user experience:from switchport import Switchport, SwitchportError
from typing import Optional
client = Switchport()


class AIContentGenerator:
    """Content generator with graceful degradation.

    Tries AI generation first; on any Switchport failure it falls back to
    a static template, and finally to a generic placeholder, so callers
    always receive usable text.
    """

    def __init__(self):
        # Static fallbacks keyed by the same keys as the prompts.
        self.fallback_templates = {
            "welcome-message": "Welcome, {name}! We're glad to have you here.",
            "product-description": "{product} - A great product for you.",
            "email-subject": "Important update about {topic}"
        }

    def generate_content(
        self,
        template_key: str,
        variables: dict,
        subject: Optional[dict] = None
    ) -> str:
        """Generate content with AI, fall back to template on failure."""
        try:
            # Try AI generation
            response = client.prompts.execute(
                prompt_key=template_key,
                subject=subject or {},
                variables=variables
            )
            return response.text
        except SwitchportError as e:
            # Log the error
            print(f"AI generation failed: {e}")
            # Fall back to template
            template = self.fallback_templates.get(template_key)
            if template:
                try:
                    return template.format(**variables)
                except (KeyError, IndexError):
                    # A missing template variable must not crash the
                    # degradation path itself -- fall through to the
                    # generic placeholder instead.
                    pass
            return "Content temporarily unavailable."


# Usage
generator = AIContentGenerator()

# This will use AI if available, template if not
welcome = generator.generate_content(
    "welcome-message",
    {"name": "Alice"},
    {"user_id": "user_123"}
)
print(welcome)
Metrics Recording with Error Handling
Ensure metrics recording doesn’t break your application:from switchport import Switchport, SwitchportError
import logging
from typing import Any

logger = logging.getLogger(__name__)
client = Switchport()


def record_metric_safely(
    metric_key: str,
    value: Any,
    subject: dict
) -> bool:
    """Record a metric, swallowing Switchport errors.

    Metrics are treated as non-critical: on failure the error is logged
    and ``False`` is returned instead of raising.

    Args:
        metric_key: Key of the metric to record.
        value: Metric value. (The original annotated this as the builtin
            ``any`` function, which is not a type; ``typing.Any`` is the
            intended annotation.)
        subject: Identifies who/what the metric applies to.

    Returns:
        True if the metric was recorded successfully, False otherwise.
    """
    try:
        result = client.metrics.record(
            metric_key=metric_key,
            value=value,
            subject=subject
        )
        return result.success
    except SwitchportError as e:
        logger.error(
            f"Failed to record metric '{metric_key}': {e}",
            extra={
                "metric_key": metric_key,
                "value": value,
                "subject": subject
            }
        )
        # Don't raise - metrics are non-critical
        return False


# Usage - won't break even if metrics fail
user_id = "user_123"

# Execute prompt
try:
    response = client.prompts.execute(
        prompt_key="welcome",
        subject={"user_id": user_id}
    )
    print(response.text)
except SwitchportError as e:
    print(f"Prompt execution failed: {e}")

# Record metric (won't break if it fails)
success = record_metric_safely(
    metric_key="satisfaction",
    value=4.5,
    subject={"user_id": user_id}
)
if success:
    print("Metric recorded")
else:
    print("Metric recording failed, but continuing...")
Timeout Handling
Handle timeout errors specifically:from switchport import Switchport, APIError
import requests
client = Switchport()


def execute_with_timeout(prompt_key: str, timeout: int = 30):
    """Execute a prompt, converting API timeout errors to ``TimeoutError``.

    The SDK uses requests with a default 30s timeout that cannot be
    overridden directly, so ``timeout`` here is only used for the error
    message, not to change the actual request deadline.

    Raises:
        TimeoutError: If the underlying API call timed out (chained to
            the original APIError so the root cause is preserved).
        APIError: For any other API failure.
    """
    try:
        response = client.prompts.execute(prompt_key)
        return response
    except APIError as e:
        if "timeout" in str(e).lower():
            print(f"Request timed out after {timeout}s")
            # Chain with `from e` so tracebacks show the root cause
            # (the original raise dropped it).
            raise TimeoutError("Prompt execution timed out") from e
        raise


# Usage
try:
    response = execute_with_timeout("slow-prompt")
except TimeoutError:
    print("Using cached response due to timeout...")
Complete Production Example
A production-ready implementation with all best practices:"""
Production-ready error handling example.
Includes: retries, circuit breaker, fallback, logging, metrics.
"""
from switchport import (
Switchport,
SwitchportError,
APIError,
PromptNotFoundError
)
import logging
import time
from typing import Optional, Dict, Any
from enum import Enum
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)  # module-level logger, standard convention
# Initialize client
client = Switchport()  # presumably picks up credentials from the environment -- confirm
class PromptExecutor:
    """Production-ready prompt executor.

    Combines retry with exponential backoff, a simple circuit breaker,
    fallback text, and structured logging. ``execute`` never raises for
    API failures; it always returns usable text.
    """

    # Consecutive API failures before the circuit opens.
    FAILURE_THRESHOLD = 5

    def __init__(self, max_retries: int = 3, circuit_timeout: float = 60.0):
        """
        Args:
            max_retries: Attempts per ``execute`` call.
            circuit_timeout: Seconds after which an open circuit allows a
                new trial request. (Fixes the original behavior where the
                circuit, once opened, never closed again.)
        """
        self.max_retries = max_retries
        self.circuit_timeout = circuit_timeout
        self.failure_count = 0
        self.circuit_open = False
        self.circuit_opened_at: Optional[float] = None

    def execute(
        self,
        prompt_key: str,
        subject: Dict[str, Any],
        variables: Dict[str, Any],
        fallback_text: Optional[str] = None
    ) -> str:
        """
        Execute prompt with comprehensive error handling.

        Features:
        - Retry logic with exponential backoff (5xx and 429 only)
        - Circuit breaker with timed reset
        - Fallback behavior
        - Comprehensive logging

        Returns:
            The generated text, or a fallback string on any failure.
        """
        # Check circuit breaker; allow a trial call once the timeout passed.
        if self.circuit_open:
            opened_at = self.circuit_opened_at or 0.0
            if time.time() - opened_at >= self.circuit_timeout:
                logger.info("Circuit breaker timeout elapsed, allowing trial call")
                self.circuit_open = False
                self.failure_count = 0
            else:
                logger.warning("Circuit breaker is open, using fallback")
                return self._get_fallback(prompt_key, variables, fallback_text)
        delay = 1.0
        for attempt in range(self.max_retries):
            try:
                logger.info(
                    f"Executing prompt (attempt {attempt + 1}/{self.max_retries})",
                    extra={"prompt_key": prompt_key}
                )
                response = client.prompts.execute(
                    prompt_key=prompt_key,
                    subject=subject,
                    variables=variables
                )
                # Success - reset failure count
                self.failure_count = 0
                logger.info(
                    "Prompt executed successfully",
                    extra={
                        "prompt_key": prompt_key,
                        "version": response.version_name,
                        "request_id": response.request_id
                    }
                )
                return response.text
            except PromptNotFoundError:
                # Client error: retrying cannot help.
                logger.error(f"Prompt not found: {prompt_key}")
                return self._get_fallback(prompt_key, variables, fallback_text)
            except APIError as e:
                self._record_failure()
                # Retry on 5xx or 429
                if e.status_code and (e.status_code >= 500 or e.status_code == 429):
                    if attempt < self.max_retries - 1:
                        logger.warning(
                            f"API error, retrying in {delay}s",
                            extra={"status_code": e.status_code}
                        )
                        time.sleep(delay)
                        delay *= 2
                        continue
                # Don't retry on other errors
                logger.error(f"API error: {e}")
                return self._get_fallback(prompt_key, variables, fallback_text)
            except Exception as e:
                logger.exception(f"Unexpected error: {e}")
                return self._get_fallback(prompt_key, variables, fallback_text)
        # Max retries reached
        logger.error(f"Max retries reached for {prompt_key}")
        return self._get_fallback(prompt_key, variables, fallback_text)

    def _record_failure(self) -> None:
        """Count an API failure and open the circuit at the threshold."""
        self.failure_count += 1
        if self.failure_count >= self.FAILURE_THRESHOLD and not self.circuit_open:
            self.circuit_open = True
            self.circuit_opened_at = time.time()
            logger.error("Circuit breaker opened")

    def _get_fallback(
        self,
        prompt_key: str,
        variables: Dict[str, Any],
        fallback_text: Optional[str]
    ) -> str:
        """Return fallback text: explicit > per-key default > generic.

        Note: an explicitly passed empty string is treated as "no
        fallback" (truthiness check), matching the original behavior.
        """
        if fallback_text:
            return fallback_text
        # Default fallback templates
        defaults = {
            "welcome-message": f"Welcome, {variables.get('name', 'there')}!",
            "error-message": "We're experiencing technical difficulties."
        }
        return defaults.get(prompt_key, "Content temporarily unavailable.")
# Usage
executor = PromptExecutor(max_retries=3)

# Returns AI-generated text, or the supplied fallback_text if every
# attempt fails (execute catches API failures instead of raising).
result = executor.execute(
    prompt_key="welcome-message",
    subject={"user_id": "user_123"},
    variables={"name": "Alice"},
    fallback_text="Welcome to our platform!"
)
print(result)
Next Steps
Exceptions Reference
Learn about all exception types
Advanced Examples
See more advanced patterns

