The Engineering Reality of Monitoring Real-Time Conversations
Explore the technical challenges of building real-time conversation monitoring systems, from handling massive concurrency to integrating AI for instant analysis.
Testing is crucial for building reliable Python applications. Whether you’re developing web APIs, data pipelines, or machine learning models, comprehensive testing ensures your code works correctly and catches bugs before they reach production.
This guide covers everything you need to know about Python testing, from fundamentals to advanced automation strategies.
Effective testing provides multiple benefits: it catches regressions early, documents expected behavior, enables confident refactoring, and reduces the cost of fixing bugs by finding them before production.
Structure your test suite following the testing pyramid:
        /\
       /  \        E2E Tests (Few, Slow, Expensive)
      /____\
     /      \      Integration Tests (Some, Medium Speed)
    /________\
   /          \    Unit Tests (Many, Fast, Cheap)
  /____________\
Distribution: roughly 70% unit tests, 20% integration tests, and 10% end-to-end tests.
This distribution optimizes for fast feedback and maintainability.
The most popular Python testing framework, and for good reason:
Advantages: plain assert statements, powerful fixtures, built-in parametrization, detailed failure output, and a rich plugin ecosystem.
Installation:
pip install pytest pytest-cov pytest-asyncio pytest-mock
Basic Example:
def test_user_creation():
user = User(name="Alice", email="alice@example.com")
assert user.name == "Alice"
assert user.email == "alice@example.com"
assert user.is_active is True
Python’s built-in testing framework:
When to Use: environments that must stick to the standard library, or legacy codebases already written in unittest style.
Example:
import unittest
class TestUser(unittest.TestCase):
def test_user_creation(self):
user = User(name="Alice")
self.assertEqual(user.name, "Alice")
self.assertTrue(user.is_active)
Each test should verify one specific behavior:
# Good - Clear, single purpose
def test_user_creation_sets_name():
user = User(name="Alice")
assert user.name == "Alice"
def test_user_creation_sets_default_active_status():
user = User(name="Alice")
assert user.is_active is True
# Bad - Testing multiple things
def test_user():
user = User(name="Alice")
assert user.name == "Alice"
assert user.is_active is True
assert user.created_at is not None
assert user.email is None
Test names should clearly describe what they verify:
# Good naming
def test_deposit_increases_account_balance():
pass
def test_withdraw_with_insufficient_funds_raises_error():
pass
def test_transfer_between_accounts_updates_both_balances():
pass
# Bad naming
def test_account():
pass
def test_transaction():
pass
def test_1():
pass
Structure tests with Arrange, Act, Assert:
def test_order_total_with_discount():
# Arrange - Set up test data
order = Order()
order.add_item(Item(name="Book", price=20.00))
order.add_item(Item(name="Pen", price=5.00))
discount = Discount(percentage=10)
# Act - Execute the behavior
total = order.calculate_total(discount)
# Assert - Verify the result
assert total == 22.50 # (20 + 5) * 0.9
Leverage pytest fixtures to reduce duplication:
import pytest
@pytest.fixture
def sample_user():
"""Create a standard test user"""
return User(
name="Test User",
email="test@example.com",
role="member"
)
@pytest.fixture
def database_session():
"""Create a test database session"""
session = create_test_session()
yield session
session.rollback()
session.close()
def test_user_can_be_saved(sample_user, database_session):
database_session.add(sample_user)
database_session.commit()
retrieved = database_session.query(User).first()
assert retrieved.name == "Test User"
Test multiple scenarios efficiently:
import pytest
@pytest.mark.parametrize("input_value,expected", [
(0, "zero"),
(1, "positive"),
(-1, "negative"),
(100, "positive"),
(-100, "negative"),
])
def test_number_classification(input_value, expected):
result = classify_number(input_value)
assert result == expected
@pytest.mark.parametrize("email", [
"invalid.email",
"@example.com",
"user@",
"user name@example.com",
"",
])
def test_invalid_email_validation(email):
with pytest.raises(ValidationError):
validate_email(email)
Mock external dependencies to keep tests fast, deterministic, and free of real side effects (no live network calls, emails, or payments).
from unittest.mock import Mock, patch, MagicMock
# Mocking a function
def test_send_email_on_user_registration():
with patch('myapp.email.send_email') as mock_send:
register_user("alice@example.com", "password123")
mock_send.assert_called_once_with(
to="alice@example.com",
subject="Welcome!",
template="welcome"
)
# Mocking an object
def test_payment_processing():
mock_gateway = Mock()
mock_gateway.charge.return_value = {"status": "success", "id": "ch_123"}
processor = PaymentProcessor(mock_gateway)
result = processor.process_payment(amount=100, token="tok_visa")
assert result["status"] == "success"
mock_gateway.charge.assert_called_once_with(
amount=10000, # cents
token="tok_visa"
)
Simpler syntax with pytest-mock:
def test_api_call_with_retry(mocker):
# Mock the requests.get function
mock_get = mocker.patch('requests.get')
mock_get.side_effect = [
Exception("Connection error"), # First call fails
Mock(status_code=200, json=lambda: {"data": "success"}) # Second succeeds
]
result = fetch_data_with_retry("https://api.example.com/data")
assert result == {"data": "success"}
assert mock_get.call_count == 2
# Good - Mock at the boundary
@patch('myapp.services.external_api_client')
def test_service_uses_api(mock_client):
mock_client.get_data.return_value = {"id": 1}
result = my_service.process()
assert result["id"] == 1
# Bad - Mocking too much internal logic
@patch('myapp.services.parse_data')
@patch('myapp.services.validate_data')
@patch('myapp.services.transform_data')
def test_service(m1, m2, m3):
# Too many mocks = fragile test
pass
Test how components work together:
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
@pytest.fixture(scope="function")
def db_session():
"""Create a fresh database for each test"""
engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
yield session
session.close()
def test_user_repository_creates_user(db_session):
repo = UserRepository(db_session)
user = repo.create(
name="Alice",
email="alice@example.com"
)
assert user.id is not None
# Verify in database
retrieved = db_session.query(User).filter_by(email="alice@example.com").first()
assert retrieved is not None
assert retrieved.name == "Alice"
from fastapi.testclient import TestClient
from myapp import app
client = TestClient(app)
def test_create_user_endpoint():
response = client.post(
"/users",
json={"name": "Alice", "email": "alice@example.com"}
)
assert response.status_code == 201
data = response.json()
assert data["name"] == "Alice"
assert "id" in data
def test_get_user_endpoint():
# Create user
create_response = client.post(
"/users",
json={"name": "Bob", "email": "bob@example.com"}
)
user_id = create_response.json()["id"]
# Get user
get_response = client.get(f"/users/{user_id}")
assert get_response.status_code == 200
assert get_response.json()["name"] == "Bob"
Test asynchronous functions:
import pytest
import asyncio
@pytest.mark.asyncio
async def test_async_fetch_data():
result = await fetch_data_async("https://api.example.com")
assert result["status"] == "success"
@pytest.mark.asyncio
async def test_concurrent_requests():
results = await asyncio.gather(
fetch_data_async("https://api.example.com/1"),
fetch_data_async("https://api.example.com/2"),
fetch_data_async("https://api.example.com/3"),
)
assert len(results) == 3
assert all(r["status"] == "success" for r in results)
@pytest.mark.asyncio
async def test_async_service_with_mock(mocker):
mock_fetch = mocker.patch('myapp.fetch_data_async')
mock_fetch.return_value = {"data": "mocked"}
service = DataService()
result = await service.process()
assert result == {"data": "mocked"}
mock_fetch.assert_called_once()
# Run tests with coverage
pytest --cov=myapp --cov-report=html --cov-report=term
# View HTML report
open htmlcov/index.html
Target: Aim for 80-90% coverage, not 100%
Focus on: business logic, error-handling paths, edge cases, and code that changes frequently.
Don’t obsess over: trivial getters/setters, framework-generated boilerplate, or third-party library internals.
Add to pytest.ini:
[pytest]
addopts = --cov=myapp --cov-fail-under=80
Or pyproject.toml:
[tool.pytest.ini_options]
addopts = "--cov=myapp --cov-fail-under=80"
name: Tests
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-latest
services:
postgres:
image: postgres:15
env:
POSTGRES_PASSWORD: postgres
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
- name: Install dependencies
run: |
pip install -r requirements.txt
pip install pytest pytest-cov
- name: Run tests
env:
DATABASE_URL: postgresql://postgres:postgres@localhost/test
run: |
pytest --cov=myapp --cov-report=xml
- name: Upload coverage
uses: codecov/codecov-action@v3
test:
image: python:3.11
services:
- postgres:15
variables:
POSTGRES_DB: test
POSTGRES_PASSWORD: postgres
DATABASE_URL: postgresql://postgres:postgres@postgres/test
before_script:
- pip install -r requirements.txt
- pip install pytest pytest-cov
script:
- pytest --cov=myapp --cov-report=term --cov-report=xml
coverage: '/TOTAL.*\s+(\d+%)$/'
artifacts:
reports:
coverage_report:
coverage_format: cobertura
path: coverage.xml
✅ Write tests first (TDD) or immediately after code
✅ Keep tests fast (unit tests < 100ms)
✅ Make tests independent (no shared state)
✅ Use descriptive names
✅ Test edge cases and error conditions
✅ Mock external dependencies
✅ Run tests automatically in CI/CD
✅ Maintain tests like production code
❌ Skip tests for “simple” code
❌ Test implementation details
❌ Share state between tests
❌ Ignore failing tests
❌ Use sleep() for timing issues
❌ Test third-party libraries
❌ Couple tests to implementation
Test properties that should always hold:
from hypothesis import given, strategies as st
@given(st.lists(st.integers()))
def test_reverse_twice_returns_original(lst):
assert reverse(reverse(lst)) == lst
@given(st.integers(min_value=0), st.integers(min_value=0))
def test_addition_commutative(a, b):
assert add(a, b) == add(b, a)
Verify test quality with mutation testing:
pip install mutmut
mutmut run
mutmut results
Mutmut modifies your code and checks if tests catch the changes.
Test complex outputs:
# syrupy registers the `snapshot` fixture automatically once installed (pip install syrupy)
def test_api_response_structure(snapshot):
response = api.get_user(123)
assert response == snapshot
# Bad - Too many mocks
@patch('module.func1')
@patch('module.func2')
@patch('module.func3')
@patch('module.func4')
@patch('module.func5')
def test_something(m1, m2, m3, m4, m5):
# This test is fragile and tests nothing useful
pass
Solution: Test at a higher level or refactor code
# Bad - Flaky timing-dependent test
def test_async_operation():
start_async_operation()
time.sleep(1) # Hope it's done
assert operation_completed()
Solution: Use proper async testing or polling
# Bad - Depends on test execution order
def test_create():
global user_id
user_id = create_user()
def test_update():
update_user(user_id) # Breaks if test_create doesn't run first
Solution: Make tests independent with fixtures
Here’s a practical testing strategy for a Python web application:
tests/
├── unit/
│ ├── test_models.py
│ ├── test_services.py
│ └── test_utils.py
├── integration/
│ ├── test_api.py
│ ├── test_database.py
│ └── test_external_services.py
├── e2e/
│ └── test_user_journeys.py
├── conftest.py # Shared fixtures
└── pytest.ini # Configuration
Effective Python testing requires:
Testing is an investment that pays dividends through:
At Async Squad Labs, we help teams implement comprehensive testing strategies that catch bugs early and enable rapid, confident development. From setting up test infrastructure to writing test suites, we ensure your Python applications are reliable and maintainable.
Need help with test automation? Contact us to discuss your testing challenges.
Related articles: AI Integration Guide | Go Microservices | Elixir Benefits