Command Reference
DigitalOcean CLI (doctl)
Authentication and Setup
# Install doctl
sudo snap install doctl
# Create alias for easier usage
echo "alias doctl='snap run doctl'" >> ~/.bash_aliases
# Authenticate with DigitalOcean
doctl auth init --context azmx
# Enter your DigitalOcean API token
# Switch between contexts
doctl auth switch --context azmx
doctl auth remove --context old-context
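# Verify the token works and see which context is active
doctl auth list
doctl account get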
Kubernetes (DOKS) Management
# List clusters (doctl also accepts "k8s" as a shorthand for "kubernetes")
doctl kubernetes cluster list
# Get cluster details
doctl kubernetes cluster get k8s-cluster-name
# Download kubeconfig
doctl kubernetes cluster kubeconfig save cluster-name
# Connect to cluster with kubectl
kubectl get nodes --kubeconfig=/path/to/cluster-kubeconfig.yaml
# Set environment variable for easier access
export KUBECONFIG=/path/to/cluster-kubeconfig.yaml
kubectl get nodes
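# If doctl was installed via snap and "kubeconfig save" fails, grant it access first:
sudo snap connect doctl:kube-config
# Saved clusters appear as kubectl contexts (named do-<region>-<cluster-name>):
kubectl config get-contexts
kubectl config use-context do-nyc3-cluster-name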
Container Registry
# Login to DigitalOcean Container Registry
docker login registry.digitalocean.com
# Use token as both username and password
# Tag and push image
docker tag your-image:latest registry.digitalocean.com/your-registry/your-image
docker push registry.digitalocean.com/your-registry/your-image
# Logout from registry
doctl registry logout
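# Alternatively, let doctl configure Docker credentials for the registry
doctl registry login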
Spaces (Object Storage)
# Spaces is S3-compatible; doctl does not manage objects in Spaces,
# so use s3cmd (or the AWS CLI) configured for your Spaces endpoint.
# List spaces (buckets)
s3cmd ls
# Create new space (the region comes from the configured endpoint, e.g. nyc3)
s3cmd mb s3://my-space
# Upload file to space
s3cmd put local-file.txt s3://my-space/path/
# Download file from space
s3cmd get s3://my-space/path/file.txt local-file.txt
# Sync directory with space
s3cmd sync local-directory/ s3://my-space/remote-directory/
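# One-time s3cmd setup: point it at the Spaces endpoint for your region
# (nyc3 shown as an example; Spaces keys come from the DigitalOcean control panel)
s3cmd --configure
# At the prompts, set:
#   S3 Endpoint: nyc3.digitaloceanspaces.com
#   DNS-style bucket+hostname:port template: %(bucket)s.nyc3.digitaloceanspaces.com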
PostgreSQL Management
SSH Tunneling (Standard Convention)
# Test/Development Environment (Port 5433)
ssh -L 5433:localhost:5432 user@test-server
# Production Environment (Port 5434)
ssh -L 5434:localhost:5432 user@prod-server
# Background tunnel
ssh -f -N -L 5433:localhost:5432 user@test-server
# Kill background tunnel
ps aux | grep ssh
kill <ssh-process-id>
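# Kill a specific tunnel without hunting for PIDs (matches the command line above)
pkill -f "ssh -f -N -L 5433"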
Database Connection
# Connect via psql (Test Environment)
psql "host=localhost port=5433 user=username dbname=database_name"
# Connect via psql (Production Environment)
psql "host=localhost port=5434 user=username dbname=database_name"
# Connect with SSL requirement
psql "sslmode=require host=localhost port=5433 user=username dbname=database_name"
# Execute SQL file
psql -h localhost -p 5433 -U username -d database_name -f script.sql
# Dump database
pg_dump -h localhost -p 5433 -U username database_name > backup.sql
# Restore database
psql -h localhost -p 5433 -U username database_name < backup.sql
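# Custom-format dumps are compressed and allow selective restore via pg_restore
pg_dump -h localhost -p 5433 -U username -Fc database_name > backup.dump
pg_restore -h localhost -p 5433 -U username -d database_name backup.dump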
pgAdmin Connection
# pgAdmin connection settings:
# Host: localhost
# Port: 5433 (test) or 5434 (prod)
# Maintenance Database: postgres
# Username: admin_username
# Password: admin_password
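# psql and other libpq-based clients read credentials from ~/.pgpass
# (format: hostname:port:database:username:password; file must be chmod 600)
echo "localhost:5433:database_name:username:password" >> ~/.pgpass
chmod 600 ~/.pgpass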
Django Management
Environment Setup
# Create virtual environment
python3 -m venv venv
source venv/bin/activate # Linux/Mac
# or
venv\Scripts\activate # Windows
# Install dependencies
pip install -r requirements.txt
# Environment-specific settings
export DJANGO_SETTINGS_MODULE=project.settings.development
export DJANGO_SETTINGS_MODULE=project.settings.production
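# Confirm which settings are active (prints settings that differ from Django defaults)
python manage.py diffsettings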
Database Operations
# Create migrations
python manage.py makemigrations
# Apply migrations
python manage.py migrate
# Create superuser
python manage.py createsuperuser
# Load fixture data
python manage.py loaddata fixtures/initial_data.json
# Dump data
python manage.py dumpdata app.model --indent 2 > fixtures/data.json
# Database shell
python manage.py dbshell
# Django shell
python manage.py shell
Development Server
# Run development server
python manage.py runserver
python manage.py runserver 0.0.0.0:8000
# Run with specific settings
python manage.py runserver --settings=project.settings.development
# Collect static files
python manage.py collectstatic --noinput
# Check for issues
python manage.py check
python manage.py check --deploy
Testing
# Run all tests
python manage.py test
# Run specific app tests
python manage.py test apps.users
# Run with coverage
coverage run --source='.' manage.py test
coverage report
coverage html
# Run tests with pytest
pytest
pytest apps/users/tests/
pytest --cov=apps --cov-report=html
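# Handy pytest flags (--reuse-db assumes the pytest-django plugin is installed):
pytest -x                 # Stop on first failure
pytest -k "login"         # Run tests matching a keyword expression
pytest --reuse-db         # Reuse the test database between runs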
Celery Task Management
Celery Worker Operations
# Start Celery worker
celery -A project worker -l info
# Start worker with specific queues
celery -A project worker -Q high_priority,normal -l info
# Start worker in background
celery -A project worker -l info --detach
# Stop workers gracefully
celery -A project control shutdown
# Restart worker pool processes (requires the worker_pool_restarts setting)
celery -A project control pool_restart
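# Tune worker capacity (--autoscale takes max,min process counts)
celery -A project worker --concurrency=4 -l info
celery -A project worker --autoscale=10,3 -l info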
Celery Beat (Scheduler)
# Start Celery beat scheduler
celery -A project beat -l info
# Start beat with custom schedule
celery -A project beat -l info --schedule=/path/to/schedule
# Run beat in background
celery -A project beat -l info --detach
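# With django-celery-beat installed, keep the schedule in the database instead:
celery -A project beat -l info --scheduler django_celery_beat.schedulers:DatabaseScheduler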
Monitoring and Control
# Monitor with Flower
celery -A project flower
# Inspect active tasks
celery -A project inspect active
# Inspect registered tasks
celery -A project inspect registered
# Purge all tasks
celery -A project purge
# Revoke task
celery -A project control revoke task-id
# Get task status
celery -A project result task-id
Git Commands (GitHub Flow)
Branch Management
# Create and switch to feature branch
git checkout -b feature/user-authentication
# Push new branch to remote
git push -u origin feature/user-authentication
# List branches
git branch -a
# Delete local branch
git branch -d feature/completed-feature
# Delete remote branch
git push origin --delete feature/completed-feature
# Sync with main
git checkout main
git pull origin main
git checkout feature/my-branch
git merge main
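# Alternative: rebase instead of merge for a linear history
# (rewrites local commits, so force-push the branch with care)
git rebase main
git push --force-with-lease origin feature/my-branch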
Commit Operations
# Stage changes
git add .
git add -A
git add file1.py file2.js
# Commit with message
git commit -m "feat(auth): add JWT token validation"
# Amend last commit
git commit --amend -m "Updated commit message"
# Interactive staging
git add -p
# Show commit history
git log --oneline
git log --graph --decorate --all
Remote Operations
# Add remote
git remote add origin https://github.com/azmx/repo.git
# Fetch changes
git fetch origin
git fetch --all
# Pull changes
git pull origin main
git pull --rebase origin main
# Push changes
git push origin feature/my-branch
git push --force-with-lease origin feature/my-branch
Stashing
# Stash changes
git stash
git stash push -m "Work in progress on feature X" # "stash save" is deprecated
# List stashes
git stash list
# Apply stash
git stash apply
git stash apply stash@{1}
# Pop stash (apply and remove)
git stash pop
# Drop stash
git stash drop stash@{1}
Docker Operations
Container Management
# Build image
docker build -t app-name:tag .
# Run container
docker run -d -p 8000:8000 --name app-container app-name:tag
# Run with environment variables
docker run -d -p 8000:8000 --env-file .env app-name:tag
# Stop container
docker stop app-container
# Remove container
docker rm app-container
# View logs
docker logs app-container
docker logs -f app-container # Follow logs
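# Open a shell inside a running container / inspect its full configuration
docker exec -it app-container bash
docker inspect app-container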
Image Management
# List images
docker images
# Remove image
docker rmi image-name:tag
# Tag image
docker tag app-name:latest registry.com/app-name:v1.0
# Push image
docker push registry.com/app-name:v1.0
# Pull image
docker pull registry.com/app-name:v1.0
# Clean up unused images
docker image prune
docker system prune
Docker Compose
# Start services (Compose V2 ships as "docker compose"; the hyphenated form is V1)
docker-compose up
docker-compose up -d # Detached mode
# Stop services
docker-compose down
# Rebuild and start
docker-compose up --build
# View logs
docker-compose logs
docker-compose logs app-service
# Scale services
docker-compose up --scale web=3
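# Check service status and run one-off commands inside a service container
docker-compose ps
docker-compose exec app-service bash
docker-compose restart app-service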
Kubernetes Commands
Cluster Information
# Get cluster info
kubectl cluster-info
# Get nodes
kubectl get nodes
kubectl describe node node-name
# Get namespaces
kubectl get namespaces
# Set default namespace
kubectl config set-context --current --namespace=my-namespace
Pod Management
# Get pods
kubectl get pods
kubectl get pods -A # All namespaces
kubectl get pods -o wide
# Describe pod
kubectl describe pod pod-name
# Get pod logs
kubectl logs pod-name
kubectl logs -f pod-name # Follow logs
kubectl logs pod-name -c container-name # Specific container
# Execute command in pod
kubectl exec -it pod-name -- bash
kubectl exec pod-name -- ls /app
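# Forward a local port to a pod for quick debugging
kubectl port-forward pod/pod-name 8000:8000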
Service and Deployment Management
# Get services
kubectl get services
kubectl get svc
# Get deployments
kubectl get deployments
kubectl get deploy
# Scale deployment
kubectl scale deployment/app-deployment --replicas=3
# Update deployment image
kubectl set image deployment/app-deployment app=new-image:tag
# Rollback deployment
kubectl rollout undo deployment/app-deployment
# Check rollout status
kubectl rollout status deployment/app-deployment
Configuration Management
# Get configmaps
kubectl get configmaps
kubectl describe configmap config-name
# Get secrets
kubectl get secrets
kubectl describe secret secret-name
# Create secret
kubectl create secret generic my-secret --from-literal=key=value
# Apply configuration
kubectl apply -f deployment.yaml
kubectl apply -f . # All YAML files in directory
# Delete resources
kubectl delete -f deployment.yaml
kubectl delete pod pod-name
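# Create a secret from an env file, then review it without editing
kubectl create secret generic my-secret --from-env-file=.env
kubectl get secret my-secret -o yaml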
Monitoring and Debugging
System Information
# Check disk space
df -h
# Check memory usage
free -h
# Check CPU usage
top
htop
# Check running processes
ps aux | grep python
pgrep -f "celery worker"
# Check port usage
netstat -tulpn | grep :8000
lsof -i :8000
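# ss is the modern replacement for netstat on most distributions
ss -tulpn | grep :8000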
Log Management
# View system logs
journalctl -u nginx
journalctl -f # Follow logs
# Application logs
tail -f /var/log/app/error.log
tail -n 100 /var/log/app/access.log
# Search in logs
grep "ERROR" /var/log/app/error.log
grep -r "specific text" /var/log/
Network Debugging
# Test connectivity
ping google.com
curl -I https://api.example.com
telnet hostname 80
# DNS lookup
nslookup domain.com
dig domain.com
# Network configuration
ip addr show
netstat -rn # Routing table
Backup and Restore
Database Backup
# PostgreSQL backup
pg_dump -h localhost -p 5433 -U username database_name | gzip > backup_$(date +%Y%m%d).sql.gz
# Restore PostgreSQL backup
gunzip -c backup_20250120.sql.gz | psql -h localhost -p 5433 -U username database_name
# Automated backup script
#!/bin/bash
BACKUP_DIR="/backups"
DATE=$(date +%Y%m%d_%H%M%S)
pg_dump -h localhost -p 5433 -U username database_name > "$BACKUP_DIR/backup_$DATE.sql"
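# Schedule the script with cron, e.g. daily at 02:00 (adjust the script path):
# crontab -e
# 0 2 * * * /path/to/backup.sh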
File System Backup
# Create tar archive
tar -czf backup_$(date +%Y%m%d).tar.gz /path/to/directory
# Extract tar archive
tar -xzf backup_20250120.tar.gz
# Rsync backup
rsync -av --delete /source/directory/ /backup/directory/
# Remote rsync
rsync -av --delete -e ssh /local/directory/ user@remote:/backup/directory/
Performance Monitoring
Application Performance
# Django debug toolbar setup
pip install django-debug-toolbar
# Monitor database queries (run inside the Django shell)
python manage.py shell
>>> from django.db import connection
>>> print(connection.queries)
# Profile Python code
python -m cProfile -o profile.stats manage.py runserver --noreload
python -c "import pstats; pstats.Stats('profile.stats').sort_stats('time').print_stats(10)"
# Memory profiling
pip install memory-profiler
# In script.py, decorate the function to profile:
#   @profile
#   def my_function():
#       pass
python -m memory_profiler script.py
System Performance
# I/O statistics
iostat -x 1
# Network statistics
iftop
netstat -i
# Disk usage analysis
du -sh /path/to/directory
find /path -type f -size +100M
# Database performance: enable the PostgreSQL slow query log
# in postgresql.conf (value is in milliseconds):
# log_min_duration_statement = 1000
This command reference provides quick access to the most commonly used commands for the AzmX development environment.