Deploy your Crave.js backend API to production with confidence. This section walks through containerizing the API with Docker, deploying it to AWS ECS, Google Cloud Run, or Kubernetes, running it under PM2 on a plain server, configuring environment variables, fronting it with Nginx and TLS, and wiring up health checks, logging, and MongoDB backups.
# Dockerfile
FROM node:18-alpine
WORKDIR /app
# Copy package files
COPY package*.json ./
RUN npm ci --only=production
# Copy source code
COPY . .
# Create non-root user
RUN addgroup -g 1001 -S nodejs
RUN adduser -S craveapi -u 1001
# Set ownership
RUN chown -R craveapi:nodejs /app
USER craveapi
# Expose port
EXPOSE 8000
# Health check (node:18-alpine ships without curl, so use Node itself)
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
  CMD node -e "require('http').get('http://localhost:8000/health', (res) => process.exit(res.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))"
CMD ["npm", "start"]
# ecs-task-definition.json
{
  "family": "crave-api-task",
  "networkMode": "awsvpc",
  "requiresCompatibilities": ["FARGATE"],
  "cpu": "512",
  "memory": "1024",
  "executionRoleArn": "arn:aws:iam::account:role/ecsTaskExecutionRole",
  "containerDefinitions": [
    {
      "name": "crave-api",
      "image": "your-account.dkr.ecr.region.amazonaws.com/crave-api:latest",
      "portMappings": [
        {
          "containerPort": 8000,
          "protocol": "tcp"
        }
      ],
      "environment": [
        {
          "name": "NODE_ENV",
          "value": "production"
        },
        {
          "name": "PORT",
          "value": "8000"
        }
      ],
      "secrets": [
        {
          "name": "MONGODB_URI",
          "valueFrom": "arn:aws:secretsmanager:region:account:secret:crave-mongodb-uri"
        },
        {
          "name": "STRIPE_SECRET_KEY",
          "valueFrom": "arn:aws:secretsmanager:region:account:secret:crave-stripe-key"
        }
      ],
      "logConfiguration": {
        "logDriver": "awslogs",
        "options": {
          "awslogs-group": "/ecs/crave-api",
          "awslogs-region": "us-east-1",
          "awslogs-stream-prefix": "ecs"
        }
      }
    }
  ]
}
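To roll this out, register the task definition and point the service at the new revision; the cluster and service names below are assumptions, adjust them to your setup:
# Register a new task definition revision
aws ecs register-task-definition --cli-input-json file://ecs-task-definition.json

# Point the service at the new revision and force a fresh deployment
aws ecs update-service \
  --cluster crave-cluster \
  --service crave-api-service \
  --task-definition crave-api-task \
  --force-new-deployment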
# cloud-run-service.yaml
apiVersion: serving.knative.dev/v1
kind: Service
metadata:
  name: crave-api
  annotations:
    run.googleapis.com/ingress: all
spec:
  template:
    metadata:
      annotations:
        autoscaling.knative.dev/maxScale: "10"
    spec:
      containers:
        - image: gcr.io/your-project/crave-api:latest
          ports:
            - containerPort: 8000
          resources:
            limits:
              memory: "1Gi"
              cpu: "1000m"
          env:
            - name: NODE_ENV
              value: production
            - name: PORT
              value: "8000"
            - name: MONGODB_URI
              valueFrom:
                secretKeyRef:
                  name: crave-secrets
                  key: mongodb-uri
            - name: STRIPE_SECRET_KEY
              valueFrom:
                secretKeyRef:
                  name: crave-secrets
                  key: stripe-secret-key
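One way to apply this manifest is gcloud run services replace; the project and region are placeholders:
# Apply the service manifest to Cloud Run
gcloud run services replace cloud-run-service.yaml \
  --project your-project \
  --region us-east1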
# Install PM2 globally
npm install -g pm2

# Create ecosystem file
cat > ecosystem.config.js << 'EOF'
module.exports = {
  apps: [{
    name: 'crave-api',
    // Cluster mode needs the Node entry point directly; it cannot wrap "npm start"
    script: './server.js',
    instances: 'max',
    exec_mode: 'cluster',
    env: {
      NODE_ENV: 'development',
      PORT: 8000
    },
    env_production: {
      NODE_ENV: 'production',
      PORT: 8000,
      MONGODB_URI: 'mongodb://localhost:27017/cravedb',
      REDIS_URL: 'redis://localhost:6379'
    },
    log_date_format: 'YYYY-MM-DD HH:mm:ss Z',
    error_file: './logs/err.log',
    out_file: './logs/out.log',
    log_file: './logs/combined.log'
  }]
};
EOF

# Start the application
pm2 start ecosystem.config.js --env production
pm2 save
pm2 startup
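For later releases, a zero-downtime rolling restart of the cluster is usually enough:
# Reload all instances one by one without dropping connections
pm2 reload crave-api

# Tail logs and check process health
pm2 logs crave-api
pm2 status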
Create a .env file for production:
# Application
NODE_ENV=production
PORT=8000
API_BASE_URL=https://api.yourrestaurant.com
# Database
MONGODB_URI=mongodb://username:password@host:27017/cravedb?authSource=admin
REDIS_URL=redis://username:password@host:6379
# Authentication
JWT_SECRET=your-super-secret-jwt-key-here
API_KEY_SALT=your-api-key-salt-here
# Payment Processing
STRIPE_SECRET_KEY=sk_live_your_stripe_secret_key
STRIPE_WEBHOOK_SECRET=whsec_your_webhook_secret
# Email Services
SMTP_HOST=smtp.yourprovider.com
SMTP_PORT=587
SMTP_USER=your_email@yourrestaurant.com
SMTP_PASS=your_email_password
# File Storage
AWS_ACCESS_KEY_ID=your_aws_access_key
AWS_SECRET_ACCESS_KEY=your_aws_secret_key
AWS_REGION=us-east-1
S3_BUCKET_NAME=your-restaurant-images
# Monitoring
SENTRY_DSN=https://your-sentry-dsn@sentry.io/project
LOG_LEVEL=info
# Rate Limiting
RATE_LIMIT_WINDOW_MS=60000
RATE_LIMIT_MAX_REQUESTS=100
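Since a single missing variable can take the whole API down in confusing ways, it helps to validate the environment at boot. A minimal sketch; config/env.js and the variable list are assumptions, not part of the generated project:
// config/env.js — hypothetical startup check for required environment variables
const REQUIRED = [
  'MONGODB_URI',
  'JWT_SECRET',
  'STRIPE_SECRET_KEY'
];

function validateEnv() {
  const missing = REQUIRED.filter((name) => !process.env[name]);
  if (missing.length > 0) {
    // Crash immediately rather than failing later on the first request
    console.error(`Missing required environment variables: ${missing.join(', ')}`);
    process.exit(1);
  }
}

module.exports = validateEnv;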
// Development config (e.g. config/development.js): relaxed CORS, rate limits, and logging for local work
module.exports = {
  port: 8000,
  cors: {
    origin: ['http://localhost:3000', 'http://localhost:3001'],
    credentials: true
  },
  rateLimit: {
    windowMs: 60 * 1000, // 1 minute
    max: 1000 // Very high limit for development
  },
  logging: {
    level: 'debug'
  }
};
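A small loader can then pick the matching module per NODE_ENV; this sketch assumes a config/ directory with one file per environment (config/index.js is an assumed name):
// config/index.js — hypothetical loader that selects the config module by NODE_ENV
const env = process.env.NODE_ENV || 'development';

// Resolves to e.g. config/development.js or config/production.js
module.exports = require(`./${env}`);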
# /etc/nginx/sites-available/crave-api
upstream crave_api {
    least_conn;
    server 127.0.0.1:8000;
    server 127.0.0.1:8001;
    server 127.0.0.1:8002;
}

server {
    listen 80;
    server_name api.yourrestaurant.com;
    return 301 https://$server_name$request_uri;
}

server {
    listen 443 ssl http2;
    server_name api.yourrestaurant.com;

    ssl_certificate /etc/letsencrypt/live/api.yourrestaurant.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/api.yourrestaurant.com/privkey.pem;

    # Security headers
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;

    # Gzip compression
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_types text/plain text/css text/xml text/javascript application/javascript application/json;

    location / {
        proxy_pass http://crave_api;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
        proxy_read_timeout 86400;
    }

    location /health {
        access_log off;
        proxy_pass http://crave_api;
    }
}
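Enable the site and validate the configuration before reloading Nginx:
# Enable the site, test the config, then reload
sudo ln -s /etc/nginx/sites-available/crave-api /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx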
apiVersion: apps/v1
kind: Deployment
metadata:
  name: crave-api
spec:
  replicas: 3
  selector:
    matchLabels:
      app: crave-api
  template:
    metadata:
      labels:
        app: crave-api
    spec:
      containers:
        - name: crave-api
          image: your-registry/crave-api:latest
          ports:
            - containerPort: 8000
          env:
            - name: NODE_ENV
              value: production
            - name: PORT
              value: "8000"
          envFrom:
            - secretRef:
                name: crave-api-secrets
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 8000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: 8000
            initialDelaySeconds: 5
            periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: crave-api-service
spec:
  selector:
    app: crave-api
  ports:
    - protocol: TCP
      port: 80
      targetPort: 8000
  type: LoadBalancer
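The crave-api-secrets referenced by envFrom has to exist before the pods start; creating it and applying the manifests might look like this (file name and secret values are placeholders):
# Create the secret referenced by envFrom
kubectl create secret generic crave-api-secrets \
  --from-literal=MONGODB_URI='mongodb://user:pass@host:27017/cravedb' \
  --from-literal=STRIPE_SECRET_KEY='sk_live_placeholder'

# Apply the Deployment and Service
kubectl apply -f crave-api-deployment.yaml

# Watch the rollout
kubectl rollout status deployment/crave-api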
// config/database.js
const mongoose = require('mongoose');

const connectDB = async () => {
  try {
    const conn = await mongoose.connect(process.env.MONGODB_URI, {
      // useNewUrlParser/useUnifiedTopology and bufferMaxEntries are gone in Mongoose 6+
      maxPoolSize: 10,
      serverSelectionTimeoutMS: 5000,
      socketTimeoutMS: 45000,
      family: 4,
      // Production optimizations: fail fast instead of buffering commands,
      // and skip automatic index builds in production
      bufferCommands: false,
      autoIndex: process.env.NODE_ENV !== 'production'
    });

    console.log(`MongoDB Connected: ${conn.connection.host}`);

    // Handle connection events
    mongoose.connection.on('error', err => {
      console.error('MongoDB connection error:', err);
    });

    mongoose.connection.on('disconnected', () => {
      console.log('MongoDB disconnected');
    });
  } catch (error) {
    console.error('Database connection failed:', error);
    process.exit(1);
  }
};

module.exports = connectDB;
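Pairing this with a graceful shutdown keeps rolling deploys (ECS, Kubernetes, PM2 reload) from cutting connections off mid-request; a minimal sketch for the server entry point:
// server.js (excerpt): close the Mongoose connection cleanly on shutdown
const mongoose = require('mongoose');

process.on('SIGTERM', async () => {
  console.log('SIGTERM received, closing MongoDB connection');
  await mongoose.connection.close();
  process.exit(0);
});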
// config/redis.js
const redis = require('redis');

// node-redis v4 client; reconnection behaviour is configured via socket.reconnectStrategy
const client = redis.createClient({
  url: process.env.REDIS_URL,
  socket: {
    reconnectStrategy: (retries) => {
      // Give up after 10 attempts
      if (retries > 10) {
        return new Error('Redis retry attempts exhausted');
      }
      // Back off: 100ms, 200ms, ... capped at 3 seconds
      return Math.min(retries * 100, 3000);
    }
  }
});

client.on('error', (err) => {
  console.error('Redis Client Error:', err);
});

client.on('connect', () => {
  console.log('Redis Client Connected');
});

// v4 clients must connect explicitly before issuing commands
client.connect().catch((err) => {
  console.error('Redis connection failed:', err);
});

module.exports = client;
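With the client exported this way, a cache-aside helper is a common next step. A sketch assuming node-redis v4; utils/cache.js and getOrSet are made-up names:
// utils/cache.js — hypothetical cache-aside helper built on the shared client
const client = require('../config/redis');

// Return cached JSON if present, otherwise load it, cache it with a TTL, and return it
async function getOrSet(key, ttlSeconds, loadFn) {
  const cached = await client.get(key);
  if (cached) {
    return JSON.parse(cached);
  }
  const fresh = await loadFn();
  await client.setEx(key, ttlSeconds, JSON.stringify(fresh));
  return fresh;
}

module.exports = { getOrSet };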
// routes/health.js
const express = require('express');
const mongoose = require('mongoose');
const redis = require('../config/redis');

const router = express.Router();

router.get('/health', async (req, res) => {
  const health = {
    timestamp: new Date().toISOString(),
    status: 'healthy',
    version: process.env.npm_package_version,
    environment: process.env.NODE_ENV,
    checks: {}
  };

  // Database check
  try {
    await mongoose.connection.db.admin().ping();
    health.checks.database = 'healthy';
  } catch (error) {
    health.checks.database = 'unhealthy';
    health.status = 'unhealthy';
  }

  // Redis check
  try {
    await redis.ping();
    health.checks.redis = 'healthy';
  } catch (error) {
    health.checks.redis = 'unhealthy';
    health.status = 'unhealthy';
  }

  // Memory check
  const memoryUsage = process.memoryUsage();
  health.checks.memory = {
    rss: Math.round(memoryUsage.rss / 1024 / 1024),
    heapUsed: Math.round(memoryUsage.heapUsed / 1024 / 1024),
    heapTotal: Math.round(memoryUsage.heapTotal / 1024 / 1024)
  };

  res.status(health.status === 'healthy' ? 200 : 503).json(health);
});

module.exports = router;
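For the Docker, ECS, Cloud Run, Kubernetes, and Nginx health checks above to work, the router has to be mounted so /health is reachable; an illustrative excerpt, assuming app.js is where the Express app is assembled:
// app.js (excerpt): expose /health for load balancers and container health checks
const express = require('express');
const healthRoutes = require('./routes/health');

const app = express();
app.use('/', healthRoutes);

module.exports = app;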
// config/logger.js
const winston = require('winston');

const logger = winston.createLogger({
  level: process.env.LOG_LEVEL || 'info',
  format: winston.format.combine(
    winston.format.timestamp(),
    winston.format.errors({ stack: true }),
    winston.format.json()
  ),
  defaultMeta: { service: 'crave-api' },
  transports: [
    new winston.transports.File({ filename: 'logs/error.log', level: 'error' }),
    new winston.transports.File({ filename: 'logs/combined.log' })
  ]
});

if (process.env.NODE_ENV !== 'production') {
  logger.add(new winston.transports.Console({
    format: winston.format.simple()
  }));
}

module.exports = logger;
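HTTP access logs can flow through the same transports; one option is piping morgan into winston (morgan is an extra dependency that the earlier snippets do not install):
// app.js (excerpt): route morgan's access-log lines through the winston logger
const express = require('express');
const morgan = require('morgan');
const logger = require('./config/logger');

const app = express();

app.use(morgan('combined', {
  stream: { write: (message) => logger.info(message.trim()) }
}));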
# Install certbot
sudo apt-get update
sudo apt-get install certbot python3-certbot-nginx
# Generate SSL certificate
sudo certbot --nginx -d api.yourrestaurant.com
# Auto-renewal
sudo crontab -e
# Add this line:
0 12 * * * /usr/bin/certbot renew --quiet
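Before trusting the cron entry, a dry run verifies that renewal and the Nginx plugin work end to end:
# Simulate a renewal without touching the live certificate
sudo certbot renew --dry-run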
// server.js
const https = require('https');
const fs = require('fs');
const app = require('./app');

if (process.env.NODE_ENV === 'production') {
  const options = {
    key: fs.readFileSync('/etc/letsencrypt/live/api.yourrestaurant.com/privkey.pem'),
    cert: fs.readFileSync('/etc/letsencrypt/live/api.yourrestaurant.com/fullchain.pem')
  };

  https.createServer(options, app).listen(443, () => {
    console.log('HTTPS Server running on port 443');
  });
} else {
  app.listen(8000, () => {
    console.log('HTTP Server running on port 8000');
  });
}
#!/bin/bash
# backup-mongodb.sh
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR="/backups/mongodb"

mkdir -p "$BACKUP_DIR"

# Create backup (the target database comes from the connection string in MONGODB_URI)
mongodump --uri="$MONGODB_URI" --out="$BACKUP_DIR/$DATE"
# Compress backup
tar -czf $BACKUP_DIR/backup_$DATE.tar.gz -C $BACKUP_DIR $DATE
# Upload to S3 (optional)
aws s3 cp $BACKUP_DIR/backup_$DATE.tar.gz s3://your-backups-bucket/mongodb/
# Clean up local files older than 7 days
find $BACKUP_DIR -name "backup_*.tar.gz" -mtime +7 -delete
find $BACKUP_DIR -type d -name "20*" -mtime +7 -exec rm -rf {} +
echo "Backup completed: backup_$DATE.tar.gz"
# Add to crontab
0 2 * * * /path/to/backup-mongodb.sh >> /var/log/backup.log 2>&1
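Restoring from one of these archives is roughly the reverse; a sketch with placeholder names (the connection string here deliberately omits a database name so mongorestore uses the dump's own layout):
# Download and unpack a backup archive (file name is an example)
aws s3 cp s3://your-backups-bucket/mongodb/backup_20240101_020000.tar.gz /tmp/
tar -xzf /tmp/backup_20240101_020000.tar.gz -C /tmp

# Restore every database in the dump, dropping existing collections first
mongorestore --drop --uri="mongodb://username:password@host:27017/?authSource=admin" /tmp/20240101_020000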