Microservices Architecture with Node.js and Docker
Microservices architecture has become a standard approach for building scalable, maintainable applications. Let's explore how to design and implement a production-ready microservices system using Node.js and Docker.
Why Microservices?
Microservices offer significant advantages over monolithic architecture:
- Scalability - Scale services independently based on demand
- Flexibility - Use different technologies for different services
- Resilience - Failure in one service doesn't crash the entire system
- Faster Development - Teams can work on services independently
- Independent Deployment - Deploy and update individual services without redeploying the whole system
- Better Organization - Clear boundaries and responsibilities
Architecture Design Principles
Single Responsibility
Each service should do one thing well.
javascript
// ❌ Bad: Auth service doing too much
class AuthService {
async login(credentials) {}
async register(userData) {}
async sendEmail(to, subject, body) {}
async processPayment(amount) {}
async generateReport() {}
}
// ✅ Good: Separate services
// auth-service/
class AuthService {
async login(credentials) {}
async register(userData) {}
async refreshToken(token) {}
}
// email-service/
class EmailService {
async sendEmail(to, subject, body) {}
async sendTemplateEmail(to, template, data) {}
}
// payment-service/
class PaymentService {
async processPayment(amount, method) {}
async refund(transactionId) {}
}
Service Communication
Choose the right communication pattern.
Synchronous (REST/gRPC)
javascript
// user-service/routes/users.js
import express from "express";
import axios from "axios";
import User from "../models/User.js";

const router = express.Router();

router.get("/:id", async (req, res) => {
  let user;
  try {
    user = await User.findById(req.params.id);
    if (!user) {
      return res.status(404).json({ error: "User not found" });
    }
    // Call order service to get the user's orders
    const ordersResponse = await axios.get(
      `http://order-service:3000/orders/user/${user.id}`,
      {
        timeout: 5000,
        headers: {
          Authorization: req.headers.authorization,
        },
      }
    );
    res.json({
      ...user.toJSON(),
      orders: ordersResponse.data,
    });
  } catch (error) {
    if (user && error.code === "ECONNREFUSED") {
      // Order service unavailable - return partial data
      res.json({ ...user.toJSON(), orders: [] });
    } else {
      res.status(500).json({ error: error.message });
    }
  }
});

export default router;
Asynchronous (Message Queue)
javascript
// order-service/events/publisher.js
import amqp from 'amqplib'
class OrderEventPublisher {
constructor() {
this.connection = null
this.channel = null
}
async connect() {
this.connection = await amqp.connect(process.env.RABBITMQ_URL)
this.channel = await this.connection.createChannel()
await this.channel.assertExchange('orders', 'topic', { durable: true })
}
async publishOrderCreated(order) {
const message = {
eventType: 'ORDER_CREATED',
timestamp: new Date().toISOString(),
data: {
orderId: order.id,
userId: order.userId,
items: order.items,
total: order.total
}
}
this.channel.publish(
'orders',
'order.created',
Buffer.from(JSON.stringify(message)),
{ persistent: true }
)
console.log('Published ORDER_CREATED event:', order.id)
}
}
export default new OrderEventPublisher()
// inventory-service/events/consumer.js
import amqp from 'amqplib'
import { reserveStock } from '../services/inventory.js'
class OrderEventConsumer {
async connect() {
const connection = await amqp.connect(process.env.RABBITMQ_URL)
const channel = await connection.createChannel()
await channel.assertExchange('orders', 'topic', { durable: true })
const queue = await channel.assertQueue('inventory-order-events', {
durable: true
})
await channel.bindQueue(queue.queue, 'orders', 'order.created')
channel.consume(queue.queue, async (msg) => {
if (msg) {
const event = JSON.parse(msg.content.toString())
try {
await this.handleOrderCreated(event.data)
channel.ack(msg)
} catch (error) {
console.error('Error processing event:', error)
// Retry logic or dead letter queue
channel.nack(msg, false, true)
}
}
})
}
async handleOrderCreated(orderData) {
console.log('Processing order:', orderData.orderId)
for (const item of orderData.items) {
await reserveStock(item.productId, item.quantity)
}
}
}
export default new OrderEventConsumer()
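One caveat with the consumer above: channel.nack(msg, false, true) requeues the message, so a poison message will be redelivered forever. A dead-letter queue is the usual escape hatch. The sketch below is a hypothetical setup (the orders.dlx exchange and the .dlq queue name are illustrative, not part of the services above); the consumer would then call channel.nack(msg, false, false) so rejected messages are routed to the dead-letter exchange. Note that RabbitMQ requires a queue's arguments to match its original declaration.
javascript
// inventory-service/events/deadLetter.js - hypothetical dead-letter setup
import amqp from 'amqplib'

export async function assertQueuesWithDLQ() {
  const connection = await amqp.connect(process.env.RABBITMQ_URL)
  const channel = await connection.createChannel()

  // Exchange and queue that collect messages the consumer gives up on
  await channel.assertExchange('orders.dlx', 'topic', { durable: true })
  const dlq = await channel.assertQueue('inventory-order-events.dlq', { durable: true })
  await channel.bindQueue(dlq.queue, 'orders.dlx', '#')

  // Work queue: messages rejected with requeue=false are routed to the DLX
  const queue = await channel.assertQueue('inventory-order-events', {
    durable: true,
    deadLetterExchange: 'orders.dlx'
  })

  return { connection, channel, queue }
}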
Service Structure
Standard microservice project structure:
user-service/
├── src/
│ ├── config/
│ │ └── database.js
│ ├── models/
│ │ └── User.js
│ ├── routes/
│ │ └── users.js
│ ├── services/
│ │ └── userService.js
│ ├── middleware/
│ │ ├── auth.js
│ │ └── errorHandler.js
│ ├── events/
│ │ ├── publisher.js
│ │ └── consumer.js
│ └── app.js
├── tests/
│ ├── unit/
│ └── integration/
├── Dockerfile
├── docker-compose.yml
├── package.json
└── .env.example
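To tie the pieces together, here is a minimal sketch of what src/app.js could look like. It assumes routes/users.js and middleware/errorHandler.js export what their names suggest; the /health and /ready endpoints line up with the Docker health check and Kubernetes probes shown later.
javascript
// user-service/src/app.js - minimal sketch, wiring assumed from the tree above
import express from "express";
import usersRouter from "./routes/users.js";
import { errorHandler } from "./middleware/errorHandler.js";

const app = express();
app.use(express.json());

// Liveness and readiness endpoints used by Docker and Kubernetes probes
app.get("/health", (req, res) => res.json({ status: "healthy" }));
app.get("/ready", (req, res) => res.json({ status: "ready" }));

// User routes (the API gateway strips the /api/users prefix before proxying)
app.use("/", usersRouter);

// Centralized error handling
app.use(errorHandler);

const PORT = process.env.PORT || 3000;
app.listen(PORT, () => {
  console.log(`user-service listening on port ${PORT}`);
});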
Docker Containerization
Service Dockerfile
dockerfile
# user-service/Dockerfile
FROM node:20-alpine AS builder
WORKDIR /app
# Copy package files
COPY package*.json ./
# Install dependencies
RUN npm ci --omit=dev
# Copy source code
COPY . .
# Production stage
FROM node:20-alpine
WORKDIR /app
# Create non-root user
RUN addgroup -g 1001 -S nodejs && \
adduser -S nodejs -u 1001
# Copy from builder
COPY --from=builder --chown=nodejs:nodejs /app /app
# Switch to non-root user
USER nodejs
# Expose port
EXPOSE 3000
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD node -e "require('http').get('http://localhost:3000/health', (r) => {process.exit(r.statusCode === 200 ? 0 : 1)})"
# Start application
CMD ["node", "src/app.js"]
Docker Compose for Development
yaml
# docker-compose.yml
version: "3.8"
services:
# API Gateway
api-gateway:
build:
context: ./api-gateway
dockerfile: Dockerfile
ports:
- "3000:3000"
environment:
- NODE_ENV=development
      - USER_SERVICE_URL=http://user-service:3000
      - ORDER_SERVICE_URL=http://order-service:3000
      - PRODUCT_SERVICE_URL=http://product-service:3000
depends_on:
- user-service
- order-service
- product-service
networks:
- microservices
# User Service
user-service:
build:
context: ./user-service
dockerfile: Dockerfile
environment:
- NODE_ENV=development
- DATABASE_URL=postgresql://postgres:password@user-db:5432/users
      - RABBITMQ_URL=amqp://admin:admin@rabbitmq:5672
- JWT_SECRET=${JWT_SECRET}
depends_on:
- user-db
- rabbitmq
networks:
- microservices
user-db:
image: postgres:16-alpine
environment:
- POSTGRES_DB=users
- POSTGRES_PASSWORD=password
volumes:
- user-data:/var/lib/postgresql/data
networks:
- microservices
# Order Service
order-service:
build:
context: ./order-service
dockerfile: Dockerfile
environment:
- NODE_ENV=development
- DATABASE_URL=postgresql://postgres:password@order-db:5432/orders
      - RABBITMQ_URL=amqp://admin:admin@rabbitmq:5672
depends_on:
- order-db
- rabbitmq
networks:
- microservices
order-db:
image: postgres:16-alpine
environment:
- POSTGRES_DB=orders
- POSTGRES_PASSWORD=password
volumes:
- order-data:/var/lib/postgresql/data
networks:
- microservices
# Product Service
product-service:
build:
context: ./product-service
dockerfile: Dockerfile
environment:
- NODE_ENV=development
- MONGODB_URL=mongodb://product-db:27017/products
- REDIS_URL=redis://redis:6379
depends_on:
- product-db
- redis
networks:
- microservices
product-db:
image: mongo:7
volumes:
- product-data:/data/db
networks:
- microservices
# Message Broker
rabbitmq:
image: rabbitmq:3-management-alpine
ports:
- "5672:5672"
- "15672:15672"
environment:
- RABBITMQ_DEFAULT_USER=admin
- RABBITMQ_DEFAULT_PASS=admin
volumes:
- rabbitmq-data:/var/lib/rabbitmq
networks:
- microservices
# Cache
redis:
image: redis:7-alpine
ports:
- "6379:6379"
volumes:
- redis-data:/data
networks:
- microservices
volumes:
user-data:
order-data:
product-data:
rabbitmq-data:
redis-data:
networks:
microservices:
driver: bridge
API Gateway Pattern
Centralize routing and cross-cutting concerns.
javascript
// api-gateway/src/app.js
import express from "express";
import { createProxyMiddleware } from "http-proxy-middleware";
import rateLimit from "express-rate-limit";
import helmet from "helmet";
import cors from "cors";
import jwt from "jsonwebtoken";
const app = express();
// Security
app.use(helmet());
app.use(cors());
// Rate limiting
const limiter = rateLimit({
windowMs: 15 * 60 * 1000,
max: 100,
});
app.use(limiter);
// Authentication middleware
const authenticate = (req, res, next) => {
const token = req.headers.authorization?.split(" ")[1];
if (!token) {
return res.status(401).json({ error: "No token provided" });
}
try {
const decoded = jwt.verify(token, process.env.JWT_SECRET);
req.user = decoded;
next();
} catch (error) {
res.status(401).json({ error: "Invalid token" });
}
};
// Route to services
app.use(
"/api/users",
createProxyMiddleware({
target: process.env.USER_SERVICE_URL,
changeOrigin: true,
pathRewrite: { "^/api/users": "" },
onProxyReq: (proxyReq, req) => {
// Forward user info
if (req.user) {
proxyReq.setHeader("X-User-Id", req.user.id);
proxyReq.setHeader("X-User-Role", req.user.role);
}
},
})
);
app.use(
"/api/orders",
authenticate,
createProxyMiddleware({
target: process.env.ORDER_SERVICE_URL,
changeOrigin: true,
pathRewrite: { "^/api/orders": "" },
})
);
app.use(
"/api/products",
createProxyMiddleware({
target: process.env.PRODUCT_SERVICE_URL,
changeOrigin: true,
pathRewrite: { "^/api/products": "" },
})
);
// Health check
app.get("/health", (req, res) => {
res.json({ status: "healthy" });
});
const PORT = process.env.PORT || 3000;
app.listen(PORT, () => {
  console.log(`API Gateway running on port ${PORT}`);
});
Service Discovery
Implement a service registry for dynamic service discovery.
javascript
// service-registry/index.js
import express from "express";
const app = express();
app.use(express.json());
const services = new Map();
// Register service
app.post("/register", (req, res) => {
const { name, host, port, healthCheck } = req.body;
  const serviceId = `${name}-${Date.now()}`;
services.set(serviceId, {
id: serviceId,
name,
    url: `http://${host}:${port}`,
healthCheck,
registeredAt: new Date(),
lastHeartbeat: new Date(),
});
res.json({ serviceId });
});
// Heartbeat
app.post("/heartbeat/:serviceId", (req, res) => {
const service = services.get(req.params.serviceId);
if (service) {
service.lastHeartbeat = new Date();
res.json({ status: "ok" });
} else {
res.status(404).json({ error: "Service not found" });
}
});
// Discover services
app.get("/discover/:serviceName", (req, res) => {
const instances = Array.from(services.values())
.filter((s) => s.name === req.params.serviceName)
.filter((s) => {
const timeSinceHeartbeat = Date.now() - s.lastHeartbeat.getTime();
return timeSinceHeartbeat < 30000; // 30 seconds
});
if (instances.length === 0) {
return res.status(404).json({ error: "No healthy instances" });
}
  // Pick a random healthy instance (simple load balancing)
  const instance = instances[Math.floor(Math.random() * instances.length)];
res.json(instance);
});
app.listen(8500, () => {
console.log("Service registry running on port 8500");
});
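A minimal sketch of the client side, showing how a service might register itself and keep sending heartbeats. The endpoints match the registry above; the REGISTRY_URL value and the axios dependency are assumptions.
javascript
// user-service/src/registry.js - hypothetical client for the registry above
import axios from "axios";

const REGISTRY_URL = process.env.REGISTRY_URL || "http://service-registry:8500";

export async function registerService({ name, host, port }) {
  // Register and remember the id the registry assigns us
  const { data } = await axios.post(`${REGISTRY_URL}/register`, {
    name,
    host,
    port,
    healthCheck: `http://${host}:${port}/health`,
  });

  // Heartbeat well inside the registry's 30-second liveness window
  setInterval(async () => {
    try {
      await axios.post(`${REGISTRY_URL}/heartbeat/${data.serviceId}`);
    } catch (error) {
      console.error("Heartbeat failed:", error.message);
    }
  }, 10000);

  return data.serviceId;
}

// Usage: await registerService({ name: "user-service", host: "user-service", port: 3000 });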
Kubernetes Deployment
Deploy to production with Kubernetes.
yaml
# user-service/k8s/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: user-service
labels:
app: user-service
spec:
replicas: 3
selector:
matchLabels:
app: user-service
template:
metadata:
labels:
app: user-service
spec:
containers:
- name: user-service
image: your-registry/user-service:latest
ports:
- containerPort: 3000
env:
- name: NODE_ENV
value: "production"
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: user-service-secrets
key: database-url
- name: JWT_SECRET
valueFrom:
secretKeyRef:
name: user-service-secrets
key: jwt-secret
resources:
requests:
memory: "256Mi"
cpu: "250m"
limits:
memory: "512Mi"
cpu: "500m"
livenessProbe:
httpGet:
path: /health
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
httpGet:
path: /ready
port: 3000
initialDelaySeconds: 5
periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
name: user-service
spec:
selector:
app: user-service
ports:
- protocol: TCP
port: 80
targetPort: 3000
type: ClusterIP
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: user-service-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: user-service
minReplicas: 3
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 70
Monitoring and Observability
Track service health and performance.
javascript
// monitoring/prometheus.js
import client from "prom-client";
const register = new client.Registry();
// Default metrics
client.collectDefaultMetrics({ register });
// Custom metrics
const httpRequestDuration = new client.Histogram({
name: "http_request_duration_seconds",
help: "Duration of HTTP requests in seconds",
labelNames: ["method", "route", "status_code"],
buckets: [0.1, 0.5, 1, 2, 5],
});
const httpRequestTotal = new client.Counter({
name: "http_requests_total",
help: "Total number of HTTP requests",
labelNames: ["method", "route", "status_code"],
});
register.registerMetric(httpRequestDuration);
register.registerMetric(httpRequestTotal);
// Middleware
export const metricsMiddleware = (req, res, next) => {
const start = Date.now();
res.on("finish", () => {
const duration = (Date.now() - start) / 1000;
httpRequestDuration
.labels(req.method, req.route?.path || req.path, res.statusCode)
.observe(duration);
httpRequestTotal
.labels(req.method, req.route?.path || req.path, res.statusCode)
.inc();
});
next();
};
export { register };
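To expose these metrics, each service mounts the middleware and a /metrics endpoint for Prometheus to scrape. A short sketch (the import path assumes the file above sits next to app.js):
javascript
// Example wiring inside a service's app.js (paths are illustrative)
import express from "express";
import { metricsMiddleware, register } from "./monitoring/prometheus.js";

const app = express();
app.use(metricsMiddleware);

// Prometheus scrapes this endpoint
app.get("/metrics", async (req, res) => {
  res.set("Content-Type", register.contentType);
  res.end(await register.metrics());
});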
Best Practices
1. Database per Service - Each service owns its data store; other services go through its API
2. API Versioning - Version public contracts so services can evolve without breaking consumers
3. Circuit Breaker Pattern - Fail fast when a dependency is down instead of cascading the failure (see the sketch after this list)
4. Distributed Tracing - Propagate a trace/correlation ID across service calls
5. Centralized Logging - Ship structured logs from every service to one searchable place
6. Automated Testing - Unit, integration, and contract tests for each service
7. CI/CD Pipeline - Build, test, and deploy each service independently
8. Security (mTLS, API Keys) - Encrypt and authenticate service-to-service traffic, and protect the edge
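The circuit breaker pattern (item 3) is worth a concrete example. Below is a minimal sketch using the opossum library as one common option; the thresholds and the getUserOrders helper are illustrative, not part of the services above.
javascript
// Hypothetical circuit breaker around the order-service call, using opossum
import axios from "axios";
import CircuitBreaker from "opossum";

async function getUserOrders(userId) {
  const response = await axios.get(
    `http://order-service:3000/orders/user/${userId}`,
    { timeout: 5000 }
  );
  return response.data;
}

const breaker = new CircuitBreaker(getUserOrders, {
  timeout: 3000, // consider the call failed after 3 seconds
  errorThresholdPercentage: 50, // open the circuit when half the calls fail
  resetTimeout: 30000, // probe again after 30 seconds
});

// Degrade gracefully instead of cascading the failure
breaker.fallback(() => []);
breaker.on("open", () => console.warn("order-service circuit opened"));

// Usage: const orders = await breaker.fire(user.id);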
Conclusion
Microservices architecture enables building scalable, resilient systems. Start with a solid foundation: proper service boundaries, containerization, orchestration, and observability. Remember: microservices add complexity, so only adopt them when the benefits outweigh the costs.
Happy architecting! 🏗️